diff --git a/.gitattributes b/.gitattributes index eb3511dd34fc0932771bb312cf3e0bc56cde7932..51fe98e19c0f64a38d180a36c5b306971f958bed 100644 --- a/.gitattributes +++ b/.gitattributes @@ -92,3 +92,5 @@ venv/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_train.so.8 filter=lfs diff=lfs merge=lfs -text diff --git a/ckpts/universal/global_step20/zero/18.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/18.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..90d5463eb4f3e3c07654e747ba0cc781da3b2ca3 --- /dev/null +++ b/ckpts/universal/global_step20/zero/18.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0b5bb03172e64f5c4b727b037425d48d61741cf89000ad3d78227e7ef55527e +size 9387 diff --git a/ckpts/universal/global_step20/zero/18.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/18.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..cfc53b0b022453d3fdfb335e635d603138568ef7 --- /dev/null +++ b/ckpts/universal/global_step20/zero/18.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:336861d14a1900989602d36ca298161f6dfb4aa32f1020087ea911d2b648102a +size 9293 diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3fba2107e20f91e50460e6f6f06ce47b9e93aeb8 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715682654 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/output.log b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..9176f9e25b4a0459e60700be93c7b95d02d3a8a1 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/output.log @@ -0,0 +1,42 @@ + +2024-05-14:10:30:54,810 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:10:30:59,263 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:10:30:59,265 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:10:30:59,266 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': 
'/data/cronscript/ckpts//hf_ckpt//global_step120'} +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/data/cronscript/ckpts//hf_ckpt//global_step120'. Use `repo_type` argument if needed. +The above exception was the direct cause of the following exception: +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 462, in cached_file + raise EnvironmentError( +OSError: Incorrect path_or_model_id: '/data/cronscript/ckpts//hf_ckpt//global_step120'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 
+zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..32efcae6f8e7c1e777a34a5dc2fd863b8d0802be --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T10:30:54.676632", + "startedAt": "2024-05-14T10:30:54.276855", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3392.339907894737, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.003, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.996, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3272.876, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + 
}, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.996, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 76.92251205444336 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..e057a2fbb0ad620c3e8e6a8a7a4f9350138d9475 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-05-14 10:30:54,297 INFO StreamThr :8266 [internal.py:wandb_internal():85] W&B internal server running at pid: 8266, started at: 2024-05-14 10:30:54.296584 +2024-05-14 10:30:54,299 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: status +2024-05-14 10:30:54,300 INFO WriterThread:8266 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/run-hd0wx5w4.wandb +2024-05-14 10:30:54,301 DEBUG SenderThread:8266 [sender.py:send():378] send: header +2024-05-14 10:30:54,310 DEBUG SenderThread:8266 [sender.py:send():378] send: run +2024-05-14 10:30:54,517 INFO SenderThread:8266 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files +2024-05-14 10:30:54,517 INFO SenderThread:8266 [sender.py:_start_run_threads():1123] run started: hd0wx5w4 with start time 1715682654.296393 +2024-05-14 10:30:54,524 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 10:30:54,524 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: check_version +2024-05-14 10:30:54,608 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 10:30:54,610 DEBUG HandlerThread:8266 [system_info.py:__init__():26] System info init +2024-05-14 10:30:54,610 DEBUG HandlerThread:8266 [system_info.py:__init__():41] System info init done +2024-05-14 10:30:54,610 INFO HandlerThread:8266 [system_monitor.py:start():194] Starting system monitor +2024-05-14 10:30:54,610 INFO SystemMonitor:8266 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 10:30:54,610 INFO HandlerThread:8266 [system_monitor.py:probe():214] Collecting system info +2024-05-14 10:30:54,610 INFO SystemMonitor:8266 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 10:30:54,611 INFO SystemMonitor:8266 [interfaces.py:start():188] Started disk monitoring +2024-05-14 10:30:54,611 INFO SystemMonitor:8266 [interfaces.py:start():188] Started memory monitoring +2024-05-14 10:30:54,611 INFO SystemMonitor:8266 [interfaces.py:start():188] Started network monitoring +2024-05-14 10:30:54,676 DEBUG HandlerThread:8266 [system_info.py:probe():150] Probing system +2024-05-14 10:30:54,684 DEBUG HandlerThread:8266 
[system_info.py:_probe_git():135] Probing git +2024-05-14 10:30:54,705 ERROR HandlerThread:8266 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 10:30:54,705 DEBUG HandlerThread:8266 [system_info.py:_probe_git():143] Probing git done +2024-05-14 10:30:54,705 DEBUG HandlerThread:8266 [system_info.py:probe():198] Probing system done +2024-05-14 10:30:54,705 DEBUG HandlerThread:8266 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T10:30:54.676632', 'startedAt': '2024-05-14T10:30:54.276855', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3392.339907894737, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.003, 'min': 800.0, 'max': 3400.0}, {'current': 3299.996, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3272.876, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.996, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 76.92251205444336}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 10:30:54,705 INFO HandlerThread:8266 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 10:30:54,705 INFO HandlerThread:8266 [system_monitor.py:probe():227] Publishing system info +2024-05-14 10:30:54,706 INFO HandlerThread:8266 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 10:30:54,710 DEBUG SenderThread:8266 [sender.py:send():378] send: files +2024-05-14 10:30:54,710 INFO SenderThread:8266 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 10:30:54,807 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 10:30:54,807 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: python_packages +2024-05-14 10:30:54,808 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:30:54,809 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:30:55,006 DEBUG SenderThread:8266 [sender.py:send():378] send: telemetry +2024-05-14 10:30:55,221 INFO wandb-upload_0:8266 [upload_job.py:push():130] Uploaded file /tmp/tmp59i13vk2wandb/uygq9ac7-wandb-metadata.json +2024-05-14 10:30:55,518 INFO Thread-12 :8266 [dir_watcher.py:_on_file_created():271] file/dir created: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/requirements.txt +2024-05-14 10:30:55,519 INFO Thread-12 :8266 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-metadata.json +2024-05-14 10:30:55,519 INFO Thread-12 :8266 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/output.log +2024-05-14 10:30:57,519 INFO Thread-12 :8266 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/output.log +2024-05-14 10:31:00,266 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:31:00,346 DEBUG SenderThread:8266 [sender.py:send():378] send: exit +2024-05-14 10:31:00,346 INFO SenderThread:8266 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 10:31:00,346 INFO SenderThread:8266 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 10:31:00,347 INFO SenderThread:8266 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:31:00,347 INFO SenderThread:8266 [sender.py:send_exit():593] send defer +2024-05-14 10:31:00,347 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,347 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 10:31:00,347 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,347 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 10:31:00,347 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 1 +2024-05-14 10:31:00,348 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,348 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 10:31:00,348 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,348 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 10:31:00,348 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 2 +2024-05-14 10:31:00,348 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,348 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 10:31:00,348 INFO HandlerThread:8266 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 10:31:00,348 DEBUG SystemMonitor:8266 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 10:31:00,348 INFO HandlerThread:8266 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 10:31:00,348 DEBUG SystemMonitor:8266 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 10:31:00,349 INFO HandlerThread:8266 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 10:31:00,349 DEBUG SystemMonitor:8266 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 10:31:00,349 INFO HandlerThread:8266 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 10:31:00,350 INFO HandlerThread:8266 [interfaces.py:finish():200] Joined network monitor +2024-05-14 10:31:00,350 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,350 INFO SenderThread:8266 
[sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 10:31:00,350 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 3 +2024-05-14 10:31:00,350 DEBUG SenderThread:8266 [sender.py:send():378] send: stats +2024-05-14 10:31:00,350 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,351 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 10:31:00,351 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,351 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 10:31:00,351 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 4 +2024-05-14 10:31:00,351 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,351 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 10:31:00,351 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,351 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 10:31:00,351 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 5 +2024-05-14 10:31:00,351 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,351 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 10:31:00,352 DEBUG SenderThread:8266 [sender.py:send():378] send: summary +2024-05-14 10:31:00,352 INFO SenderThread:8266 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:31:00,352 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,352 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 10:31:00,352 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 6 +2024-05-14 10:31:00,352 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,352 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 10:31:00,353 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,353 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 10:31:00,355 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:31:00,423 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 7 +2024-05-14 10:31:00,423 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,424 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 10:31:00,424 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,424 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 10:31:00,522 INFO Thread-12 :8266 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/config.yaml +2024-05-14 10:31:00,522 INFO Thread-12 :8266 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-summary.json +2024-05-14 10:31:01,018 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 8 
+2024-05-14 10:31:01,018 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:01,018 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 10:31:01,019 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:01,019 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 10:31:01,019 INFO SenderThread:8266 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 10:31:01,019 INFO SenderThread:8266 [job_builder.py:_get_source_type():576] no source found +2024-05-14 10:31:01,019 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 9 +2024-05-14 10:31:01,019 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:01,019 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 10:31:01,019 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:01,019 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 10:31:01,019 INFO SenderThread:8266 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 10:31:01,346 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:01,524 INFO SenderThread:8266 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/output.log +2024-05-14 10:31:01,524 INFO SenderThread:8266 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files +2024-05-14 10:31:01,524 INFO SenderThread:8266 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-summary.json wandb-summary.json +2024-05-14 10:31:01,524 INFO SenderThread:8266 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/requirements.txt requirements.txt +2024-05-14 10:31:01,525 INFO SenderThread:8266 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-metadata.json wandb-metadata.json +2024-05-14 10:31:01,525 INFO SenderThread:8266 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/config.yaml config.yaml +2024-05-14 10:31:01,526 INFO SenderThread:8266 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/output.log output.log +2024-05-14 10:31:01,528 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 10 +2024-05-14 10:31:01,529 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:01,529 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:01,530 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 10:31:01,531 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:01,531 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 10:31:01,531 INFO SenderThread:8266 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:31:01,775 INFO wandb-upload_1:8266 [upload_job.py:push():130] Uploaded file 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/wandb-summary.json +2024-05-14 10:31:01,935 INFO wandb-upload_2:8266 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/config.yaml +2024-05-14 10:31:02,021 INFO wandb-upload_0:8266 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/requirements.txt +2024-05-14 10:31:02,022 INFO wandb-upload_3:8266 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/files/output.log +2024-05-14 10:31:02,223 INFO Thread-11 (_thread_body):8266 [sender.py:transition_state():613] send defer: 11 +2024-05-14 10:31:02,223 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:02,223 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 10:31:02,224 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:02,224 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 10:31:02,224 INFO SenderThread:8266 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 10:31:02,224 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 12 +2024-05-14 10:31:02,225 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:02,225 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 10:31:02,225 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:02,225 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 10:31:02,225 INFO SenderThread:8266 [file_stream.py:finish():601] file stream finish called +2024-05-14 10:31:02,346 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:02,431 INFO SenderThread:8266 [file_stream.py:finish():605] file stream finish is done +2024-05-14 10:31:02,432 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 13 +2024-05-14 10:31:02,432 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:02,432 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:02,432 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 10:31:02,432 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:02,432 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 10:31:02,432 INFO SenderThread:8266 [sender.py:transition_state():613] send defer: 14 +2024-05-14 10:31:02,432 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:02,432 INFO HandlerThread:8266 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 10:31:02,432 DEBUG SenderThread:8266 [sender.py:send():378] send: final +2024-05-14 10:31:02,432 DEBUG SenderThread:8266 [sender.py:send():378] send: footer +2024-05-14 10:31:02,432 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:02,433 INFO SenderThread:8266 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 10:31:02,433 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 
10:31:02,433 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:02,433 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:02,433 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 10:31:02,433 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:02,433 DEBUG SenderThread:8266 [sender.py:send_request():405] send_request: server_info +2024-05-14 10:31:02,434 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 10:31:02,435 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 10:31:02,435 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 10:31:02,496 INFO MainThread:8266 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 10:31:02,496 INFO MainThread:8266 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 10:31:02,496 INFO MainThread:8266 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 10:31:02,496 DEBUG HandlerThread:8266 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 10:31:02,496 INFO HandlerThread:8266 [handler.py:finish():882] shutting down handler +2024-05-14 10:31:03,433 INFO WriterThread:8266 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/run-hd0wx5w4.wandb +2024-05-14 10:31:03,496 INFO SenderThread:8266 [sender.py:finish():1545] shutting down sender +2024-05-14 10:31:03,496 INFO SenderThread:8266 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:31:03,496 INFO SenderThread:8266 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..3a59bd49e18e77bd2a7aaee2712bf943eaa92751 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_setup.py:_flush():76] Configure stats pid to 6971 +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 10:30:54,293 WARNING MainThread:6971 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_init.py:_log_setup():520] Logging user logs to 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/logs/debug.log +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/logs/debug-internal.log +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_init.py:init():560] calling init triggers +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_init.py:init():610] starting backend +2024-05-14 10:30:54,293 INFO MainThread:6971 [wandb_init.py:init():614] setting up manager +2024-05-14 10:30:54,295 INFO MainThread:6971 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 10:30:54,296 INFO MainThread:6971 [wandb_init.py:init():622] backend started and connected +2024-05-14 10:30:54,298 INFO MainThread:6971 [wandb_init.py:init():711] updated telemetry +2024-05-14 10:30:54,309 INFO MainThread:6971 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 10:30:54,523 INFO MainThread:6971 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 10:30:54,604 INFO MainThread:6971 [wandb_run.py:_on_init():2405] got version response +2024-05-14 10:30:54,604 INFO MainThread:6971 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 10:30:54,807 INFO MainThread:6971 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 10:30:54,807 INFO MainThread:6971 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 10:30:54,808 INFO MainThread:6971 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 10:30:54,808 INFO MainThread:6971 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 10:30:54,809 INFO MainThread:6971 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 10:31:03,497 WARNING MsgRouterThr:6971 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/run-hd0wx5w4.wandb b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/run-hd0wx5w4.wandb new file mode 100644 index 0000000000000000000000000000000000000000..cc3f3b665a5f4ec16dc3f7c78b46759265a4550c Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103054-hd0wx5w4/run-hd0wx5w4.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/config.yaml b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8771018f2910ee2c5d6d5b9a512f221362a10ed1 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.0 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716399128 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.0 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/output.log b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..0a0b9004149e77e277e193b2ff50faab5a64fa76 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/output.log @@ -0,0 +1,34 @@ + +2024-05-22:17:32:09,328 INFO [__main__.py:251] Verbosity set to INFO +2024-05-22:17:32:17,817 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-22:17:32:17,818 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-22:17:32:17,819 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'} +2024-05-22:17:32:20,121 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + 
self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3573b3bbfd5b190fed4ccaed4ac2846002aec22 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 
+python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.0 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..0db61c4daf165a69a9cf96a83133849532569b70 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-22T17:32:09.110372", + "startedAt": "2024-05-22T17:32:08.595497", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2326.986975, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.6457061767578 + } + }, + "memory": { + "total": 1007.4379997253418 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..fd713b653aec5255092522ae073ae0c29ae8ab20 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-22 17:32:08,624 INFO StreamThr :802 [internal.py:wandb_internal():85] W&B internal server running at pid: 802, started at: 2024-05-22 17:32:08.622741 +2024-05-22 17:32:08,629 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: status +2024-05-22 17:32:08,631 INFO WriterThread:802 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/run-ncweemna.wandb +2024-05-22 17:32:08,633 DEBUG SenderThread:802 [sender.py:send():378] send: header +2024-05-22 17:32:08,634 DEBUG SenderThread:802 [sender.py:send():378] send: run +2024-05-22 17:32:08,901 INFO SenderThread:802 [dir_watcher.py:__init__():211] 
watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files +2024-05-22 17:32:08,901 INFO SenderThread:802 [sender.py:_start_run_threads():1123] run started: ncweemna with start time 1716399128.622597 +2024-05-22 17:32:08,910 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: check_version +2024-05-22 17:32:08,911 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: check_version +2024-05-22 17:32:09,033 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: run_start +2024-05-22 17:32:09,036 DEBUG HandlerThread:802 [system_info.py:__init__():26] System info init +2024-05-22 17:32:09,036 DEBUG HandlerThread:802 [system_info.py:__init__():41] System info init done +2024-05-22 17:32:09,036 INFO HandlerThread:802 [system_monitor.py:start():194] Starting system monitor +2024-05-22 17:32:09,036 INFO SystemMonitor:802 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-22 17:32:09,036 INFO HandlerThread:802 [system_monitor.py:probe():214] Collecting system info +2024-05-22 17:32:09,043 INFO SystemMonitor:802 [interfaces.py:start():188] Started cpu monitoring +2024-05-22 17:32:09,044 INFO SystemMonitor:802 [interfaces.py:start():188] Started disk monitoring +2024-05-22 17:32:09,045 INFO SystemMonitor:802 [interfaces.py:start():188] Started memory monitoring +2024-05-22 17:32:09,050 INFO SystemMonitor:802 [interfaces.py:start():188] Started network monitoring +2024-05-22 17:32:09,110 DEBUG HandlerThread:802 [system_info.py:probe():150] Probing system +2024-05-22 17:32:09,113 DEBUG HandlerThread:802 [system_info.py:_probe_git():135] Probing git +2024-05-22 17:32:09,123 ERROR HandlerThread:802 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-22 17:32:09,123 DEBUG HandlerThread:802 [system_info.py:_probe_git():143] Probing git done +2024-05-22 17:32:09,123 DEBUG HandlerThread:802 [system_info.py:probe():198] Probing system done +2024-05-22 17:32:09,123 DEBUG HandlerThread:802 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T17:32:09.110372', 'startedAt': '2024-05-22T17:32:08.595497', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2326.986975, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.6457061767578}}, 'memory': {'total': 1007.4379997253418}} +2024-05-22 17:32:09,124 INFO HandlerThread:802 [system_monitor.py:probe():224] Finished collecting system info +2024-05-22 17:32:09,124 INFO HandlerThread:802 [system_monitor.py:probe():227] Publishing system info +2024-05-22 17:32:09,126 INFO HandlerThread:802 [system_monitor.py:probe():229] Finished publishing system info +2024-05-22 17:32:09,132 DEBUG SenderThread:802 [sender.py:send():378] send: files +2024-05-22 17:32:09,132 INFO SenderThread:802 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-22 17:32:09,309 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: python_packages +2024-05-22 17:32:09,310 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: python_packages +2024-05-22 17:32:09,312 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: stop_status +2024-05-22 17:32:09,326 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: stop_status +2024-05-22 17:32:09,412 DEBUG SenderThread:802 [sender.py:send():378] send: telemetry +2024-05-22 17:32:09,818 INFO wandb-upload_0:802 [upload_job.py:push():130] Uploaded file /tmp/tmpv72wpx3nwandb/9ilwq0mg-wandb-metadata.json +2024-05-22 17:32:09,904 INFO Thread-12 :802 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/requirements.txt +2024-05-22 17:32:09,904 INFO Thread-12 :802 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-metadata.json +2024-05-22 17:32:09,905 INFO Thread-12 :802 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/output.log +2024-05-22 17:32:11,904 INFO Thread-12 :802 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/output.log +2024-05-22 17:32:14,417 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 17:32:19,819 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 17:32:19,911 INFO Thread-12 :802 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/output.log +2024-05-22 17:32:20,128 DEBUG SenderThread:802 [sender.py:send():378] send: exit +2024-05-22 17:32:20,128 INFO SenderThread:802 [sender.py:send_exit():585] handling exit code: 1 +2024-05-22 17:32:20,129 INFO 
SenderThread:802 [sender.py:send_exit():587] handling runtime: 11 +2024-05-22 17:32:20,130 INFO SenderThread:802 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 17:32:20,130 INFO SenderThread:802 [sender.py:send_exit():593] send defer +2024-05-22 17:32:20,130 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:20,130 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-22 17:32:20,130 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:20,130 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-22 17:32:20,131 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 1 +2024-05-22 17:32:20,131 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:20,131 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-22 17:32:20,131 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:20,131 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-22 17:32:20,131 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 2 +2024-05-22 17:32:20,131 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:20,131 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-22 17:32:20,131 INFO HandlerThread:802 [system_monitor.py:finish():203] Stopping system monitor +2024-05-22 17:32:20,131 DEBUG SystemMonitor:802 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-22 17:32:20,132 DEBUG SystemMonitor:802 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-22 17:32:20,132 INFO HandlerThread:802 [interfaces.py:finish():200] Joined cpu monitor +2024-05-22 17:32:20,132 DEBUG SystemMonitor:802 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-22 17:32:20,132 INFO HandlerThread:802 [interfaces.py:finish():200] Joined disk monitor +2024-05-22 17:32:20,134 INFO HandlerThread:802 [interfaces.py:finish():200] Joined memory monitor +2024-05-22 17:32:20,134 INFO HandlerThread:802 [interfaces.py:finish():200] Joined network monitor +2024-05-22 17:32:20,134 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:20,134 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-22 17:32:20,134 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 3 +2024-05-22 17:32:20,134 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:20,134 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-22 17:32:20,135 DEBUG SenderThread:802 [sender.py:send():378] send: stats +2024-05-22 17:32:20,136 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:20,136 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-22 17:32:20,136 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 4 +2024-05-22 17:32:20,136 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:20,136 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-22 17:32:20,136 DEBUG SenderThread:802 [sender.py:send_request():405] 
send_request: defer +2024-05-22 17:32:20,136 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-22 17:32:20,136 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 5 +2024-05-22 17:32:20,136 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:20,136 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-22 17:32:20,136 DEBUG SenderThread:802 [sender.py:send():378] send: summary +2024-05-22 17:32:20,137 INFO SenderThread:802 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 17:32:20,137 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:20,137 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-22 17:32:20,137 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 6 +2024-05-22 17:32:20,138 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:20,138 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-22 17:32:20,138 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:20,138 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-22 17:32:20,142 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 17:32:20,227 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 7 +2024-05-22 17:32:20,227 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:20,227 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-22 17:32:20,227 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:20,227 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-22 17:32:20,912 INFO Thread-12 :802 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/config.yaml +2024-05-22 17:32:20,913 INFO Thread-12 :802 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-summary.json +2024-05-22 17:32:21,128 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 17:32:21,438 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 8 +2024-05-22 17:32:21,438 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 17:32:21,438 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:21,438 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-22 17:32:21,439 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:21,439 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-22 17:32:21,439 INFO SenderThread:802 [job_builder.py:build():432] Attempting to build job artifact +2024-05-22 17:32:21,439 INFO SenderThread:802 [job_builder.py:_get_source_type():576] no source found +2024-05-22 17:32:21,439 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 9 +2024-05-22 17:32:21,439 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer 
+2024-05-22 17:32:21,439 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-22 17:32:21,439 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:21,439 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-22 17:32:21,439 INFO SenderThread:802 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-22 17:32:21,914 INFO SenderThread:802 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/output.log +2024-05-22 17:32:21,914 INFO SenderThread:802 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files +2024-05-22 17:32:21,914 INFO SenderThread:802 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/requirements.txt requirements.txt +2024-05-22 17:32:21,914 INFO SenderThread:802 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/config.yaml config.yaml +2024-05-22 17:32:21,917 INFO SenderThread:802 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/output.log output.log +2024-05-22 17:32:21,917 INFO SenderThread:802 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-summary.json wandb-summary.json +2024-05-22 17:32:21,917 INFO SenderThread:802 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-metadata.json wandb-metadata.json +2024-05-22 17:32:21,917 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 10 +2024-05-22 17:32:21,917 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:21,917 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-22 17:32:21,918 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:21,918 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-22 17:32:21,918 INFO SenderThread:802 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 17:32:22,129 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 17:32:22,129 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 17:32:22,166 INFO wandb-upload_0:802 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/requirements.txt +2024-05-22 17:32:22,519 INFO wandb-upload_3:802 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/wandb-summary.json +2024-05-22 17:32:22,557 INFO wandb-upload_1:802 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/config.yaml +2024-05-22 17:32:22,598 INFO wandb-upload_2:802 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/files/output.log +2024-05-22 17:32:22,798 INFO 
Thread-11 (_thread_body):802 [sender.py:transition_state():613] send defer: 11 +2024-05-22 17:32:22,798 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:22,798 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-22 17:32:22,799 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:22,799 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-22 17:32:22,799 INFO SenderThread:802 [file_pusher.py:join():175] waiting for file pusher +2024-05-22 17:32:22,799 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 12 +2024-05-22 17:32:22,799 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:22,799 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-22 17:32:22,799 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:22,799 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-22 17:32:22,799 INFO SenderThread:802 [file_stream.py:finish():601] file stream finish called +2024-05-22 17:32:22,861 INFO SenderThread:802 [file_stream.py:finish():605] file stream finish is done +2024-05-22 17:32:22,861 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 13 +2024-05-22 17:32:22,861 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:22,861 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-22 17:32:22,861 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:22,861 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-22 17:32:22,862 INFO SenderThread:802 [sender.py:transition_state():613] send defer: 14 +2024-05-22 17:32:22,862 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: defer +2024-05-22 17:32:22,862 INFO HandlerThread:802 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-22 17:32:22,862 DEBUG SenderThread:802 [sender.py:send():378] send: final +2024-05-22 17:32:22,862 DEBUG SenderThread:802 [sender.py:send():378] send: footer +2024-05-22 17:32:22,862 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: defer +2024-05-22 17:32:22,862 INFO SenderThread:802 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-22 17:32:22,863 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 17:32:22,863 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 17:32:22,863 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: server_info +2024-05-22 17:32:22,863 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: get_summary +2024-05-22 17:32:22,863 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-22 17:32:22,863 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-22 17:32:22,863 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 17:32:22,863 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 17:32:22,864 DEBUG SenderThread:802 [sender.py:send_request():405] send_request: server_info +2024-05-22 17:32:22,925 INFO MainThread:802 
[wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-22 17:32:22,925 INFO MainThread:802 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-22 17:32:22,926 INFO MainThread:802 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-22 17:32:22,926 DEBUG HandlerThread:802 [handler.py:handle_request():158] handle_request: shutdown +2024-05-22 17:32:22,926 INFO HandlerThread:802 [handler.py:finish():882] shutting down handler +2024-05-22 17:32:23,863 INFO WriterThread:802 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/run-ncweemna.wandb +2024-05-22 17:32:23,925 INFO SenderThread:802 [sender.py:finish():1545] shutting down sender +2024-05-22 17:32:23,925 INFO SenderThread:802 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 17:32:23,925 INFO SenderThread:802 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/logs/debug.log b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..9db7d098b0b8eba6ce24f8da55e58a59632ae38b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-22 17:32:08,616 INFO MainThread:646 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-22 17:32:08,616 INFO MainThread:646 [wandb_setup.py:_flush():76] Configure stats pid to 646 +2024-05-22 17:32:08,616 INFO MainThread:646 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-22 17:32:08,616 INFO MainThread:646 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-22 17:32:08,616 INFO MainThread:646 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-22 17:32:08,616 INFO MainThread:646 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-22 17:32:08,616 WARNING MainThread:646 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-22 17:32:08,616 INFO MainThread:646 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-22 17:32:08,616 INFO MainThread:646 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-22 17:32:08,617 INFO MainThread:646 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/logs/debug.log +2024-05-22 17:32:08,617 INFO MainThread:646 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/logs/debug-internal.log +2024-05-22 17:32:08,617 INFO MainThread:646 [wandb_init.py:init():560] calling init triggers +2024-05-22 17:32:08,617 INFO MainThread:646 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-22 17:32:08,617 INFO MainThread:646 [wandb_init.py:init():610] starting backend +2024-05-22 17:32:08,617 INFO MainThread:646 [wandb_init.py:init():614] setting up manager +2024-05-22 17:32:08,621 INFO MainThread:646 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-22 17:32:08,622 INFO MainThread:646 [wandb_init.py:init():622] 
backend started and connected +2024-05-22 17:32:08,625 INFO MainThread:646 [wandb_init.py:init():711] updated telemetry +2024-05-22 17:32:08,634 INFO MainThread:646 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-22 17:32:08,910 INFO MainThread:646 [wandb_run.py:_on_init():2396] communicating current version +2024-05-22 17:32:09,027 INFO MainThread:646 [wandb_run.py:_on_init():2405] got version response +2024-05-22 17:32:09,027 INFO MainThread:646 [wandb_init.py:init():795] starting run threads in backend +2024-05-22 17:32:09,310 INFO MainThread:646 [wandb_run.py:_console_start():2374] atexit reg +2024-05-22 17:32:09,311 INFO MainThread:646 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-22 17:32:09,311 INFO MainThread:646 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-22 17:32:09,311 INFO MainThread:646 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-22 17:32:09,326 INFO MainThread:646 [wandb_init.py:init():838] run started, returning control to user process +2024-05-22 17:32:23,927 WARNING MsgRouterThr:646 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/run-ncweemna.wandb b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/run-ncweemna.wandb new file mode 100644 index 0000000000000000000000000000000000000000..23813ef4cb9226af5247461b4a55d7b8568e9130 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240522_173208-ncweemna/run-ncweemna.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/config.yaml b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..55ca975dd785892f2cd5f071d80da43b4f15eab2 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.0 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716404027 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.0 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/output.log b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..29238ef6466e90747b75a8eacdf5f6bc2dda8e74 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/output.log @@ -0,0 +1,34 @@ + +2024-05-22:18:53:48,730 INFO [__main__.py:251] Verbosity set to INFO +2024-05-22:18:53:57,147 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-22:18:53:57,148 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-22:18:53:57,148 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000'} +2024-05-22:18:53:59,421 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + 
File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3573b3bbfd5b190fed4ccaed4ac2846002aec22 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.0 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..fc146d6e1ce21c5f8ba3ebc4b874ce3fa0e1e746 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-22T18:53:48.497427", + "startedAt": "2024-05-22T18:53:47.980290", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.1044875, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.63544845581055 + } + }, + "memory": { + "total": 1007.4379997253418 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..4fee12cb57d15b69d4a45878e8d2d408b5fd76fd --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-22 18:53:48,001 INFO StreamThr :1570 [internal.py:wandb_internal():85] W&B internal server running at pid: 1570, started at: 2024-05-22 18:53:47.999366 +2024-05-22 18:53:48,005 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: status +2024-05-22 18:53:48,006 INFO WriterThread:1570 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/run-tyy073dg.wandb +2024-05-22 18:53:48,008 DEBUG SenderThread:1570 [sender.py:send():378] send: header +2024-05-22 18:53:48,011 DEBUG SenderThread:1570 [sender.py:send():378] send: run +2024-05-22 18:53:48,289 INFO SenderThread:1570 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files +2024-05-22 18:53:48,289 INFO SenderThread:1570 [sender.py:_start_run_threads():1123] run started: tyy073dg with start time 1716404027.99922 +2024-05-22 18:53:48,289 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: check_version +2024-05-22 18:53:48,289 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: check_version +2024-05-22 18:53:48,403 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: run_start +2024-05-22 18:53:48,405 DEBUG HandlerThread:1570 [system_info.py:__init__():26] System info init +2024-05-22 18:53:48,405 DEBUG HandlerThread:1570 [system_info.py:__init__():41] System info init done +2024-05-22 18:53:48,405 INFO HandlerThread:1570 [system_monitor.py:start():194] Starting system monitor +2024-05-22 18:53:48,405 INFO SystemMonitor:1570 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-22 18:53:48,406 INFO HandlerThread:1570 [system_monitor.py:probe():214] Collecting system info +2024-05-22 18:53:48,412 INFO SystemMonitor:1570 [interfaces.py:start():188] Started cpu 
monitoring +2024-05-22 18:53:48,413 INFO SystemMonitor:1570 [interfaces.py:start():188] Started disk monitoring +2024-05-22 18:53:48,413 INFO SystemMonitor:1570 [interfaces.py:start():188] Started memory monitoring +2024-05-22 18:53:48,413 INFO SystemMonitor:1570 [interfaces.py:start():188] Started network monitoring +2024-05-22 18:53:48,497 DEBUG HandlerThread:1570 [system_info.py:probe():150] Probing system +2024-05-22 18:53:48,500 DEBUG HandlerThread:1570 [system_info.py:_probe_git():135] Probing git +2024-05-22 18:53:48,510 ERROR HandlerThread:1570 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-22 18:53:48,510 DEBUG HandlerThread:1570 [system_info.py:_probe_git():143] Probing git done +2024-05-22 18:53:48,510 DEBUG HandlerThread:1570 [system_info.py:probe():198] Probing system done +2024-05-22 18:53:48,510 DEBUG HandlerThread:1570 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T18:53:48.497427', 'startedAt': '2024-05-22T18:53:47.980290', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.1044875, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.63544845581055}}, 'memory': {'total': 1007.4379997253418}} +2024-05-22 18:53:48,511 INFO HandlerThread:1570 [system_monitor.py:probe():224] Finished collecting system info +2024-05-22 18:53:48,511 INFO HandlerThread:1570 [system_monitor.py:probe():227] Publishing system info +2024-05-22 18:53:48,514 INFO HandlerThread:1570 [system_monitor.py:probe():229] Finished publishing system info +2024-05-22 18:53:48,519 
DEBUG SenderThread:1570 [sender.py:send():378] send: files +2024-05-22 18:53:48,519 INFO SenderThread:1570 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-22 18:53:48,716 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: python_packages +2024-05-22 18:53:48,716 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: python_packages +2024-05-22 18:53:48,717 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: stop_status +2024-05-22 18:53:48,718 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: stop_status +2024-05-22 18:53:48,792 DEBUG SenderThread:1570 [sender.py:send():378] send: telemetry +2024-05-22 18:53:49,065 INFO wandb-upload_0:1570 [upload_job.py:push():130] Uploaded file /tmp/tmpo_3j4p8wwandb/u6ipa4uh-wandb-metadata.json +2024-05-22 18:53:49,291 INFO Thread-12 :1570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-metadata.json +2024-05-22 18:53:49,291 INFO Thread-12 :1570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/requirements.txt +2024-05-22 18:53:49,291 INFO Thread-12 :1570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/output.log +2024-05-22 18:53:51,291 INFO Thread-12 :1570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/output.log +2024-05-22 18:53:53,795 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:53:59,149 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:53:59,298 INFO Thread-12 :1570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/output.log +2024-05-22 18:53:59,429 DEBUG SenderThread:1570 [sender.py:send():378] send: exit +2024-05-22 18:53:59,429 INFO SenderThread:1570 [sender.py:send_exit():585] handling exit code: 1 +2024-05-22 18:53:59,429 INFO SenderThread:1570 [sender.py:send_exit():587] handling runtime: 11 +2024-05-22 18:53:59,430 INFO SenderThread:1570 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:53:59,430 INFO SenderThread:1570 [sender.py:send_exit():593] send defer +2024-05-22 18:53:59,430 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:53:59,431 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-22 18:53:59,431 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:53:59,431 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-22 18:53:59,431 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 1 +2024-05-22 18:53:59,431 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:53:59,431 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-22 18:53:59,431 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:53:59,431 INFO SenderThread:1570 
[sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-22 18:53:59,431 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 2 +2024-05-22 18:53:59,431 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:53:59,431 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-22 18:53:59,431 INFO HandlerThread:1570 [system_monitor.py:finish():203] Stopping system monitor +2024-05-22 18:53:59,431 DEBUG SystemMonitor:1570 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-22 18:53:59,431 DEBUG SystemMonitor:1570 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-22 18:53:59,431 DEBUG SystemMonitor:1570 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-22 18:53:59,432 INFO HandlerThread:1570 [interfaces.py:finish():200] Joined cpu monitor +2024-05-22 18:53:59,432 INFO HandlerThread:1570 [interfaces.py:finish():200] Joined disk monitor +2024-05-22 18:53:59,432 INFO HandlerThread:1570 [interfaces.py:finish():200] Joined memory monitor +2024-05-22 18:53:59,432 INFO HandlerThread:1570 [interfaces.py:finish():200] Joined network monitor +2024-05-22 18:53:59,433 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:53:59,433 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-22 18:53:59,433 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 3 +2024-05-22 18:53:59,433 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:53:59,433 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-22 18:53:59,433 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:53:59,433 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-22 18:53:59,433 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 4 +2024-05-22 18:53:59,433 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:53:59,433 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-22 18:53:59,433 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:53:59,433 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-22 18:53:59,433 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 5 +2024-05-22 18:53:59,433 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:53:59,433 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-22 18:53:59,434 DEBUG SenderThread:1570 [sender.py:send():378] send: summary +2024-05-22 18:53:59,434 INFO SenderThread:1570 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:53:59,435 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:53:59,435 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-22 18:53:59,435 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 6 +2024-05-22 18:53:59,435 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:53:59,435 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-22 18:53:59,435 DEBUG 
SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:53:59,435 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-22 18:53:59,440 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:53:59,505 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 7 +2024-05-22 18:53:59,505 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:53:59,505 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-22 18:53:59,505 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:53:59,505 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-22 18:54:00,300 INFO Thread-12 :1570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/config.yaml +2024-05-22 18:54:00,300 INFO Thread-12 :1570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-summary.json +2024-05-22 18:54:00,429 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:54:00,816 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 8 +2024-05-22 18:54:00,816 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:54:00,816 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:54:00,816 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-22 18:54:00,816 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:54:00,816 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-22 18:54:00,816 INFO SenderThread:1570 [job_builder.py:build():432] Attempting to build job artifact +2024-05-22 18:54:00,817 INFO SenderThread:1570 [job_builder.py:_get_source_type():576] no source found +2024-05-22 18:54:00,817 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 9 +2024-05-22 18:54:00,817 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:54:00,817 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-22 18:54:00,817 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:54:00,817 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-22 18:54:00,817 INFO SenderThread:1570 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-22 18:54:01,301 INFO SenderThread:1570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/output.log +2024-05-22 18:54:01,301 INFO SenderThread:1570 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files +2024-05-22 18:54:01,301 INFO SenderThread:1570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/output.log output.log +2024-05-22 18:54:01,302 INFO SenderThread:1570 [dir_watcher.py:finish():402] scan save: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/config.yaml config.yaml +2024-05-22 18:54:01,304 INFO SenderThread:1570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-summary.json wandb-summary.json +2024-05-22 18:54:01,304 INFO SenderThread:1570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/requirements.txt requirements.txt +2024-05-22 18:54:01,304 INFO SenderThread:1570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-metadata.json wandb-metadata.json +2024-05-22 18:54:01,304 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 10 +2024-05-22 18:54:01,305 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:54:01,305 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-22 18:54:01,306 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:54:01,307 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-22 18:54:01,307 INFO SenderThread:1570 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:54:01,429 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:54:01,429 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:54:01,594 INFO wandb-upload_0:1570 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/output.log +2024-05-22 18:54:01,909 INFO wandb-upload_1:1570 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/config.yaml +2024-05-22 18:54:01,924 INFO wandb-upload_2:1570 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/wandb-summary.json +2024-05-22 18:54:01,959 INFO wandb-upload_3:1570 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/files/requirements.txt +2024-05-22 18:54:02,159 INFO Thread-11 (_thread_body):1570 [sender.py:transition_state():613] send defer: 11 +2024-05-22 18:54:02,159 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:54:02,159 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-22 18:54:02,160 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:54:02,160 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-22 18:54:02,160 INFO SenderThread:1570 [file_pusher.py:join():175] waiting for file pusher +2024-05-22 18:54:02,160 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 12 +2024-05-22 18:54:02,160 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:54:02,160 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-22 18:54:02,160 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:54:02,160 INFO SenderThread:1570 
[sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-22 18:54:02,161 INFO SenderThread:1570 [file_stream.py:finish():601] file stream finish called +2024-05-22 18:54:02,258 INFO SenderThread:1570 [file_stream.py:finish():605] file stream finish is done +2024-05-22 18:54:02,258 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 13 +2024-05-22 18:54:02,258 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:54:02,258 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-22 18:54:02,258 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:54:02,259 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-22 18:54:02,259 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 14 +2024-05-22 18:54:02,259 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:54:02,259 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-22 18:54:02,259 DEBUG SenderThread:1570 [sender.py:send():378] send: final +2024-05-22 18:54:02,259 DEBUG SenderThread:1570 [sender.py:send():378] send: footer +2024-05-22 18:54:02,259 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer +2024-05-22 18:54:02,259 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-22 18:54:02,260 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:54:02,260 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:54:02,260 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: server_info +2024-05-22 18:54:02,260 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: get_summary +2024-05-22 18:54:02,260 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-22 18:54:02,260 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-22 18:54:02,261 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:54:02,261 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:54:02,261 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: server_info +2024-05-22 18:54:02,315 INFO MainThread:1570 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-22 18:54:02,315 INFO MainThread:1570 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-22 18:54:02,315 INFO MainThread:1570 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-22 18:54:02,315 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: shutdown +2024-05-22 18:54:02,315 INFO HandlerThread:1570 [handler.py:finish():882] shutting down handler +2024-05-22 18:54:03,260 INFO WriterThread:1570 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/run-tyy073dg.wandb +2024-05-22 18:54:03,315 INFO SenderThread:1570 [sender.py:finish():1545] shutting down sender +2024-05-22 18:54:03,315 INFO SenderThread:1570 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:54:03,315 INFO SenderThread:1570 [file_pusher.py:join():175] waiting for file pusher diff --git 
a/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/logs/debug.log b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..ab320523ab5a308a9ff52c6c0758ba14efcbf48d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-22 18:53:47,993 INFO MainThread:1415 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-22 18:53:47,993 INFO MainThread:1415 [wandb_setup.py:_flush():76] Configure stats pid to 1415 +2024-05-22 18:53:47,993 INFO MainThread:1415 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-22 18:53:47,993 INFO MainThread:1415 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-22 18:53:47,993 INFO MainThread:1415 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-22 18:53:47,993 INFO MainThread:1415 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-22 18:53:47,993 WARNING MainThread:1415 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-22 18:53:47,993 INFO MainThread:1415 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-22 18:53:47,993 INFO MainThread:1415 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-22 18:53:47,993 INFO MainThread:1415 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/logs/debug.log +2024-05-22 18:53:47,994 INFO MainThread:1415 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/logs/debug-internal.log +2024-05-22 18:53:47,994 INFO MainThread:1415 [wandb_init.py:init():560] calling init triggers +2024-05-22 18:53:47,994 INFO MainThread:1415 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-22 18:53:47,994 INFO MainThread:1415 [wandb_init.py:init():610] starting backend +2024-05-22 18:53:47,994 INFO MainThread:1415 [wandb_init.py:init():614] setting up manager +2024-05-22 18:53:47,998 INFO MainThread:1415 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-22 18:53:47,999 INFO MainThread:1415 [wandb_init.py:init():622] backend started and connected +2024-05-22 18:53:48,002 INFO MainThread:1415 [wandb_init.py:init():711] updated telemetry +2024-05-22 18:53:48,010 INFO MainThread:1415 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-22 18:53:48,288 INFO MainThread:1415 [wandb_run.py:_on_init():2396] communicating current version +2024-05-22 18:53:48,397 INFO MainThread:1415 [wandb_run.py:_on_init():2405] got version response +2024-05-22 18:53:48,397 INFO MainThread:1415 [wandb_init.py:init():795] starting run threads in backend +2024-05-22 18:53:48,725 INFO MainThread:1415 [wandb_run.py:_console_start():2374] atexit reg +2024-05-22 18:53:48,725 INFO MainThread:1415 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-22 18:53:48,726 INFO MainThread:1415 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-22 18:53:48,726 INFO MainThread:1415 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-22 18:53:48,728 INFO MainThread:1415 [wandb_init.py:init():838] run started, returning control to user process +2024-05-22 18:54:03,316 WARNING MsgRouterThr:1415 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/run-tyy073dg.wandb b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/run-tyy073dg.wandb new file mode 100644 index 0000000000000000000000000000000000000000..63aeb2d5d116b87179b076b699dea5179f532bec Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240522_185347-tyy073dg/run-tyy073dg.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..896c461861181abb181a5f10d4084fd7d1639172 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716450663 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/output.log b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..0c47256a7d24c7d0dabb70cc0f1f2f14af3c3d5b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:07:51:04,269 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:07:51:13,688 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:07:51:13,689 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:07:51:13,690 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'} +2024-05-23:07:51:15,967 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + 
self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8150356038c46ec25f623f6e945d6dcb66a2e717 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 
+python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..985d306f457fe5eb0b771413ffd590fcacd89bff --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T07:51:04.056548", + "startedAt": "2024-05-23T07:51:03.528847", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2340.6151937500003, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 209.57160186767578 + } + }, + "memory": { + "total": 1007.4379425048828 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..018b4068e81a342faa2a3b0691dee6965106bcc3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 12}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..242e155a03e0831b5e43cab5983e43426f3ac179 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-05-23 07:51:03,551 INFO StreamThr :799 [internal.py:wandb_internal():85] W&B internal server running at pid: 799, started at: 2024-05-23 07:51:03.549793 +2024-05-23 07:51:03,555 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: status +2024-05-23 07:51:03,557 INFO WriterThread:799 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/run-0b6cufsb.wandb +2024-05-23 07:51:03,559 DEBUG SenderThread:799 [sender.py:send():378] send: header +2024-05-23 07:51:03,563 DEBUG SenderThread:799 [sender.py:send():378] send: run +2024-05-23 07:51:03,832 INFO SenderThread:799 
[dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files +2024-05-23 07:51:03,833 INFO SenderThread:799 [sender.py:_start_run_threads():1123] run started: 0b6cufsb with start time 1716450663.549651 +2024-05-23 07:51:03,841 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 07:51:03,841 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: check_version +2024-05-23 07:51:03,959 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 07:51:03,961 DEBUG HandlerThread:799 [system_info.py:__init__():26] System info init +2024-05-23 07:51:03,961 DEBUG HandlerThread:799 [system_info.py:__init__():41] System info init done +2024-05-23 07:51:03,961 INFO HandlerThread:799 [system_monitor.py:start():194] Starting system monitor +2024-05-23 07:51:03,962 INFO SystemMonitor:799 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 07:51:03,962 INFO HandlerThread:799 [system_monitor.py:probe():214] Collecting system info +2024-05-23 07:51:03,969 INFO SystemMonitor:799 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 07:51:03,969 INFO SystemMonitor:799 [interfaces.py:start():188] Started disk monitoring +2024-05-23 07:51:03,969 INFO SystemMonitor:799 [interfaces.py:start():188] Started memory monitoring +2024-05-23 07:51:03,971 INFO SystemMonitor:799 [interfaces.py:start():188] Started network monitoring +2024-05-23 07:51:04,056 DEBUG HandlerThread:799 [system_info.py:probe():150] Probing system +2024-05-23 07:51:04,059 DEBUG HandlerThread:799 [system_info.py:_probe_git():135] Probing git +2024-05-23 07:51:04,069 ERROR HandlerThread:799 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 07:51:04,069 DEBUG HandlerThread:799 [system_info.py:_probe_git():143] Probing git done +2024-05-23 07:51:04,070 DEBUG HandlerThread:799 [system_info.py:probe():198] Probing system done +2024-05-23 07:51:04,070 DEBUG HandlerThread:799 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T07:51:04.056548', 'startedAt': '2024-05-23T07:51:03.528847', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2340.6151937500003, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 209.57160186767578}}, 'memory': {'total': 1007.4379425048828}} +2024-05-23 07:51:04,070 INFO HandlerThread:799 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 07:51:04,070 INFO HandlerThread:799 [system_monitor.py:probe():227] Publishing system info +2024-05-23 07:51:04,073 INFO HandlerThread:799 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 07:51:04,080 DEBUG SenderThread:799 [sender.py:send():378] send: files +2024-05-23 07:51:04,080 INFO SenderThread:799 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 07:51:04,262 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 07:51:04,262 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: python_packages +2024-05-23 07:51:04,263 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 07:51:04,265 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: stop_status +2024-05-23 07:51:04,392 DEBUG SenderThread:799 [sender.py:send():378] send: telemetry +2024-05-23 07:51:04,687 INFO wandb-upload_0:799 [upload_job.py:push():130] Uploaded file /tmp/tmpkxsuibgiwandb/olgpo615-wandb-metadata.json +2024-05-23 07:51:04,835 INFO Thread-12 :799 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/requirements.txt +2024-05-23 07:51:04,835 INFO Thread-12 :799 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-metadata.json +2024-05-23 07:51:04,835 INFO Thread-12 :799 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/output.log +2024-05-23 07:51:06,835 INFO Thread-12 :799 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/output.log +2024-05-23 07:51:09,396 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:51:14,691 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:51:14,841 INFO Thread-12 :799 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/output.log +2024-05-23 07:51:15,988 DEBUG SenderThread:799 [sender.py:send():378] send: exit +2024-05-23 07:51:15,988 INFO SenderThread:799 [sender.py:send_exit():585] handling exit code: 1 
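Note on the gitlib error recorded earlier in this debug-internal.log: the git probe fails with "detected dubious ownership", so the run metadata carries 'commit': None. A minimal pre-flight sketch of how the launching script could apply the remedy quoted in the error message before the run starts; this is an assumed wrapper-side patch, not part of the harness or of wandb itself, and the path is taken verbatim from the error above:

import subprocess

# Repository path reported in the gitlib error; adjust if the checkout moves (assumption).
REPO_DIR = "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness"

# Runs the exact command the error message suggests:
#   git config --global --add safe.directory <repo>
subprocess.run(
    ["git", "config", "--global", "--add", "safe.directory", REPO_DIR],
    check=True,
)

With the directory registered as safe, the system_info git probe should be able to resolve the repository root, and subsequent runs can record the actual commit hash in wandb-metadata.json.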
+2024-05-23 07:51:15,988 INFO SenderThread:799 [sender.py:send_exit():587] handling runtime: 12 +2024-05-23 07:51:15,992 INFO SenderThread:799 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 07:51:15,992 INFO SenderThread:799 [sender.py:send_exit():593] send defer +2024-05-23 07:51:15,992 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:15,992 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 07:51:15,993 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:15,993 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 07:51:15,993 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 1 +2024-05-23 07:51:15,993 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:15,993 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 07:51:15,993 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:15,993 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 07:51:15,993 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 2 +2024-05-23 07:51:15,993 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:15,993 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 07:51:15,993 INFO HandlerThread:799 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 07:51:15,993 INFO HandlerThread:799 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 07:51:15,994 DEBUG SystemMonitor:799 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 07:51:15,994 DEBUG SystemMonitor:799 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 07:51:15,994 DEBUG SystemMonitor:799 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 07:51:15,995 INFO HandlerThread:799 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 07:51:15,995 INFO HandlerThread:799 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 07:51:15,995 INFO HandlerThread:799 [interfaces.py:finish():200] Joined network monitor +2024-05-23 07:51:15,996 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:15,996 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 07:51:15,996 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 3 +2024-05-23 07:51:15,996 DEBUG SenderThread:799 [sender.py:send():378] send: stats +2024-05-23 07:51:15,997 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:15,997 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 07:51:15,998 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:15,998 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 07:51:15,998 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 4 +2024-05-23 07:51:15,998 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:15,998 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 07:51:15,998 DEBUG SenderThread:799 
[sender.py:send_request():405] send_request: defer +2024-05-23 07:51:15,998 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 07:51:15,998 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 5 +2024-05-23 07:51:15,998 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:15,998 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 07:51:15,998 DEBUG SenderThread:799 [sender.py:send():378] send: summary +2024-05-23 07:51:15,999 INFO SenderThread:799 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 07:51:15,999 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:15,999 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 07:51:15,999 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 6 +2024-05-23 07:51:15,999 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:15,999 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 07:51:15,999 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:15,999 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 07:51:16,004 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:51:16,073 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 7 +2024-05-23 07:51:16,073 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:16,073 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 07:51:16,073 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:16,073 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 07:51:16,427 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 8 +2024-05-23 07:51:16,427 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:16,427 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 07:51:16,427 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:16,427 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 07:51:16,428 INFO SenderThread:799 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 07:51:16,428 INFO SenderThread:799 [job_builder.py:_get_source_type():576] no source found +2024-05-23 07:51:16,428 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 9 +2024-05-23 07:51:16,428 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:16,428 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 07:51:16,428 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:16,428 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 07:51:16,428 INFO SenderThread:799 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 07:51:16,842 INFO SenderThread:799 [dir_watcher.py:_on_file_modified():288] file/dir modified: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/output.log +2024-05-23 07:51:16,843 INFO SenderThread:799 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/config.yaml +2024-05-23 07:51:16,843 INFO SenderThread:799 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-summary.json +2024-05-23 07:51:16,843 INFO SenderThread:799 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files +2024-05-23 07:51:16,843 INFO SenderThread:799 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/requirements.txt requirements.txt +2024-05-23 07:51:16,843 INFO SenderThread:799 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/output.log output.log +2024-05-23 07:51:16,846 INFO SenderThread:799 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-summary.json wandb-summary.json +2024-05-23 07:51:16,846 INFO SenderThread:799 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-metadata.json wandb-metadata.json +2024-05-23 07:51:16,846 INFO SenderThread:799 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/config.yaml config.yaml +2024-05-23 07:51:16,846 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 10 +2024-05-23 07:51:16,846 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:16,846 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 07:51:16,846 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:16,846 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 07:51:16,847 INFO SenderThread:799 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 07:51:16,988 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:51:16,988 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:51:17,101 INFO wandb-upload_0:799 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/requirements.txt +2024-05-23 07:51:17,413 INFO wandb-upload_1:799 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/output.log +2024-05-23 07:51:17,467 INFO wandb-upload_2:799 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/wandb-summary.json +2024-05-23 07:51:17,486 INFO wandb-upload_3:799 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/files/config.yaml +2024-05-23 07:51:17,686 INFO Thread-11 (_thread_body):799 [sender.py:transition_state():613] send defer: 11 +2024-05-23 
07:51:17,687 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:17,687 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 07:51:17,687 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:17,687 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 07:51:17,687 INFO SenderThread:799 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 07:51:17,687 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 12 +2024-05-23 07:51:17,687 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:17,687 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 07:51:17,687 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:17,687 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 07:51:17,687 INFO SenderThread:799 [file_stream.py:finish():601] file stream finish called +2024-05-23 07:51:17,766 INFO SenderThread:799 [file_stream.py:finish():605] file stream finish is done +2024-05-23 07:51:17,766 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 13 +2024-05-23 07:51:17,766 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:17,766 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 07:51:17,766 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:17,766 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 07:51:17,766 INFO SenderThread:799 [sender.py:transition_state():613] send defer: 14 +2024-05-23 07:51:17,766 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:51:17,767 INFO HandlerThread:799 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 07:51:17,767 DEBUG SenderThread:799 [sender.py:send():378] send: final +2024-05-23 07:51:17,767 DEBUG SenderThread:799 [sender.py:send():378] send: footer +2024-05-23 07:51:17,767 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: defer +2024-05-23 07:51:17,767 INFO SenderThread:799 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 07:51:17,767 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:51:17,767 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:51:17,768 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:51:17,768 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 07:51:17,768 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 07:51:17,768 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 07:51:17,768 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 07:51:17,768 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:51:17,769 DEBUG SenderThread:799 [sender.py:send_request():405] send_request: server_info +2024-05-23 07:51:17,821 INFO MainThread:799 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 07:51:17,821 INFO 
MainThread:799 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 07:51:17,821 INFO MainThread:799 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 07:51:17,822 DEBUG HandlerThread:799 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 07:51:17,822 INFO HandlerThread:799 [handler.py:finish():882] shutting down handler +2024-05-23 07:51:18,769 INFO WriterThread:799 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/run-0b6cufsb.wandb +2024-05-23 07:51:18,821 INFO SenderThread:799 [sender.py:finish():1545] shutting down sender +2024-05-23 07:51:18,821 INFO SenderThread:799 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 07:51:18,821 INFO SenderThread:799 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..d68dc7c0a3707bd987a06f3bddc3b0411a73e2a5 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 07:51:03,543 INFO MainThread:643 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 07:51:03,543 INFO MainThread:643 [wandb_setup.py:_flush():76] Configure stats pid to 643 +2024-05-23 07:51:03,543 INFO MainThread:643 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 07:51:03,543 INFO MainThread:643 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 07:51:03,543 INFO MainThread:643 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 07:51:03,543 INFO MainThread:643 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 07:51:03,544 WARNING MainThread:643 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 07:51:03,544 INFO MainThread:643 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 07:51:03,544 INFO MainThread:643 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 07:51:03,544 INFO MainThread:643 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/logs/debug.log +2024-05-23 07:51:03,544 INFO MainThread:643 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/logs/debug-internal.log +2024-05-23 07:51:03,544 INFO MainThread:643 [wandb_init.py:init():560] calling init triggers +2024-05-23 07:51:03,544 INFO MainThread:643 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 07:51:03,544 INFO MainThread:643 [wandb_init.py:init():610] starting backend +2024-05-23 07:51:03,544 INFO MainThread:643 [wandb_init.py:init():614] setting up manager +2024-05-23 07:51:03,548 INFO MainThread:643 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 07:51:03,549 INFO MainThread:643 [wandb_init.py:init():622] backend started and connected +2024-05-23 07:51:03,553 INFO MainThread:643 
[wandb_init.py:init():711] updated telemetry +2024-05-23 07:51:03,562 INFO MainThread:643 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 07:51:03,840 INFO MainThread:643 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 07:51:03,953 INFO MainThread:643 [wandb_run.py:_on_init():2405] got version response +2024-05-23 07:51:03,953 INFO MainThread:643 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 07:51:04,263 INFO MainThread:643 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 07:51:04,263 INFO MainThread:643 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 07:51:04,263 INFO MainThread:643 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 07:51:04,263 INFO MainThread:643 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 07:51:04,266 INFO MainThread:643 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 07:51:18,822 WARNING MsgRouterThr:643 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/run-0b6cufsb.wandb b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/run-0b6cufsb.wandb new file mode 100644 index 0000000000000000000000000000000000000000..2fb1df2bb5151f39e0a3209ecf4f9b896e7f27e7 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_075103-0b6cufsb/run-0b6cufsb.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26a5f5629b52ff822f01c837c9a98832c80b6540 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716467596 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/output.log b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..a05424d86d2e082a13662f46daef1c93ef799909 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:12:33:17,558 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:12:33:26,129 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:12:33:26,130 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:12:33:26,130 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000'} +2024-05-23:12:33:28,422 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, 
run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000/tree/main' for available files. 
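This run aborts because the pretrained= path contains doubled slashes and points at a global_step directory that has no config.json, so transformers cannot load it either as a local folder or as a Hub repo id. A small sketch of a wrapper-side guard that normalizes the path and fails early with a clearer message; this guard is hypothetical (the harness performs no such check here), and the checkpoint path is copied from the log above:

import os

# Checkpoint path from the log; the doubled slashes presumably come from path concatenation (assumption).
ckpt = "/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000"

ckpt = os.path.normpath(ckpt)  # collapses the repeated '/' separators
config_json = os.path.join(ckpt, "config.json")

if not os.path.isfile(config_json):
    raise FileNotFoundError(
        f"{ckpt} has no config.json; convert the checkpoint to a Hugging Face-format "
        f"directory before passing it via --model_args pretrained=..."
    )

With such a guard in place, the evaluation job would stop before model initialization with an actionable message, instead of failing inside transformers.AutoConfig.from_pretrained.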
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..52a05db37e24f42c3f4b98a1c9b4718207c77af6 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T12:33:17.349956", + "startedAt": "2024-05-23T12:33:16.794747", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2334.22650625, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3399.998, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3386.123, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 209.58385848999023 + } + }, + "memory": { + "total": 1007.4379425048828 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..8fd3bccdd0f302d8ad212da288f598abf4146ad5 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-23 12:33:16,818 INFO StreamThr :2343 [internal.py:wandb_internal():85] W&B internal server running at pid: 2343, started at: 2024-05-23 12:33:16.816964 +2024-05-23 12:33:16,823 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: status +2024-05-23 12:33:16,824 INFO WriterThread:2343 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/run-2kqk5jdh.wandb +2024-05-23 12:33:16,826 DEBUG SenderThread:2343 [sender.py:send():378] send: header +2024-05-23 12:33:16,830 DEBUG SenderThread:2343 [sender.py:send():378] send: run +2024-05-23 12:33:17,128 INFO SenderThread:2343 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files +2024-05-23 12:33:17,128 INFO SenderThread:2343 [sender.py:_start_run_threads():1123] run started: 2kqk5jdh with start time 1716467596.817606 +2024-05-23 12:33:17,132 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 12:33:17,132 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: check_version +2024-05-23 12:33:17,253 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 12:33:17,256 DEBUG HandlerThread:2343 [system_info.py:__init__():26] System info init +2024-05-23 12:33:17,256 DEBUG HandlerThread:2343 [system_info.py:__init__():41] System info init done +2024-05-23 12:33:17,256 INFO HandlerThread:2343 [system_monitor.py:start():194] Starting system monitor +2024-05-23 12:33:17,256 INFO SystemMonitor:2343 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 12:33:17,256 INFO HandlerThread:2343 [system_monitor.py:probe():214] Collecting system info +2024-05-23 12:33:17,263 INFO SystemMonitor:2343 [interfaces.py:start():188] Started cpu 
monitoring +2024-05-23 12:33:17,263 INFO SystemMonitor:2343 [interfaces.py:start():188] Started disk monitoring +2024-05-23 12:33:17,263 INFO SystemMonitor:2343 [interfaces.py:start():188] Started memory monitoring +2024-05-23 12:33:17,265 INFO SystemMonitor:2343 [interfaces.py:start():188] Started network monitoring +2024-05-23 12:33:17,349 DEBUG HandlerThread:2343 [system_info.py:probe():150] Probing system +2024-05-23 12:33:17,353 DEBUG HandlerThread:2343 [system_info.py:_probe_git():135] Probing git +2024-05-23 12:33:17,363 ERROR HandlerThread:2343 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 12:33:17,363 DEBUG HandlerThread:2343 [system_info.py:_probe_git():143] Probing git done +2024-05-23 12:33:17,363 DEBUG HandlerThread:2343 [system_info.py:probe():198] Probing system done +2024-05-23 12:33:17,363 DEBUG HandlerThread:2343 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T12:33:17.349956', 'startedAt': '2024-05-23T12:33:16.794747', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2334.22650625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.998, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3386.123, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 209.58385848999023}}, 'memory': {'total': 1007.4379425048828}} +2024-05-23 12:33:17,363 INFO HandlerThread:2343 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 12:33:17,363 INFO HandlerThread:2343 [system_monitor.py:probe():227] Publishing system info +2024-05-23 12:33:17,366 INFO HandlerThread:2343 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 
12:33:17,371 DEBUG SenderThread:2343 [sender.py:send():378] send: files +2024-05-23 12:33:17,371 INFO SenderThread:2343 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 12:33:17,552 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 12:33:17,552 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: python_packages +2024-05-23 12:33:17,554 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 12:33:17,555 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: stop_status +2024-05-23 12:33:17,633 DEBUG SenderThread:2343 [sender.py:send():378] send: telemetry +2024-05-23 12:33:17,964 INFO wandb-upload_0:2343 [upload_job.py:push():130] Uploaded file /tmp/tmph8j247ivwandb/29s3onwr-wandb-metadata.json +2024-05-23 12:33:18,131 INFO Thread-12 :2343 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/output.log +2024-05-23 12:33:18,131 INFO Thread-12 :2343 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-metadata.json +2024-05-23 12:33:18,131 INFO Thread-12 :2343 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/requirements.txt +2024-05-23 12:33:20,130 INFO Thread-12 :2343 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/output.log +2024-05-23 12:33:22,635 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:33:28,131 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:33:28,137 INFO Thread-12 :2343 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/output.log +2024-05-23 12:33:28,430 DEBUG SenderThread:2343 [sender.py:send():378] send: exit +2024-05-23 12:33:28,430 INFO SenderThread:2343 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 12:33:28,430 INFO SenderThread:2343 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 12:33:28,431 INFO SenderThread:2343 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:33:28,432 INFO SenderThread:2343 [sender.py:send_exit():593] send defer +2024-05-23 12:33:28,432 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:28,432 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 12:33:28,432 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:28,432 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 12:33:28,432 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 1 +2024-05-23 12:33:28,432 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:28,432 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 12:33:28,432 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:28,432 INFO SenderThread:2343 
[sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 12:33:28,432 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 2 +2024-05-23 12:33:28,432 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:28,432 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 12:33:28,432 INFO HandlerThread:2343 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 12:33:28,433 DEBUG SystemMonitor:2343 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 12:33:28,433 DEBUG SystemMonitor:2343 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 12:33:28,433 DEBUG SystemMonitor:2343 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 12:33:28,436 INFO HandlerThread:2343 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 12:33:28,436 INFO HandlerThread:2343 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 12:33:28,436 INFO HandlerThread:2343 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 12:33:28,436 INFO HandlerThread:2343 [interfaces.py:finish():200] Joined network monitor +2024-05-23 12:33:28,436 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:28,436 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 12:33:28,437 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 3 +2024-05-23 12:33:28,437 DEBUG SenderThread:2343 [sender.py:send():378] send: stats +2024-05-23 12:33:28,438 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:28,438 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 12:33:28,438 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:28,438 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 12:33:28,438 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 4 +2024-05-23 12:33:28,438 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:28,438 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 12:33:28,438 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:28,438 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 12:33:28,438 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 5 +2024-05-23 12:33:28,438 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:28,438 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 12:33:28,439 DEBUG SenderThread:2343 [sender.py:send():378] send: summary +2024-05-23 12:33:28,439 INFO SenderThread:2343 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:33:28,440 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:28,440 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 12:33:28,440 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 6 +2024-05-23 12:33:28,440 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:28,440 INFO HandlerThread:2343 
[handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 12:33:28,440 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:28,440 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 12:33:28,445 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:33:28,509 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 7 +2024-05-23 12:33:28,509 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:28,509 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 12:33:28,510 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:28,510 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 12:33:29,139 INFO Thread-12 :2343 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/config.yaml +2024-05-23 12:33:29,139 INFO Thread-12 :2343 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-summary.json +2024-05-23 12:33:29,430 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:33:29,654 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 8 +2024-05-23 12:33:29,654 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:33:29,654 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:29,654 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 12:33:29,654 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:29,654 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 12:33:29,654 INFO SenderThread:2343 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 12:33:29,655 INFO SenderThread:2343 [job_builder.py:_get_source_type():576] no source found +2024-05-23 12:33:29,655 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 9 +2024-05-23 12:33:29,655 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:29,655 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 12:33:29,655 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:29,655 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 12:33:29,655 INFO SenderThread:2343 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 12:33:30,140 INFO SenderThread:2343 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/output.log +2024-05-23 12:33:30,140 INFO SenderThread:2343 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files +2024-05-23 12:33:30,141 INFO SenderThread:2343 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/output.log output.log +2024-05-23 12:33:30,141 INFO SenderThread:2343 
[dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-summary.json wandb-summary.json +2024-05-23 12:33:30,143 INFO SenderThread:2343 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/requirements.txt requirements.txt +2024-05-23 12:33:30,143 INFO SenderThread:2343 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/config.yaml config.yaml +2024-05-23 12:33:30,144 INFO SenderThread:2343 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-metadata.json wandb-metadata.json +2024-05-23 12:33:30,144 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 10 +2024-05-23 12:33:30,144 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:30,144 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 12:33:30,144 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:30,144 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 12:33:30,144 INFO SenderThread:2343 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:33:30,393 INFO wandb-upload_0:2343 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/output.log +2024-05-23 12:33:30,430 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:33:30,430 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:33:30,752 INFO wandb-upload_2:2343 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/requirements.txt +2024-05-23 12:33:30,779 INFO wandb-upload_1:2343 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/wandb-summary.json +2024-05-23 12:33:30,784 INFO wandb-upload_3:2343 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/files/config.yaml +2024-05-23 12:33:30,984 INFO Thread-11 (_thread_body):2343 [sender.py:transition_state():613] send defer: 11 +2024-05-23 12:33:30,984 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:30,984 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 12:33:30,984 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:30,985 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 12:33:30,985 INFO SenderThread:2343 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 12:33:30,985 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 12 +2024-05-23 12:33:30,985 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:30,985 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 12:33:30,985 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:30,985 INFO 
SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 12:33:30,985 INFO SenderThread:2343 [file_stream.py:finish():601] file stream finish called +2024-05-23 12:33:31,055 INFO SenderThread:2343 [file_stream.py:finish():605] file stream finish is done +2024-05-23 12:33:31,055 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 13 +2024-05-23 12:33:31,056 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:31,056 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 12:33:31,056 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:31,056 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 12:33:31,056 INFO SenderThread:2343 [sender.py:transition_state():613] send defer: 14 +2024-05-23 12:33:31,056 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:33:31,056 INFO HandlerThread:2343 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 12:33:31,056 DEBUG SenderThread:2343 [sender.py:send():378] send: final +2024-05-23 12:33:31,056 DEBUG SenderThread:2343 [sender.py:send():378] send: footer +2024-05-23 12:33:31,056 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: defer +2024-05-23 12:33:31,056 INFO SenderThread:2343 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 12:33:31,057 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:33:31,057 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:33:31,057 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 12:33:31,057 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 12:33:31,057 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 12:33:31,057 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 12:33:31,058 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:33:31,058 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:33:31,058 DEBUG SenderThread:2343 [sender.py:send_request():405] send_request: server_info +2024-05-23 12:33:31,110 INFO MainThread:2343 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 12:33:31,111 INFO MainThread:2343 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 12:33:31,111 INFO MainThread:2343 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 12:33:31,111 DEBUG HandlerThread:2343 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 12:33:31,111 INFO HandlerThread:2343 [handler.py:finish():882] shutting down handler +2024-05-23 12:33:32,058 INFO WriterThread:2343 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/run-2kqk5jdh.wandb +2024-05-23 12:33:32,110 INFO SenderThread:2343 [sender.py:finish():1545] shutting down sender +2024-05-23 12:33:32,111 INFO SenderThread:2343 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:33:32,111 INFO SenderThread:2343 [file_pusher.py:join():175] waiting for file pusher diff --git 
a/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..b6e313f466a466e167873258341a756b7a107dde --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_setup.py:_flush():76] Configure stats pid to 2188 +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 12:33:16,812 WARNING MainThread:2188 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/logs/debug.log +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/logs/debug-internal.log +2024-05-23 12:33:16,812 INFO MainThread:2188 [wandb_init.py:init():560] calling init triggers +2024-05-23 12:33:16,813 INFO MainThread:2188 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 12:33:16,813 INFO MainThread:2188 [wandb_init.py:init():610] starting backend +2024-05-23 12:33:16,813 INFO MainThread:2188 [wandb_init.py:init():614] setting up manager +2024-05-23 12:33:16,816 INFO MainThread:2188 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 12:33:16,817 INFO MainThread:2188 [wandb_init.py:init():622] backend started and connected +2024-05-23 12:33:16,820 INFO MainThread:2188 [wandb_init.py:init():711] updated telemetry +2024-05-23 12:33:16,829 INFO MainThread:2188 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 12:33:17,132 INFO MainThread:2188 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 12:33:17,247 INFO MainThread:2188 [wandb_run.py:_on_init():2405] got version response +2024-05-23 12:33:17,247 INFO MainThread:2188 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 12:33:17,553 INFO MainThread:2188 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 12:33:17,553 INFO MainThread:2188 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 12:33:17,553 INFO MainThread:2188 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 12:33:17,553 INFO MainThread:2188 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-23 12:33:17,555 INFO MainThread:2188 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 12:33:32,112 WARNING MsgRouterThr:2188 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/run-2kqk5jdh.wandb b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/run-2kqk5jdh.wandb new file mode 100644 index 0000000000000000000000000000000000000000..9bea8c1bea74c0fc37d8320704f3ca523dd73ebb Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_123316-2kqk5jdh/run-2kqk5jdh.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 
+torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..0b8a88e8c475e0e3e09de3e90f8c5c68c7a07253 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-23 13:04:07,507 INFO StreamThr :1329 [internal.py:wandb_internal():85] W&B internal server running at pid: 1329, started at: 2024-05-23 13:04:07.505670 +2024-05-23 13:04:07,512 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: status +2024-05-23 13:04:07,513 INFO WriterThread:1329 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/run-wvnshpcy.wandb +2024-05-23 13:04:07,515 DEBUG SenderThread:1329 [sender.py:send():378] send: header +2024-05-23 13:04:07,518 DEBUG SenderThread:1329 [sender.py:send():378] send: run +2024-05-23 13:04:07,755 INFO SenderThread:1329 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files +2024-05-23 13:04:07,755 INFO SenderThread:1329 [sender.py:_start_run_threads():1123] run started: wvnshpcy with start time 1716469447.505529 +2024-05-23 13:04:07,764 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 13:04:07,764 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: check_version +2024-05-23 13:04:07,881 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 13:04:07,883 DEBUG HandlerThread:1329 [system_info.py:__init__():26] System info init +2024-05-23 13:04:07,883 DEBUG HandlerThread:1329 [system_info.py:__init__():41] System info init done +2024-05-23 13:04:07,883 INFO HandlerThread:1329 [system_monitor.py:start():194] Starting system monitor +2024-05-23 13:04:07,884 INFO SystemMonitor:1329 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 13:04:07,884 INFO HandlerThread:1329 [system_monitor.py:probe():214] Collecting system info +2024-05-23 13:04:07,890 INFO SystemMonitor:1329 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 13:04:07,891 INFO SystemMonitor:1329 [interfaces.py:start():188] Started disk monitoring +2024-05-23 13:04:07,894 INFO SystemMonitor:1329 [interfaces.py:start():188] Started memory monitoring +2024-05-23 13:04:07,897 INFO SystemMonitor:1329 
[interfaces.py:start():188] Started network monitoring +2024-05-23 13:04:07,993 DEBUG HandlerThread:1329 [system_info.py:probe():150] Probing system +2024-05-23 13:04:07,996 DEBUG HandlerThread:1329 [system_info.py:_probe_git():135] Probing git +2024-05-23 13:04:08,006 ERROR HandlerThread:1329 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 13:04:08,006 DEBUG HandlerThread:1329 [system_info.py:_probe_git():143] Probing git done +2024-05-23 13:04:08,006 DEBUG HandlerThread:1329 [system_info.py:probe():198] Probing system done +2024-05-23 13:04:08,006 DEBUG HandlerThread:1329 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T13:04:07.993231', 'startedAt': '2024-05-23T13:04:07.485994', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.17863125, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.6171989440918}}, 'memory': {'total': 1007.4379539489746}} +2024-05-23 13:04:08,006 INFO HandlerThread:1329 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 13:04:08,006 INFO HandlerThread:1329 [system_monitor.py:probe():227] Publishing system info +2024-05-23 13:04:08,009 INFO HandlerThread:1329 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 13:04:08,014 DEBUG SenderThread:1329 [sender.py:send():378] send: files +2024-05-23 13:04:08,014 INFO SenderThread:1329 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 13:04:08,196 DEBUG HandlerThread:1329 
[handler.py:handle_request():158] handle_request: python_packages +2024-05-23 13:04:08,196 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: python_packages +2024-05-23 13:04:08,197 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 13:04:08,199 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: stop_status +2024-05-23 13:04:08,352 DEBUG SenderThread:1329 [sender.py:send():378] send: telemetry +2024-05-23 13:04:08,593 INFO wandb-upload_0:1329 [upload_job.py:push():130] Uploaded file /tmp/tmpfpyplp_twandb/04z10egu-wandb-metadata.json +2024-05-23 13:04:08,757 INFO Thread-12 :1329 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/requirements.txt +2024-05-23 13:04:08,758 INFO Thread-12 :1329 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/output.log +2024-05-23 13:04:08,758 INFO Thread-12 :1329 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/wandb-metadata.json +2024-05-23 13:04:10,757 INFO Thread-12 :1329 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/output.log +2024-05-23 13:04:13,357 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:04:18,739 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:04:18,764 INFO Thread-12 :1329 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/output.log +2024-05-23 13:04:19,155 DEBUG SenderThread:1329 [sender.py:send():378] send: exit +2024-05-23 13:04:19,155 INFO SenderThread:1329 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 13:04:19,155 INFO SenderThread:1329 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 13:04:19,157 INFO SenderThread:1329 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:04:19,157 INFO SenderThread:1329 [sender.py:send_exit():593] send defer +2024-05-23 13:04:19,157 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:19,157 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 13:04:19,157 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:19,157 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 13:04:19,157 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 1 +2024-05-23 13:04:19,157 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:19,157 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 13:04:19,157 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:19,158 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 13:04:19,158 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 2 +2024-05-23 13:04:19,158 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer 
+2024-05-23 13:04:19,158 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 13:04:19,158 INFO HandlerThread:1329 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 13:04:19,158 DEBUG SystemMonitor:1329 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 13:04:19,158 DEBUG SystemMonitor:1329 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 13:04:19,158 DEBUG SystemMonitor:1329 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 13:04:19,158 INFO HandlerThread:1329 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 13:04:19,161 INFO HandlerThread:1329 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 13:04:19,161 INFO HandlerThread:1329 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 13:04:19,161 INFO HandlerThread:1329 [interfaces.py:finish():200] Joined network monitor +2024-05-23 13:04:19,162 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:19,162 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 13:04:19,162 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 3 +2024-05-23 13:04:19,162 DEBUG SenderThread:1329 [sender.py:send():378] send: stats +2024-05-23 13:04:19,163 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:19,163 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 13:04:19,163 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:19,163 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 13:04:19,163 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 4 +2024-05-23 13:04:19,163 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:19,163 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 13:04:19,163 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:19,163 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 13:04:19,163 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 5 +2024-05-23 13:04:19,164 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:19,164 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 13:04:19,164 DEBUG SenderThread:1329 [sender.py:send():378] send: summary +2024-05-23 13:04:19,164 INFO SenderThread:1329 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:04:19,165 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:19,165 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 13:04:19,165 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 6 +2024-05-23 13:04:19,165 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:19,165 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 13:04:19,165 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:19,165 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 
13:04:19,170 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:04:19,262 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 7 +2024-05-23 13:04:19,262 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:19,262 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 13:04:19,263 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:19,263 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 13:04:19,765 INFO Thread-12 :1329 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/config.yaml +2024-05-23 13:04:19,765 INFO Thread-12 :1329 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/wandb-summary.json +2024-05-23 13:04:20,155 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:04:20,378 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 8 +2024-05-23 13:04:20,378 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:04:20,378 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:20,378 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 13:04:20,378 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:20,378 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 13:04:20,378 INFO SenderThread:1329 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 13:04:20,379 INFO SenderThread:1329 [job_builder.py:_get_source_type():576] no source found +2024-05-23 13:04:20,379 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 9 +2024-05-23 13:04:20,379 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:20,379 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 13:04:20,379 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:20,379 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 13:04:20,379 INFO SenderThread:1329 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 13:04:20,766 INFO SenderThread:1329 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/output.log +2024-05-23 13:04:20,766 INFO SenderThread:1329 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files +2024-05-23 13:04:20,767 INFO SenderThread:1329 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/output.log output.log +2024-05-23 13:04:20,767 INFO SenderThread:1329 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/config.yaml config.yaml +2024-05-23 13:04:20,769 INFO SenderThread:1329 [dir_watcher.py:finish():402] scan save: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/requirements.txt requirements.txt +2024-05-23 13:04:20,769 INFO SenderThread:1329 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/wandb-summary.json wandb-summary.json +2024-05-23 13:04:20,770 INFO SenderThread:1329 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/wandb-metadata.json wandb-metadata.json +2024-05-23 13:04:20,770 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 10 +2024-05-23 13:04:20,770 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:20,770 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 13:04:20,770 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:20,770 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 13:04:20,770 INFO SenderThread:1329 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:04:21,021 INFO wandb-upload_0:1329 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/output.log +2024-05-23 13:04:21,156 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:04:21,156 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:04:21,354 INFO wandb-upload_1:1329 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/config.yaml +2024-05-23 13:04:21,406 INFO wandb-upload_3:1329 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/wandb-summary.json +2024-05-23 13:04:21,409 INFO wandb-upload_2:1329 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/requirements.txt +2024-05-23 13:04:21,609 INFO Thread-11 (_thread_body):1329 [sender.py:transition_state():613] send defer: 11 +2024-05-23 13:04:21,609 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:21,610 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 13:04:21,610 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:21,610 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 13:04:21,610 INFO SenderThread:1329 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 13:04:21,610 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 12 +2024-05-23 13:04:21,610 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:21,610 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 13:04:21,610 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:21,610 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 13:04:21,610 INFO SenderThread:1329 [file_stream.py:finish():601] file stream finish called +2024-05-23 13:04:21,671 INFO SenderThread:1329 [file_stream.py:finish():605] 
file stream finish is done +2024-05-23 13:04:21,671 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 13 +2024-05-23 13:04:21,672 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:21,672 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 13:04:21,672 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:21,672 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 13:04:21,672 INFO SenderThread:1329 [sender.py:transition_state():613] send defer: 14 +2024-05-23 13:04:21,672 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:04:21,672 INFO HandlerThread:1329 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 13:04:21,672 DEBUG SenderThread:1329 [sender.py:send():378] send: final +2024-05-23 13:04:21,672 DEBUG SenderThread:1329 [sender.py:send():378] send: footer +2024-05-23 13:04:21,672 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: defer +2024-05-23 13:04:21,672 INFO SenderThread:1329 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 13:04:21,673 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:04:21,673 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:04:21,673 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:04:21,673 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 13:04:21,674 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 13:04:21,674 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 13:04:21,674 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 13:04:21,674 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:04:21,674 DEBUG SenderThread:1329 [sender.py:send_request():405] send_request: server_info +2024-05-23 13:04:21,738 INFO MainThread:1329 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 13:04:21,738 INFO MainThread:1329 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 13:04:21,738 INFO MainThread:1329 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 13:04:21,739 DEBUG HandlerThread:1329 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 13:04:21,739 INFO HandlerThread:1329 [handler.py:finish():882] shutting down handler +2024-05-23 13:04:22,674 INFO WriterThread:1329 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/run-wvnshpcy.wandb +2024-05-23 13:04:22,738 INFO SenderThread:1329 [sender.py:finish():1545] shutting down sender +2024-05-23 13:04:22,738 INFO SenderThread:1329 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:04:22,738 INFO SenderThread:1329 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..239df6b37b8f78fdf6636a9a671fd77a5ee33ee9 --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 13:04:07,500 INFO MainThread:1174 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 13:04:07,500 INFO MainThread:1174 [wandb_setup.py:_flush():76] Configure stats pid to 1174 +2024-05-23 13:04:07,500 INFO MainThread:1174 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 13:04:07,500 INFO MainThread:1174 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 13:04:07,500 INFO MainThread:1174 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 13:04:07,500 INFO MainThread:1174 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 13:04:07,500 WARNING MainThread:1174 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 13:04:07,500 INFO MainThread:1174 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 13:04:07,501 INFO MainThread:1174 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 13:04:07,501 INFO MainThread:1174 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/logs/debug.log +2024-05-23 13:04:07,501 INFO MainThread:1174 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/logs/debug-internal.log +2024-05-23 13:04:07,501 INFO MainThread:1174 [wandb_init.py:init():560] calling init triggers +2024-05-23 13:04:07,501 INFO MainThread:1174 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 13:04:07,501 INFO MainThread:1174 [wandb_init.py:init():610] starting backend +2024-05-23 13:04:07,501 INFO MainThread:1174 [wandb_init.py:init():614] setting up manager +2024-05-23 13:04:07,504 INFO MainThread:1174 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 13:04:07,505 INFO MainThread:1174 [wandb_init.py:init():622] backend started and connected +2024-05-23 13:04:07,508 INFO MainThread:1174 [wandb_init.py:init():711] updated telemetry +2024-05-23 13:04:07,517 INFO MainThread:1174 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 13:04:07,764 INFO MainThread:1174 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 13:04:07,875 INFO MainThread:1174 [wandb_run.py:_on_init():2405] got version response +2024-05-23 13:04:07,875 INFO MainThread:1174 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 13:04:08,197 INFO MainThread:1174 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 13:04:08,197 INFO MainThread:1174 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 13:04:08,198 INFO MainThread:1174 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 13:04:08,198 INFO MainThread:1174 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-23 13:04:08,201 INFO MainThread:1174 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 13:04:22,740 WARNING MsgRouterThr:1174 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/run-wvnshpcy.wandb b/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/run-wvnshpcy.wandb new file mode 100644 index 0000000000000000000000000000000000000000..292eeb239c369a2910df6d7a1b9f9a072152a75e Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/run-wvnshpcy.wandb differ diff --git a/venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_train.so.8 b/venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_train.so.8 new file mode 100644 index 0000000000000000000000000000000000000000..f07a406d06ec5bb1e21ab8d2054c095139eb04e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_train.so.8 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c8180a1232b79bd9820f3d3bee72ae5d30daddd46db30922a76fa13ac7e6fdf +size 116248176 diff --git a/venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 b/venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 new file mode 100644 index 0000000000000000000000000000000000000000..dab143352b51ddc4970ed8b37139649454c66d85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_infer.so.8 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e520be70d6ab22e5ac565049bdb7b1582483703b641c440a7c21e92e12715eae +size 90621456 diff --git a/venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e165d13b316ada4119ce74edc3d3d65617ff5be Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9199d8e43f5bd08d1cd33a91c4d09d0c254944f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3825e2952a504b305ff3771995be5c3fb718ccf2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3b0f792068c5f0165f8817dc75255f32b11e5add --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__init__.py @@ -0,0 +1,81 @@ +# Copyright 2024 The HuggingFace Team. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_grounding_dino": [ + "GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP", + "GroundingDinoConfig", + ], + "processing_grounding_dino": ["GroundingDinoProcessor"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_grounding_dino"] = [ + "GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST", + "GroundingDinoForObjectDetection", + "GroundingDinoModel", + "GroundingDinoPreTrainedModel", + ] + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_grounding_dino"] = ["GroundingDinoImageProcessor"] + + +if TYPE_CHECKING: + from .configuration_grounding_dino import ( + GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP, + GroundingDinoConfig, + ) + from .processing_grounding_dino import GroundingDinoProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_grounding_dino import ( + GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST, + GroundingDinoForObjectDetection, + GroundingDinoModel, + GroundingDinoPreTrainedModel, + ) + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_grounding_dino import GroundingDinoImageProcessor + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa36d0d84f40e2e00f3fbc554f8f6eed4881d61d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/configuration_grounding_dino.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/configuration_grounding_dino.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d029417d265db76352183826c42c349f8e7ba9e Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/configuration_grounding_dino.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/convert_grounding_dino_to_hf.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/convert_grounding_dino_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..facc184015737ba3cfc74c4bab9813aae6f73c48 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/convert_grounding_dino_to_hf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/image_processing_grounding_dino.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/image_processing_grounding_dino.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b49f2fce4733a8ee46caa69d64ecc0ea0873fd3a Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/image_processing_grounding_dino.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/modeling_grounding_dino.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/modeling_grounding_dino.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe4d97a947851b5f9d3572dc0a46cafc9403e4b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/modeling_grounding_dino.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/processing_grounding_dino.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/processing_grounding_dino.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49454ba89eb5364070b0618c6933fd33dd072f72 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/processing_grounding_dino.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/configuration_grounding_dino.py b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/configuration_grounding_dino.py new file mode 100644 index 0000000000000000000000000000000000000000..fe683035039600cee80ecd6889e5164fd1259499 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/configuration_grounding_dino.py @@ -0,0 +1,301 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
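The `__init__.py` added above registers the Grounding DINO symbols through transformers' `_LazyModule`, so importing the package does not pull in the torch- or vision-dependent submodules until an attribute is actually accessed. As a minimal, hypothetical sketch of the same deferred-import idea (not the `_LazyModule` implementation itself), a package `__init__.py` can rely on PEP 562's module-level `__getattr__`:

```python
# __init__.py -- illustrative only; transformers uses its own _LazyModule helper.
import importlib
from typing import Any

# Map public names to the submodule that defines them (names taken from the file above).
_LAZY_ATTRS = {
    "GroundingDinoConfig": ".configuration_grounding_dino",
    "GroundingDinoProcessor": ".processing_grounding_dino",
    "GroundingDinoForObjectDetection": ".modeling_grounding_dino",
}


def __getattr__(name: str) -> Any:
    # Import lazily, on first attribute access, mirroring the _import_structure above.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```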
+""" Grounding DINO model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging +from ..auto import CONFIG_MAPPING + + +logger = logging.get_logger(__name__) + +GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "IDEA-Research/grounding-dino-tiny": "https://huggingface.co/IDEA-Research/grounding-dino-tiny/resolve/main/config.json", +} + + +class GroundingDinoConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`GroundingDinoModel`]. It is used to instantiate a + Grounding DINO model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the Grounding DINO + [IDEA-Research/grounding-dino-tiny](https://huggingface.co/IDEA-Research/grounding-dino-tiny) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`): + The configuration of the backbone model. + backbone (`str`, *optional*): + Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this + will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` + is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. + use_pretrained_backbone (`bool`, *optional*, defaults to `False`): + Whether to use pretrained weights for the backbone. + use_timm_backbone (`bool`, *optional*, defaults to `False`): + Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers + library. + backbone_kwargs (`dict`, *optional*): + Keyword arguments to be passed to AutoBackbone when loading from a checkpoint + e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. + text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`): + The config object or dictionary of the text backbone. + num_queries (`int`, *optional*, defaults to 900): + Number of object queries, i.e. detection slots. This is the maximal number of objects + [`GroundingDinoModel`] can detect in a single image. + encoder_layers (`int`, *optional*, defaults to 6): + Number of encoder layers. + encoder_ffn_dim (`int`, *optional*, defaults to 2048): + Dimension of the "intermediate" (often named feed-forward) layer in decoder. + encoder_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer encoder. + decoder_layers (`int`, *optional*, defaults to 6): + Number of decoder layers. + decoder_ffn_dim (`int`, *optional*, defaults to 2048): + Dimension of the "intermediate" (often named feed-forward) layer in decoder. + decoder_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer decoder. + is_encoder_decoder (`bool`, *optional*, defaults to `True`): + Whether the model is used as an encoder/decoder or not. + activation_function (`str` or `function`, *optional*, defaults to `"relu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. 
+ d_model (`int`, *optional*, defaults to 256): + Dimension of the layers. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for activations inside the fully connected layer. + auxiliary_loss (`bool`, *optional*, defaults to `False`): + Whether auxiliary decoding losses (loss at each decoder layer) are to be used. + position_embedding_type (`str`, *optional*, defaults to `"sine"`): + Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. + num_feature_levels (`int`, *optional*, defaults to 4): + The number of input feature levels. + encoder_n_points (`int`, *optional*, defaults to 4): + The number of sampled keys in each feature level for each attention head in the encoder. + decoder_n_points (`int`, *optional*, defaults to 4): + The number of sampled keys in each feature level for each attention head in the decoder. + two_stage (`bool`, *optional*, defaults to `True`): + Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of + Grounding DINO, which are further fed into the decoder for iterative bounding box refinement. + class_cost (`float`, *optional*, defaults to 1.0): + Relative weight of the classification error in the Hungarian matching cost. + bbox_cost (`float`, *optional*, defaults to 5.0): + Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. + giou_cost (`float`, *optional*, defaults to 2.0): + Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. + bbox_loss_coefficient (`float`, *optional*, defaults to 5.0): + Relative weight of the L1 bounding box loss in the object detection loss. + giou_loss_coefficient (`float`, *optional*, defaults to 2.0): + Relative weight of the generalized IoU loss in the object detection loss. + focal_alpha (`float`, *optional*, defaults to 0.25): + Alpha parameter in the focal loss. + disable_custom_kernels (`bool`, *optional*, defaults to `False`): + Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom + kernels are not supported by PyTorch ONNX export. + max_text_len (`int`, *optional*, defaults to 256): + The maximum length of the text input. + text_enhancer_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the text enhancer. + fusion_droppath (`float`, *optional*, defaults to 0.1): + The droppath ratio for the fusion module. + fusion_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the fusion module. + embedding_init_target (`bool`, *optional*, defaults to `True`): + Whether to initialize the target with Embedding weights. + query_dim (`int`, *optional*, defaults to 4): + The dimension of the query vector. + decoder_bbox_embed_share (`bool`, *optional*, defaults to `True`): + Whether to share the bbox regression head for all decoder layers. + two_stage_bbox_embed_share (`bool`, *optional*, defaults to `False`): + Whether to share the bbox embedding between the two-stage bbox generator and the region proposal + generation. + positional_embedding_temperature (`float`, *optional*, defaults to 20): + The temperature for Sine Positional Embedding that is used together with vision backbone. 
+ init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + + Examples: + + ```python + >>> from transformers import GroundingDinoConfig, GroundingDinoModel + + >>> # Initializing a Grounding DINO IDEA-Research/grounding-dino-tiny style configuration + >>> configuration = GroundingDinoConfig() + + >>> # Initializing a model (with random weights) from the IDEA-Research/grounding-dino-tiny style configuration + >>> model = GroundingDinoModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "grounding-dino" + attribute_map = { + "hidden_size": "d_model", + "num_attention_heads": "encoder_attention_heads", + } + + def __init__( + self, + backbone_config=None, + backbone=None, + use_pretrained_backbone=False, + use_timm_backbone=False, + backbone_kwargs=None, + text_config=None, + num_queries=900, + encoder_layers=6, + encoder_ffn_dim=2048, + encoder_attention_heads=8, + decoder_layers=6, + decoder_ffn_dim=2048, + decoder_attention_heads=8, + is_encoder_decoder=True, + activation_function="relu", + d_model=256, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + auxiliary_loss=False, + position_embedding_type="sine", + num_feature_levels=4, + encoder_n_points=4, + decoder_n_points=4, + two_stage=True, + class_cost=1.0, + bbox_cost=5.0, + giou_cost=2.0, + bbox_loss_coefficient=5.0, + giou_loss_coefficient=2.0, + focal_alpha=0.25, + disable_custom_kernels=False, + # other parameters + max_text_len=256, + text_enhancer_dropout=0.0, + fusion_droppath=0.1, + fusion_dropout=0.0, + embedding_init_target=True, + query_dim=4, + decoder_bbox_embed_share=True, + two_stage_bbox_embed_share=False, + positional_embedding_temperature=20, + init_std=0.02, + layer_norm_eps=1e-5, + **kwargs, + ): + if not use_timm_backbone and use_pretrained_backbone: + raise ValueError( + "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`" + ) + + if backbone_config is not None and backbone is not None: + raise ValueError("You can't specify both `backbone` and `backbone_config`.") + + if backbone_config is None and backbone is None: + logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.") + backbone_config = CONFIG_MAPPING["swin"]( + window_size=7, + image_size=224, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + out_indices=[2, 3, 4], + ) + elif isinstance(backbone_config, dict): + backbone_model_type = backbone_config.pop("model_type") + config_class = CONFIG_MAPPING[backbone_model_type] + backbone_config = config_class.from_dict(backbone_config) + + if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None: + raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") + + if text_config is None: + text_config = {} + logger.info("text_config is None. 
Initializing the text config with default values (`BertConfig`).") + + self.backbone_config = backbone_config + self.backbone = backbone + self.use_pretrained_backbone = use_pretrained_backbone + self.use_timm_backbone = use_timm_backbone + self.backbone_kwargs = backbone_kwargs + self.num_queries = num_queries + self.d_model = d_model + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.decoder_layers = decoder_layers + self.decoder_attention_heads = decoder_attention_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.auxiliary_loss = auxiliary_loss + self.position_embedding_type = position_embedding_type + # deformable attributes + self.num_feature_levels = num_feature_levels + self.encoder_n_points = encoder_n_points + self.decoder_n_points = decoder_n_points + self.two_stage = two_stage + # Hungarian matcher + self.class_cost = class_cost + self.bbox_cost = bbox_cost + self.giou_cost = giou_cost + # Loss coefficients + self.bbox_loss_coefficient = bbox_loss_coefficient + self.giou_loss_coefficient = giou_loss_coefficient + self.focal_alpha = focal_alpha + self.disable_custom_kernels = disable_custom_kernels + # Text backbone + if isinstance(text_config, dict): + text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "bert" + text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) + elif text_config is None: + text_config = CONFIG_MAPPING["bert"]() + + self.text_config = text_config + self.max_text_len = max_text_len + + # Text Enhancer + self.text_enhancer_dropout = text_enhancer_dropout + # Fusion + self.fusion_droppath = fusion_droppath + self.fusion_dropout = fusion_dropout + # Others + self.embedding_init_target = embedding_init_target + self.query_dim = query_dim + self.decoder_bbox_embed_share = decoder_bbox_embed_share + self.two_stage_bbox_embed_share = two_stage_bbox_embed_share + if two_stage_bbox_embed_share and not decoder_bbox_embed_share: + raise ValueError("If two_stage_bbox_embed_share is True, decoder_bbox_embed_share must be True.") + self.positional_embedding_temperature = positional_embedding_temperature + self.init_std = init_std + self.layer_norm_eps = layer_norm_eps + super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) + + @property + def num_attention_heads(self) -> int: + return self.encoder_attention_heads + + @property + def hidden_size(self) -> int: + return self.d_model diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/convert_grounding_dino_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/convert_grounding_dino_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..ac8e82bfd825d6d1aa5fbe25ab8059dce5deef0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/convert_grounding_dino_to_hf.py @@ -0,0 +1,491 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
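Before moving on to the conversion script, here is a short, hedged sketch of how the `GroundingDinoConfig` defaults shown above resolve in practice (assuming a transformers release that ships these files): with no `backbone_config`, a Swin-Tiny-style backbone config is built internally; a `dict` is routed through `CONFIG_MAPPING` via its `model_type` key; and `text_config` falls back to a default `BertConfig`.

```python
from transformers import GroundingDinoConfig

# No backbone specified: the constructor builds a default Swin backbone config.
config = GroundingDinoConfig()
print(type(config.backbone_config).__name__)   # SwinConfig
print(type(config.text_config).__name__)       # BertConfig (default text backbone)

# A dict backbone_config is resolved through CONFIG_MAPPING using its "model_type" key.
config = GroundingDinoConfig(backbone_config={"model_type": "swin", "out_indices": [2, 3, 4]})
print(config.backbone_config.out_indices)      # [2, 3, 4]
```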
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Grounding DINO checkpoints from the original repository. + +URL: https://github.com/IDEA-Research/GroundingDINO""" + +import argparse + +import requests +import torch +from PIL import Image +from torchvision import transforms as T + +from transformers import ( + AutoTokenizer, + GroundingDinoConfig, + GroundingDinoForObjectDetection, + GroundingDinoImageProcessor, + GroundingDinoProcessor, + SwinConfig, +) + + +IMAGENET_MEAN = [0.485, 0.456, 0.406] +IMAGENET_STD = [0.229, 0.224, 0.225] + + +def get_grounding_dino_config(model_name): + if "tiny" in model_name: + window_size = 7 + embed_dim = 96 + depths = (2, 2, 6, 2) + num_heads = (3, 6, 12, 24) + image_size = 224 + elif "base" in model_name: + window_size = 12 + embed_dim = 128 + depths = (2, 2, 18, 2) + num_heads = (4, 8, 16, 32) + image_size = 384 + else: + raise ValueError("Model not supported, only supports base and large variants") + + backbone_config = SwinConfig( + window_size=window_size, + image_size=image_size, + embed_dim=embed_dim, + depths=depths, + num_heads=num_heads, + out_indices=[2, 3, 4], + ) + + config = GroundingDinoConfig(backbone_config=backbone_config) + + return config + + +def create_rename_keys(state_dict, config): + rename_keys = [] + # fmt: off + ########################################## VISION BACKBONE - START + # patch embedding layer + rename_keys.append(("backbone.0.patch_embed.proj.weight", + "model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.weight")) + rename_keys.append(("backbone.0.patch_embed.proj.bias", + "model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.bias")) + rename_keys.append(("backbone.0.patch_embed.norm.weight", + "model.backbone.conv_encoder.model.embeddings.norm.weight")) + rename_keys.append(("backbone.0.patch_embed.norm.bias", + "model.backbone.conv_encoder.model.embeddings.norm.bias")) + + for layer, depth in enumerate(config.backbone_config.depths): + for block in range(depth): + # layernorms + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.norm1.weight", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_before.weight")) + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.norm1.bias", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_before.bias")) + + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.norm2.weight", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_after.weight")) + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.norm2.bias", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_after.bias")) + # attention + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.attn.relative_position_bias_table", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.relative_position_bias_table")) + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.attn.proj.weight", + 
f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.output.dense.weight")) + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.attn.proj.bias", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.output.dense.bias")) + # intermediate + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.mlp.fc1.weight", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.intermediate.dense.weight")) + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.mlp.fc1.bias", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.intermediate.dense.bias")) + + # output + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.mlp.fc2.weight", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.output.dense.weight")) + rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.mlp.fc2.bias", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.output.dense.bias")) + + # downsample + if layer!=len(config.backbone_config.depths)-1: + rename_keys.append((f"backbone.0.layers.{layer}.downsample.reduction.weight", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.downsample.reduction.weight")) + rename_keys.append((f"backbone.0.layers.{layer}.downsample.norm.weight", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.downsample.norm.weight")) + rename_keys.append((f"backbone.0.layers.{layer}.downsample.norm.bias", + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.downsample.norm.bias")) + + for out_indice in config.backbone_config.out_indices: + # Grounding DINO implementation of out_indices isn't aligned with transformers + rename_keys.append((f"backbone.0.norm{out_indice-1}.weight", + f"model.backbone.conv_encoder.model.hidden_states_norms.stage{out_indice}.weight")) + rename_keys.append((f"backbone.0.norm{out_indice-1}.bias", + f"model.backbone.conv_encoder.model.hidden_states_norms.stage{out_indice}.bias")) + + ########################################## VISION BACKBONE - END + + ########################################## ENCODER - START + deformable_key_mappings = { + 'self_attn.sampling_offsets.weight': 'deformable_layer.self_attn.sampling_offsets.weight', + 'self_attn.sampling_offsets.bias': 'deformable_layer.self_attn.sampling_offsets.bias', + 'self_attn.attention_weights.weight': 'deformable_layer.self_attn.attention_weights.weight', + 'self_attn.attention_weights.bias': 'deformable_layer.self_attn.attention_weights.bias', + 'self_attn.value_proj.weight': 'deformable_layer.self_attn.value_proj.weight', + 'self_attn.value_proj.bias': 'deformable_layer.self_attn.value_proj.bias', + 'self_attn.output_proj.weight': 'deformable_layer.self_attn.output_proj.weight', + 'self_attn.output_proj.bias': 'deformable_layer.self_attn.output_proj.bias', + 'norm1.weight': 'deformable_layer.self_attn_layer_norm.weight', + 'norm1.bias': 'deformable_layer.self_attn_layer_norm.bias', + 'linear1.weight': 'deformable_layer.fc1.weight', + 'linear1.bias': 'deformable_layer.fc1.bias', + 'linear2.weight': 'deformable_layer.fc2.weight', + 'linear2.bias': 'deformable_layer.fc2.bias', + 'norm2.weight': 'deformable_layer.final_layer_norm.weight', + 'norm2.bias': 'deformable_layer.final_layer_norm.bias', + } + text_enhancer_key_mappings = { + 'self_attn.in_proj_weight': 'text_enhancer_layer.self_attn.in_proj_weight', + 'self_attn.in_proj_bias': 'text_enhancer_layer.self_attn.in_proj_bias', + 
'self_attn.out_proj.weight': 'text_enhancer_layer.self_attn.out_proj.weight', + 'self_attn.out_proj.bias': 'text_enhancer_layer.self_attn.out_proj.bias', + 'linear1.weight': 'text_enhancer_layer.fc1.weight', + 'linear1.bias': 'text_enhancer_layer.fc1.bias', + 'linear2.weight': 'text_enhancer_layer.fc2.weight', + 'linear2.bias': 'text_enhancer_layer.fc2.bias', + 'norm1.weight': 'text_enhancer_layer.layer_norm_before.weight', + 'norm1.bias': 'text_enhancer_layer.layer_norm_before.bias', + 'norm2.weight': 'text_enhancer_layer.layer_norm_after.weight', + 'norm2.bias': 'text_enhancer_layer.layer_norm_after.bias', + } + fusion_key_mappings = { + 'gamma_v': 'fusion_layer.vision_param', + 'gamma_l': 'fusion_layer.text_param', + 'layer_norm_v.weight': 'fusion_layer.layer_norm_vision.weight', + 'layer_norm_v.bias': 'fusion_layer.layer_norm_vision.bias', + 'layer_norm_l.weight': 'fusion_layer.layer_norm_text.weight', + 'layer_norm_l.bias': 'fusion_layer.layer_norm_text.bias', + 'attn.v_proj.weight': 'fusion_layer.attn.vision_proj.weight', + 'attn.v_proj.bias': 'fusion_layer.attn.vision_proj.bias', + 'attn.l_proj.weight': 'fusion_layer.attn.text_proj.weight', + 'attn.l_proj.bias': 'fusion_layer.attn.text_proj.bias', + 'attn.values_v_proj.weight': 'fusion_layer.attn.values_vision_proj.weight', + 'attn.values_v_proj.bias': 'fusion_layer.attn.values_vision_proj.bias', + 'attn.values_l_proj.weight': 'fusion_layer.attn.values_text_proj.weight', + 'attn.values_l_proj.bias': 'fusion_layer.attn.values_text_proj.bias', + 'attn.out_v_proj.weight': 'fusion_layer.attn.out_vision_proj.weight', + 'attn.out_v_proj.bias': 'fusion_layer.attn.out_vision_proj.bias', + 'attn.out_l_proj.weight': 'fusion_layer.attn.out_text_proj.weight', + 'attn.out_l_proj.bias': 'fusion_layer.attn.out_text_proj.bias', + } + for layer in range(config.encoder_layers): + # deformable + for src, dest in deformable_key_mappings.items(): + rename_keys.append((f"transformer.encoder.layers.{layer}.{src}", + f"model.encoder.layers.{layer}.{dest}")) + # text enhance + for src, dest in text_enhancer_key_mappings.items(): + rename_keys.append((f"transformer.encoder.text_layers.{layer}.{src}", + f"model.encoder.layers.{layer}.{dest}")) + # fusion layers + for src, dest in fusion_key_mappings.items(): + rename_keys.append((f"transformer.encoder.fusion_layers.{layer}.{src}", + f"model.encoder.layers.{layer}.{dest}")) + ########################################## ENCODER - END + + ########################################## DECODER - START + key_mappings_decoder = { + 'cross_attn.sampling_offsets.weight': 'encoder_attn.sampling_offsets.weight', + 'cross_attn.sampling_offsets.bias': 'encoder_attn.sampling_offsets.bias', + 'cross_attn.attention_weights.weight': 'encoder_attn.attention_weights.weight', + 'cross_attn.attention_weights.bias': 'encoder_attn.attention_weights.bias', + 'cross_attn.value_proj.weight': 'encoder_attn.value_proj.weight', + 'cross_attn.value_proj.bias': 'encoder_attn.value_proj.bias', + 'cross_attn.output_proj.weight': 'encoder_attn.output_proj.weight', + 'cross_attn.output_proj.bias': 'encoder_attn.output_proj.bias', + 'norm1.weight': 'encoder_attn_layer_norm.weight', + 'norm1.bias': 'encoder_attn_layer_norm.bias', + 'ca_text.in_proj_weight': 'encoder_attn_text.in_proj_weight', + 'ca_text.in_proj_bias': 'encoder_attn_text.in_proj_bias', + 'ca_text.out_proj.weight': 'encoder_attn_text.out_proj.weight', + 'ca_text.out_proj.bias': 'encoder_attn_text.out_proj.bias', + 'catext_norm.weight': 'encoder_attn_text_layer_norm.weight', + 
'catext_norm.bias': 'encoder_attn_text_layer_norm.bias', + 'self_attn.in_proj_weight': 'self_attn.in_proj_weight', + 'self_attn.in_proj_bias': 'self_attn.in_proj_bias', + 'self_attn.out_proj.weight': 'self_attn.out_proj.weight', + 'self_attn.out_proj.bias': 'self_attn.out_proj.bias', + 'norm2.weight': 'self_attn_layer_norm.weight', + 'norm2.bias': 'self_attn_layer_norm.bias', + 'linear1.weight': 'fc1.weight', + 'linear1.bias': 'fc1.bias', + 'linear2.weight': 'fc2.weight', + 'linear2.bias': 'fc2.bias', + 'norm3.weight': 'final_layer_norm.weight', + 'norm3.bias': 'final_layer_norm.bias', + } + for layer_num in range(config.decoder_layers): + source_prefix_decoder = f'transformer.decoder.layers.{layer_num}.' + target_prefix_decoder = f'model.decoder.layers.{layer_num}.' + + for source_name, target_name in key_mappings_decoder.items(): + rename_keys.append((source_prefix_decoder + source_name, + target_prefix_decoder + target_name)) + ########################################## DECODER - END + + ########################################## Additional - START + for layer_name, params in state_dict.items(): + #### TEXT BACKBONE + if "bert" in layer_name: + rename_keys.append((layer_name, layer_name.replace("bert", "model.text_backbone"))) + #### INPUT PROJ - PROJECT OUTPUT FEATURES FROM VISION BACKBONE + if "input_proj" in layer_name: + rename_keys.append((layer_name, layer_name.replace("input_proj", "model.input_proj_vision"))) + #### INPUT PROJ - PROJECT OUTPUT FEATURES FROM TEXT BACKBONE + if "feat_map" in layer_name: + rename_keys.append((layer_name, layer_name.replace("feat_map", "model.text_projection"))) + #### DECODER REFERENCE POINT HEAD + if "transformer.decoder.ref_point_head" in layer_name: + rename_keys.append((layer_name, layer_name.replace("transformer.decoder.ref_point_head", + "model.decoder.reference_points_head"))) + #### DECODER BBOX EMBED + if "transformer.decoder.bbox_embed" in layer_name: + rename_keys.append((layer_name, layer_name.replace("transformer.decoder.bbox_embed", + "model.decoder.bbox_embed"))) + if "transformer.enc_output" in layer_name: + rename_keys.append((layer_name, layer_name.replace("transformer", "model"))) + + if "transformer.enc_out_bbox_embed" in layer_name: + rename_keys.append((layer_name, layer_name.replace("transformer.enc_out_bbox_embed", + "model.encoder_output_bbox_embed"))) + + rename_keys.append(("transformer.level_embed", "model.level_embed")) + rename_keys.append(("transformer.decoder.norm.weight", "model.decoder.layer_norm.weight")) + rename_keys.append(("transformer.decoder.norm.bias", "model.decoder.layer_norm.bias")) + rename_keys.append(("transformer.tgt_embed.weight", "model.query_position_embeddings.weight")) + ########################################## Additional - END + + # fmt: on + return rename_keys + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +# we split up the matrix of each encoder layer into queries, keys and values +def read_in_q_k_v_encoder(state_dict, config): + ########################################## VISION BACKBONE - START + embed_dim = config.backbone_config.embed_dim + for layer, depth in enumerate(config.backbone_config.depths): + hidden_size = embed_dim * 2**layer + for block in range(depth): + # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) + in_proj_weight = state_dict.pop(f"backbone.0.layers.{layer}.blocks.{block}.attn.qkv.weight") + in_proj_bias = state_dict.pop(f"backbone.0.layers.{layer}.blocks.{block}.attn.qkv.bias") + # next, add 
query, keys and values (in that order) to the state dict + state_dict[ + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.query.weight" + ] = in_proj_weight[:hidden_size, :] + state_dict[ + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.query.bias" + ] = in_proj_bias[:hidden_size] + + state_dict[ + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.key.weight" + ] = in_proj_weight[hidden_size : hidden_size * 2, :] + state_dict[ + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.key.bias" + ] = in_proj_bias[hidden_size : hidden_size * 2] + + state_dict[ + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.value.weight" + ] = in_proj_weight[-hidden_size:, :] + state_dict[ + f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.value.bias" + ] = in_proj_bias[-hidden_size:] + ########################################## VISION BACKBONE - END + + +def read_in_q_k_v_text_enhancer(state_dict, config): + hidden_size = config.hidden_size + for idx in range(config.encoder_layers): + # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) + in_proj_weight = state_dict.pop(f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.in_proj_weight") + in_proj_bias = state_dict.pop(f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.in_proj_bias") + # next, add query, keys and values (in that order) to the state dict + state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.query.weight"] = in_proj_weight[ + :hidden_size, : + ] + state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.query.bias"] = in_proj_bias[:hidden_size] + + state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.key.weight"] = in_proj_weight[ + hidden_size : hidden_size * 2, : + ] + state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.key.bias"] = in_proj_bias[ + hidden_size : hidden_size * 2 + ] + + state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.value.weight"] = in_proj_weight[ + -hidden_size:, : + ] + state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.value.bias"] = in_proj_bias[ + -hidden_size: + ] + + +def read_in_q_k_v_decoder(state_dict, config): + hidden_size = config.hidden_size + for idx in range(config.decoder_layers): + # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) + in_proj_weight = state_dict.pop(f"model.decoder.layers.{idx}.self_attn.in_proj_weight") + in_proj_bias = state_dict.pop(f"model.decoder.layers.{idx}.self_attn.in_proj_bias") + # next, add query, keys and values (in that order) to the state dict + state_dict[f"model.decoder.layers.{idx}.self_attn.query.weight"] = in_proj_weight[:hidden_size, :] + state_dict[f"model.decoder.layers.{idx}.self_attn.query.bias"] = in_proj_bias[:hidden_size] + + state_dict[f"model.decoder.layers.{idx}.self_attn.key.weight"] = in_proj_weight[ + hidden_size : hidden_size * 2, : + ] + state_dict[f"model.decoder.layers.{idx}.self_attn.key.bias"] = in_proj_bias[hidden_size : hidden_size * 2] + + state_dict[f"model.decoder.layers.{idx}.self_attn.value.weight"] = in_proj_weight[-hidden_size:, :] + state_dict[f"model.decoder.layers.{idx}.self_attn.value.bias"] = in_proj_bias[-hidden_size:] + + # read in weights + bias of cross-attention 
+ in_proj_weight = state_dict.pop(f"model.decoder.layers.{idx}.encoder_attn_text.in_proj_weight") + in_proj_bias = state_dict.pop(f"model.decoder.layers.{idx}.encoder_attn_text.in_proj_bias") + + # next, add query, keys and values (in that order) to the state dict + state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.query.weight"] = in_proj_weight[:hidden_size, :] + state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.query.bias"] = in_proj_bias[:hidden_size] + + state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.key.weight"] = in_proj_weight[ + hidden_size : hidden_size * 2, : + ] + state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.key.bias"] = in_proj_bias[ + hidden_size : hidden_size * 2 + ] + + state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.value.weight"] = in_proj_weight[-hidden_size:, :] + state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.value.bias"] = in_proj_bias[-hidden_size:] + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + image = Image.open(requests.get(url, stream=True).raw).convert("RGB") + return image + + +def preprocess_caption(caption: str) -> str: + result = caption.lower().strip() + if result.endswith("."): + return result + return result + "." + + +@torch.no_grad() +def convert_grounding_dino_checkpoint(args): + model_name = args.model_name + pytorch_dump_folder_path = args.pytorch_dump_folder_path + push_to_hub = args.push_to_hub + verify_logits = args.verify_logits + + checkpoint_mapping = { + "grounding-dino-tiny": "https://huggingface.co/ShilongLiu/GroundingDino/resolve/main/groundingdino_swint_ogc.pth", + "grounding-dino-base": "https://huggingface.co/ShilongLiu/GroundingDino/resolve/main/groundingdino_swinb_cogcoor.pth", + } + # Define default GroundingDino configuation + config = get_grounding_dino_config(model_name) + + # Load original checkpoint + checkpoint_url = checkpoint_mapping[model_name] + original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"] + original_state_dict = {k.replace("module.", ""): v for k, v in original_state_dict.items()} + + for name, param in original_state_dict.items(): + print(name, param.shape) + + # Rename keys + new_state_dict = original_state_dict.copy() + rename_keys = create_rename_keys(original_state_dict, config) + + for src, dest in rename_keys: + rename_key(new_state_dict, src, dest) + read_in_q_k_v_encoder(new_state_dict, config) + read_in_q_k_v_text_enhancer(new_state_dict, config) + read_in_q_k_v_decoder(new_state_dict, config) + + # Load HF model + model = GroundingDinoForObjectDetection(config) + model.eval() + missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) + print("Missing keys:", missing_keys) + print("Unexpected keys:", unexpected_keys) + + # Load and process test image + image = prepare_img() + transforms = T.Compose([T.Resize(size=800, max_size=1333), T.ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + original_pixel_values = transforms(image).unsqueeze(0) + + image_processor = GroundingDinoImageProcessor() + tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") + processor = GroundingDinoProcessor(image_processor=image_processor, tokenizer=tokenizer) + + text = "a cat" + inputs = processor(images=image, text=preprocess_caption(text), return_tensors="pt") + + assert torch.allclose(original_pixel_values, inputs.pixel_values, atol=1e-4) + + if verify_logits: + # Running forward 
+ with torch.no_grad(): + outputs = model(**inputs) + + print(outputs.logits[0, :3, :3]) + + expected_slice = torch.tensor( + [[-4.8913, -0.1900, -0.2161], [-4.9653, -0.3719, -0.3950], [-5.9599, -3.3765, -3.3104]] + ) + + assert torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4) + print("Looks ok!") + + if pytorch_dump_folder_path is not None: + model.save_pretrained(pytorch_dump_folder_path) + processor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + model.push_to_hub(f"EduardoPacheco/{model_name}") + processor.push_to_hub(f"EduardoPacheco/{model_name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="grounding-dino-tiny", + type=str, + choices=["grounding-dino-tiny", "grounding-dino-base"], + help="Name of the GroundingDino model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + parser.add_argument( + "--verify_logits", action="store_false", help="Whether or not to verify logits after conversion." + ) + + args = parser.parse_args() + convert_grounding_dino_checkpoint(args) diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/image_processing_grounding_dino.py b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/image_processing_grounding_dino.py new file mode 100644 index 0000000000000000000000000000000000000000..8b39d6801ca00057cf454f543d462f92ad3738cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/image_processing_grounding_dino.py @@ -0,0 +1,1511 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
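Editor's note (not part of this patch): the conversion entry point above writes a HuggingFace-format checkpoint to `--pytorch_dump_folder_path`. As a minimal usage sketch under assumptions, the converted checkpoint could be loaded back for zero-shot detection roughly as follows; the local path, the text prompt, and the score threshold are illustrative, and `post_process_object_detection` is the method defined on the image processor later in this file.

import requests
import torch
from PIL import Image
from transformers import GroundingDinoForObjectDetection, GroundingDinoProcessor

# Path produced by --pytorch_dump_folder_path above (illustrative assumption).
checkpoint = "./grounding-dino-tiny-hf"
processor = GroundingDinoProcessor.from_pretrained(checkpoint)
model = GroundingDinoForObjectDetection.from_pretrained(checkpoint).eval()

# Same COCO cats image used by prepare_img() in the conversion script.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

# Captions are expected lowercased and ending with a period (see preprocess_caption above).
inputs = processor(images=image, text="a cat.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Rescale boxes back to the original image size; the threshold is an illustrative choice.
results = processor.image_processor.post_process_object_detection(
    outputs, threshold=0.35, target_sizes=[image.size[::-1]]
)
print(results[0]["scores"], results[0]["labels"], results[0]["boxes"])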
+"""Image processor class for Deformable DETR.""" + +import io +import pathlib +from collections import defaultdict +from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union + +import numpy as np + +from ...feature_extraction_utils import BatchFeature +from ...image_processing_utils import BaseImageProcessor, get_size_dict +from ...image_transforms import ( + PaddingMode, + center_to_corners_format, + corners_to_center_format, + id_to_rgb, + pad, + rescale, + resize, + rgb_to_id, + to_channel_dimension_format, +) +from ...image_utils import ( + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + get_image_size, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, + validate_annotations, + validate_kwargs, + validate_preprocess_arguments, +) +from ...utils import ( + ExplicitEnum, + TensorType, + is_flax_available, + is_jax_tensor, + is_scipy_available, + is_tf_available, + is_tf_tensor, + is_torch_available, + is_torch_tensor, + is_vision_available, + logging, +) + + +if is_torch_available(): + import torch + from torch import nn + + +if is_vision_available(): + import PIL + +if is_scipy_available(): + import scipy.special + import scipy.stats + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +AnnotationType = Dict[str, Union[int, str, List[Dict]]] + + +class AnnotationFormat(ExplicitEnum): + COCO_DETECTION = "coco_detection" + COCO_PANOPTIC = "coco_panoptic" + + +SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) + + +# Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio +def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: + """ + Computes the output image size given the input image size and the desired output size. + + Args: + image_size (`Tuple[int, int]`): + The input image size. + size (`int`): + The desired output size. + max_size (`int`, *optional*): + The maximum allowed output size. + """ + height, width = image_size + if max_size is not None: + min_original_size = float(min((height, width))) + max_original_size = float(max((height, width))) + if max_original_size / min_original_size * size > max_size: + size = int(round(max_size * min_original_size / max_original_size)) + + if (height <= width and height == size) or (width <= height and width == size): + return height, width + + if width < height: + ow = size + oh = int(size * height / width) + else: + oh = size + ow = int(size * width / height) + return (oh, ow) + + +# Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size +def get_resize_output_image_size( + input_image: np.ndarray, + size: Union[int, Tuple[int, int], List[int]], + max_size: Optional[int] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> Tuple[int, int]: + """ + Computes the output image size given the input image size and the desired output size. If the desired output size + is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output + image size is computed by keeping the aspect ratio of the input image size. + + Args: + input_image (`np.ndarray`): + The image to resize. + size (`int` or `Tuple[int, int]` or `List[int]`): + The desired output size. + max_size (`int`, *optional*): + The maximum allowed output size. 
+ input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred from the input image. + """ + image_size = get_image_size(input_image, input_data_format) + if isinstance(size, (list, tuple)): + return size + + return get_size_with_aspect_ratio(image_size, size, max_size) + + +# Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn +def get_numpy_to_framework_fn(arr) -> Callable: + """ + Returns a function that converts a numpy array to the framework of the input array. + + Args: + arr (`np.ndarray`): The array to convert. + """ + if isinstance(arr, np.ndarray): + return np.array + if is_tf_available() and is_tf_tensor(arr): + import tensorflow as tf + + return tf.convert_to_tensor + if is_torch_available() and is_torch_tensor(arr): + import torch + + return torch.tensor + if is_flax_available() and is_jax_tensor(arr): + import jax.numpy as jnp + + return jnp.array + raise ValueError(f"Cannot convert arrays of type {type(arr)}") + + +# Copied from transformers.models.detr.image_processing_detr.safe_squeeze +def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: + """ + Squeezes an array, but only if the axis specified has dim 1. + """ + if axis is None: + return arr.squeeze() + + try: + return arr.squeeze(axis=axis) + except ValueError: + return arr + + +# Copied from transformers.models.detr.image_processing_detr.normalize_annotation +def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: + image_height, image_width = image_size + norm_annotation = {} + for key, value in annotation.items(): + if key == "boxes": + boxes = value + boxes = corners_to_center_format(boxes) + boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) + norm_annotation[key] = boxes + else: + norm_annotation[key] = value + return norm_annotation + + +# Copied from transformers.models.detr.image_processing_detr.max_across_indices +def max_across_indices(values: Iterable[Any]) -> List[Any]: + """ + Return the maximum value across all indices of an iterable of values. + """ + return [max(values_i) for values_i in zip(*values)] + + +# Copied from transformers.models.detr.image_processing_detr.get_max_height_width +def get_max_height_width( + images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None +) -> List[int]: + """ + Get the maximum height and width across all images in a batch. + """ + if input_data_format is None: + input_data_format = infer_channel_dimension_format(images[0]) + + if input_data_format == ChannelDimension.FIRST: + _, max_height, max_width = max_across_indices([img.shape for img in images]) + elif input_data_format == ChannelDimension.LAST: + max_height, max_width, _ = max_across_indices([img.shape for img in images]) + else: + raise ValueError(f"Invalid channel dimension format: {input_data_format}") + return (max_height, max_width) + + +# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask +def make_pixel_mask( + image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None +) -> np.ndarray: + """ + Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. + + Args: + image (`np.ndarray`): + Image to make the pixel mask for. + output_size (`Tuple[int, int]`): + Output size of the mask. 
+ """ + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + mask = np.zeros(output_size, dtype=np.int64) + mask[:input_height, :input_width] = 1 + return mask + + +# Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask +def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: + """ + Convert a COCO polygon annotation to a mask. + + Args: + segmentations (`List[List[float]]`): + List of polygons, each polygon represented by a list of x-y coordinates. + height (`int`): + Height of the mask. + width (`int`): + Width of the mask. + """ + try: + from pycocotools import mask as coco_mask + except ImportError: + raise ImportError("Pycocotools is not installed in your environment.") + + masks = [] + for polygons in segmentations: + rles = coco_mask.frPyObjects(polygons, height, width) + mask = coco_mask.decode(rles) + if len(mask.shape) < 3: + mask = mask[..., None] + mask = np.asarray(mask, dtype=np.uint8) + mask = np.any(mask, axis=2) + masks.append(mask) + if masks: + masks = np.stack(masks, axis=0) + else: + masks = np.zeros((0, height, width), dtype=np.uint8) + + return masks + + +# Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->GroundingDino +def prepare_coco_detection_annotation( + image, + target, + return_segmentation_masks: bool = False, + input_data_format: Optional[Union[ChannelDimension, str]] = None, +): + """ + Convert the target in COCO format into the format expected by GroundingDino. + """ + image_height, image_width = get_image_size(image, channel_dim=input_data_format) + + image_id = target["image_id"] + image_id = np.asarray([image_id], dtype=np.int64) + + # Get all COCO annotations for the given image. 
+ annotations = target["annotations"] + annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] + + classes = [obj["category_id"] for obj in annotations] + classes = np.asarray(classes, dtype=np.int64) + + # for conversion to coco api + area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) + iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) + + boxes = [obj["bbox"] for obj in annotations] + # guard against no boxes via resizing + boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) + boxes[:, 2:] += boxes[:, :2] + boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) + boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) + + keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) + + new_target = {} + new_target["image_id"] = image_id + new_target["class_labels"] = classes[keep] + new_target["boxes"] = boxes[keep] + new_target["area"] = area[keep] + new_target["iscrowd"] = iscrowd[keep] + new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) + + if annotations and "keypoints" in annotations[0]: + keypoints = [obj["keypoints"] for obj in annotations] + # Converting the filtered keypoints list to a numpy array + keypoints = np.asarray(keypoints, dtype=np.float32) + # Apply the keep mask here to filter the relevant annotations + keypoints = keypoints[keep] + num_keypoints = keypoints.shape[0] + keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints + new_target["keypoints"] = keypoints + + if return_segmentation_masks: + segmentation_masks = [obj["segmentation"] for obj in annotations] + masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) + new_target["masks"] = masks[keep] + + return new_target + + +# Copied from transformers.models.detr.image_processing_detr.masks_to_boxes +def masks_to_boxes(masks: np.ndarray) -> np.ndarray: + """ + Compute the bounding boxes around the provided panoptic segmentation masks. + + Args: + masks: masks in format `[number_masks, height, width]` where N is the number of masks + + Returns: + boxes: bounding boxes in format `[number_masks, 4]` in xyxy format + """ + if masks.size == 0: + return np.zeros((0, 4)) + + h, w = masks.shape[-2:] + y = np.arange(0, h, dtype=np.float32) + x = np.arange(0, w, dtype=np.float32) + # see https://github.com/pytorch/pytorch/issues/50276 + y, x = np.meshgrid(y, x, indexing="ij") + + x_mask = masks * np.expand_dims(x, axis=0) + x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) + x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) + x_min = x.filled(fill_value=1e8) + x_min = x_min.reshape(x_min.shape[0], -1).min(-1) + + y_mask = masks * np.expand_dims(y, axis=0) + y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) + y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) + y_min = y.filled(fill_value=1e8) + y_min = y_min.reshape(y_min.shape[0], -1).min(-1) + + return np.stack([x_min, y_min, x_max, y_max], 1) + + +# Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->GroundingDino +def prepare_coco_panoptic_annotation( + image: np.ndarray, + target: Dict, + masks_path: Union[str, pathlib.Path], + return_masks: bool = True, + input_data_format: Union[ChannelDimension, str] = None, +) -> Dict: + """ + Prepare a coco panoptic annotation for GroundingDino. 
+ """ + image_height, image_width = get_image_size(image, channel_dim=input_data_format) + annotation_path = pathlib.Path(masks_path) / target["file_name"] + + new_target = {} + new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) + new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) + new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) + + if "segments_info" in target: + masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) + masks = rgb_to_id(masks) + + ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) + masks = masks == ids[:, None, None] + masks = masks.astype(np.uint8) + if return_masks: + new_target["masks"] = masks + new_target["boxes"] = masks_to_boxes(masks) + new_target["class_labels"] = np.array( + [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 + ) + new_target["iscrowd"] = np.asarray( + [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 + ) + new_target["area"] = np.asarray( + [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 + ) + + return new_target + + +# Copied from transformers.models.detr.image_processing_detr.get_segmentation_image +def get_segmentation_image( + masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False +): + h, w = input_size + final_h, final_w = target_size + + m_id = scipy.special.softmax(masks.transpose(0, 1), -1) + + if m_id.shape[-1] == 0: + # We didn't detect any mask :( + m_id = np.zeros((h, w), dtype=np.int64) + else: + m_id = m_id.argmax(-1).reshape(h, w) + + if deduplicate: + # Merge the masks corresponding to the same stuff class + for equiv in stuff_equiv_classes.values(): + for eq_id in equiv: + m_id[m_id == eq_id] = equiv[0] + + seg_img = id_to_rgb(m_id) + seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) + return seg_img + + +# Copied from transformers.models.detr.image_processing_detr.get_mask_area +def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray: + final_h, final_w = target_size + np_seg_img = seg_img.astype(np.uint8) + np_seg_img = np_seg_img.reshape(final_h, final_w, 3) + m_id = rgb_to_id(np_seg_img) + area = [(m_id == i).sum() for i in range(n_classes)] + return area + + +# Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities +def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + probs = scipy.special.softmax(logits, axis=-1) + labels = probs.argmax(-1, keepdims=True) + scores = np.take_along_axis(probs, labels, axis=-1) + scores, labels = scores.squeeze(-1), labels.squeeze(-1) + return scores, labels + + +# Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample +def post_process_panoptic_sample( + out_logits: np.ndarray, + masks: np.ndarray, + boxes: np.ndarray, + processed_size: Tuple[int, int], + target_size: Tuple[int, int], + is_thing_map: Dict, + threshold=0.85, +) -> Dict: + """ + Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample. + + Args: + out_logits (`torch.Tensor`): + The logits for this sample. + masks (`torch.Tensor`): + The predicted segmentation masks for this sample. + boxes (`torch.Tensor`): + The prediced bounding boxes for this sample. 
The boxes are in the normalized format `(center_x, center_y, + width, height)` and values between `[0, 1]`, relative to the size the image (disregarding padding). + processed_size (`Tuple[int, int]`): + The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size + after data augmentation but before batching. + target_size (`Tuple[int, int]`): + The target size of the image, `(height, width)` corresponding to the requested final size of the + prediction. + is_thing_map (`Dict`): + A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not. + threshold (`float`, *optional*, defaults to 0.85): + The threshold used to binarize the segmentation masks. + """ + # we filter empty queries and detection below threshold + scores, labels = score_labels_from_class_probabilities(out_logits) + keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold) + + cur_scores = scores[keep] + cur_classes = labels[keep] + cur_boxes = center_to_corners_format(boxes[keep]) + + if len(cur_boxes) != len(cur_classes): + raise ValueError("Not as many boxes as there are classes") + + cur_masks = masks[keep] + cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR) + cur_masks = safe_squeeze(cur_masks, 1) + b, h, w = cur_masks.shape + + # It may be that we have several predicted masks for the same stuff class. + # In the following, we track the list of masks ids for each stuff class (they are merged later on) + cur_masks = cur_masks.reshape(b, -1) + stuff_equiv_classes = defaultdict(list) + for k, label in enumerate(cur_classes): + if not is_thing_map[label]: + stuff_equiv_classes[label].append(k) + + seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True) + area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores)) + + # We filter out any mask that is too small + if cur_classes.size() > 0: + # We know filter empty masks as long as we find some + filtered_small = np.array([a <= 4 for a in area], dtype=bool) + while filtered_small.any(): + cur_masks = cur_masks[~filtered_small] + cur_scores = cur_scores[~filtered_small] + cur_classes = cur_classes[~filtered_small] + seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True) + area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores)) + filtered_small = np.array([a <= 4 for a in area], dtype=bool) + else: + cur_classes = np.ones((1, 1), dtype=np.int64) + + segments_info = [ + {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a} + for i, (cat, a) in enumerate(zip(cur_classes, area)) + ] + del cur_classes + + with io.BytesIO() as out: + PIL.Image.fromarray(seg_img).save(out, format="PNG") + predictions = {"png_string": out.getvalue(), "segments_info": segments_info} + + return predictions + + +# Copied from transformers.models.detr.image_processing_detr.resize_annotation +def resize_annotation( + annotation: Dict[str, Any], + orig_size: Tuple[int, int], + target_size: Tuple[int, int], + threshold: float = 0.5, + resample: PILImageResampling = PILImageResampling.NEAREST, +): + """ + Resizes an annotation to a target size. + + Args: + annotation (`Dict[str, Any]`): + The annotation dictionary. + orig_size (`Tuple[int, int]`): + The original size of the input image. + target_size (`Tuple[int, int]`): + The target size of the image, as returned by the preprocessing `resize` step. 
+ threshold (`float`, *optional*, defaults to 0.5): + The threshold used to binarize the segmentation masks. + resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): + The resampling filter to use when resizing the masks. + """ + ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) + ratio_height, ratio_width = ratios + + new_annotation = {} + new_annotation["size"] = target_size + + for key, value in annotation.items(): + if key == "boxes": + boxes = value + scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) + new_annotation["boxes"] = scaled_boxes + elif key == "area": + area = value + scaled_area = area * (ratio_width * ratio_height) + new_annotation["area"] = scaled_area + elif key == "masks": + masks = value[:, None] + masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) + masks = masks.astype(np.float32) + masks = masks[:, 0] > threshold + new_annotation["masks"] = masks + elif key == "size": + new_annotation["size"] = target_size + else: + new_annotation[key] = value + + return new_annotation + + +# Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle +def binary_mask_to_rle(mask): + """ + Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. + + Args: + mask (`torch.Tensor` or `numpy.array`): + A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target + segment_id or class_id. + Returns: + `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE + format. + """ + if is_torch_tensor(mask): + mask = mask.numpy() + + pixels = mask.flatten() + pixels = np.concatenate([[0], pixels, [0]]) + runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 + runs[1::2] -= runs[::2] + return list(runs) + + +# Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle +def convert_segmentation_to_rle(segmentation): + """ + Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. + + Args: + segmentation (`torch.Tensor` or `numpy.array`): + A segmentation map of shape `(height, width)` where each value denotes a segment or class id. + Returns: + `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. + """ + segment_ids = torch.unique(segmentation) + + run_length_encodings = [] + for idx in segment_ids: + mask = torch.where(segmentation == idx, 1, 0) + rle = binary_mask_to_rle(mask) + run_length_encodings.append(rle) + + return run_length_encodings + + +# Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects +def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): + """ + Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and + `labels`. + + Args: + masks (`torch.Tensor`): + A tensor of shape `(num_queries, height, width)`. + scores (`torch.Tensor`): + A tensor of shape `(num_queries)`. + labels (`torch.Tensor`): + A tensor of shape `(num_queries)`. + object_mask_threshold (`float`): + A number between 0 and 1 used to binarize the masks. + Raises: + `ValueError`: Raised when the first dimension doesn't match in all input tensors. + Returns: + `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region + < `object_mask_threshold`. 
+ """ + if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): + raise ValueError("mask, scores and labels must have the same shape!") + + to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) + + return masks[to_keep], scores[to_keep], labels[to_keep] + + +# Copied from transformers.models.detr.image_processing_detr.check_segment_validity +def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): + # Get the mask associated with the k class + mask_k = mask_labels == k + mask_k_area = mask_k.sum() + + # Compute the area of all the stuff in query k + original_area = (mask_probs[k] >= mask_threshold).sum() + mask_exists = mask_k_area > 0 and original_area > 0 + + # Eliminate disconnected tiny segments + if mask_exists: + area_ratio = mask_k_area / original_area + if not area_ratio.item() > overlap_mask_area_threshold: + mask_exists = False + + return mask_exists, mask_k + + +# Copied from transformers.models.detr.image_processing_detr.compute_segments +def compute_segments( + mask_probs, + pred_scores, + pred_labels, + mask_threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + label_ids_to_fuse: Optional[Set[int]] = None, + target_size: Tuple[int, int] = None, +): + height = mask_probs.shape[1] if target_size is None else target_size[0] + width = mask_probs.shape[2] if target_size is None else target_size[1] + + segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) + segments: List[Dict] = [] + + if target_size is not None: + mask_probs = nn.functional.interpolate( + mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False + )[0] + + current_segment_id = 0 + + # Weigh each mask by its prediction score + mask_probs *= pred_scores.view(-1, 1, 1) + mask_labels = mask_probs.argmax(0) # [height, width] + + # Keep track of instances of each class + stuff_memory_list: Dict[str, int] = {} + for k in range(pred_labels.shape[0]): + pred_class = pred_labels[k].item() + should_fuse = pred_class in label_ids_to_fuse + + # Check if mask exists and large enough to be a segment + mask_exists, mask_k = check_segment_validity( + mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold + ) + + if mask_exists: + if pred_class in stuff_memory_list: + current_segment_id = stuff_memory_list[pred_class] + else: + current_segment_id += 1 + + # Add current object segment to final segmentation map + segmentation[mask_k] = current_segment_id + segment_score = round(pred_scores[k].item(), 6) + segments.append( + { + "id": current_segment_id, + "label_id": pred_class, + "was_fused": should_fuse, + "score": segment_score, + } + ) + if should_fuse: + stuff_memory_list[pred_class] = current_segment_id + + return segmentation, segments + + +class GroundingDinoImageProcessor(BaseImageProcessor): + r""" + Constructs a Grounding DINO image processor. + + Args: + format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`): + Data format of the annotations. One of "coco_detection" or "coco_panoptic". + do_resize (`bool`, *optional*, defaults to `True`): + Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be + overridden by the `do_resize` parameter in the `preprocess` method. + size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): + Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in + the `preprocess` method. 
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): + Resampling filter to use if resizing the image. + do_rescale (`bool`, *optional*, defaults to `True`): + Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the + `do_rescale` parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the + `preprocess` method. Controls whether to normalize the image. Can be overridden by the `do_normalize` + parameter in the `preprocess` method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` + method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): + Mean values to use when normalizing the image. Can be a single value or a list of values, one for each + channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): + Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one + for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. + do_convert_annotations (`bool`, *optional*, defaults to `True`): + Controls whether to convert the annotations to the format expected by the DETR model. Converts the + bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`. + Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method. + do_pad (`bool`, *optional*, defaults to `True`): + Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be + overridden by the `do_pad` parameter in the `preprocess` method. + """ + + model_input_names = ["pixel_values", "pixel_mask"] + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__ + def __init__( + self, + format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BILINEAR, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Union[float, List[float]] = None, + image_std: Union[float, List[float]] = None, + do_convert_annotations: Optional[bool] = None, + do_pad: bool = True, + **kwargs, + ) -> None: + if "pad_and_return_pixel_mask" in kwargs: + do_pad = kwargs.pop("pad_and_return_pixel_mask") + + if "max_size" in kwargs: + logger.warning_once( + "The `max_size` parameter is deprecated and will be removed in v4.26. 
" + "Please specify in `size['longest_edge'] instead`.", + ) + max_size = kwargs.pop("max_size") + else: + max_size = None if size is None else 1333 + + size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} + size = get_size_dict(size, max_size=max_size, default_to_square=False) + + # Backwards compatibility + if do_convert_annotations is None: + do_convert_annotations = do_normalize + + super().__init__(**kwargs) + self.format = format + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.do_convert_annotations = do_convert_annotations + self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD + self.do_pad = do_pad + self._valid_processor_keys = [ + "images", + "annotations", + "return_segmentation_masks", + "masks_path", + "do_resize", + "size", + "resample", + "do_rescale", + "rescale_factor", + "do_normalize", + "do_convert_annotations", + "image_mean", + "image_std", + "do_pad", + "format", + "return_tensors", + "data_format", + "input_data_format", + ] + + @classmethod + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->GroundingDino + def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): + """ + Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is + created using from_dict and kwargs e.g. `GroundingDinoImageProcessor.from_pretrained(checkpoint, size=600, + max_size=800)` + """ + image_processor_dict = image_processor_dict.copy() + if "max_size" in kwargs: + image_processor_dict["max_size"] = kwargs.pop("max_size") + if "pad_and_return_pixel_mask" in kwargs: + image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask") + return super().from_dict(image_processor_dict, **kwargs) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->GroundingDino + def prepare_annotation( + self, + image: np.ndarray, + target: Dict, + format: Optional[AnnotationFormat] = None, + return_segmentation_masks: bool = None, + masks_path: Optional[Union[str, pathlib.Path]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> Dict: + """ + Prepare an annotation for feeding into GroundingDino model. + """ + format = format if format is not None else self.format + + if format == AnnotationFormat.COCO_DETECTION: + return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks + target = prepare_coco_detection_annotation( + image, target, return_segmentation_masks, input_data_format=input_data_format + ) + elif format == AnnotationFormat.COCO_PANOPTIC: + return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks + target = prepare_coco_panoptic_annotation( + image, + target, + masks_path=masks_path, + return_masks=return_segmentation_masks, + input_data_format=input_data_format, + ) + else: + raise ValueError(f"Format {format} is not supported.") + return target + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare + def prepare(self, image, target, return_segmentation_masks=None, masks_path=None): + logger.warning_once( + "The `prepare` method is deprecated and will be removed in a v4.33. 
" + "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " + "does not return the image anymore.", + ) + target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) + return image, target + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask + def convert_coco_poly_to_mask(self, *args, **kwargs): + logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ") + return convert_coco_poly_to_mask(*args, **kwargs) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection + def prepare_coco_detection(self, *args, **kwargs): + logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ") + return prepare_coco_detection_annotation(*args, **kwargs) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic + def prepare_coco_panoptic(self, *args, **kwargs): + logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ") + return prepare_coco_panoptic_annotation(*args, **kwargs) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BILINEAR, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an + int, smaller edge of the image will be matched to this number. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge` or + `height` and `width`. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): + Resampling filter to use if resizing the image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + if "max_size" in kwargs: + logger.warning_once( + "The `max_size` parameter is deprecated and will be removed in v4.26. " + "Please specify in `size['longest_edge'] instead`.", + ) + max_size = kwargs.pop("max_size") + else: + max_size = None + size = get_size_dict(size, max_size=max_size, default_to_square=False) + if "shortest_edge" in size and "longest_edge" in size: + size = get_resize_output_image_size( + image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format + ) + elif "height" in size and "width" in size: + size = (size["height"], size["width"]) + else: + raise ValueError( + "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" + f" {size.keys()}." 
+ ) + image = resize( + image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs + ) + return image + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation + def resize_annotation( + self, + annotation, + orig_size, + size, + resample: PILImageResampling = PILImageResampling.NEAREST, + ) -> Dict: + """ + Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched + to this number. + """ + return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale + def rescale( + self, + image: np.ndarray, + rescale_factor: float, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """ + Rescale the image by the given factor. image = image * rescale_factor. + + Args: + image (`np.ndarray`): + Image to rescale. + rescale_factor (`float`): + The value to use for rescaling. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the input image. If unset, is inferred from the input image. Can be + one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + """ + return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation + def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: + """ + Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to + `[center_x, center_y, width, height]` format and from absolute to relative pixel values. + """ + return normalize_annotation(annotation, image_size=image_size) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image + def _update_annotation_for_padded_image( + self, + annotation: Dict, + input_image_size: Tuple[int, int], + output_image_size: Tuple[int, int], + padding, + update_bboxes, + ) -> Dict: + """ + Update the annotation for a padded image. 
+ """ + new_annotation = {} + new_annotation["size"] = output_image_size + + for key, value in annotation.items(): + if key == "masks": + masks = value + masks = pad( + masks, + padding, + mode=PaddingMode.CONSTANT, + constant_values=0, + input_data_format=ChannelDimension.FIRST, + ) + masks = safe_squeeze(masks, 1) + new_annotation["masks"] = masks + elif key == "boxes" and update_bboxes: + boxes = value + boxes *= np.asarray( + [ + input_image_size[1] / output_image_size[1], + input_image_size[0] / output_image_size[0], + input_image_size[1] / output_image_size[1], + input_image_size[0] / output_image_size[0], + ] + ) + new_annotation["boxes"] = boxes + elif key == "size": + new_annotation["size"] = output_image_size + else: + new_annotation[key] = value + return new_annotation + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image + def _pad_image( + self, + image: np.ndarray, + output_size: Tuple[int, int], + annotation: Optional[Dict[str, Any]] = None, + constant_values: Union[float, Iterable[float]] = 0, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + update_bboxes: bool = True, + ) -> np.ndarray: + """ + Pad an image with zeros to the given size. + """ + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + output_height, output_width = output_size + + pad_bottom = output_height - input_height + pad_right = output_width - input_width + padding = ((0, pad_bottom), (0, pad_right)) + padded_image = pad( + image, + padding, + mode=PaddingMode.CONSTANT, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + ) + if annotation is not None: + annotation = self._update_annotation_for_padded_image( + annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes + ) + return padded_image, annotation + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad + def pad( + self, + images: List[np.ndarray], + annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, + constant_values: Union[float, Iterable[float]] = 0, + return_pixel_mask: bool = True, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + update_bboxes: bool = True, + ) -> BatchFeature: + """ + Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width + in the batch and optionally returns their corresponding pixel mask. + + Args: + images (List[`np.ndarray`]): + Images to pad. + annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): + Annotations to transform according to the padding that is applied to the images. + constant_values (`float` or `Iterable[float]`, *optional*): + The value to use for the padding if `mode` is `"constant"`. + return_pixel_mask (`bool`, *optional*, defaults to `True`): + Whether to return a pixel mask. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. 
+ data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + update_bboxes (`bool`, *optional*, defaults to `True`): + Whether to update the bounding boxes in the annotations to match the padded images. If the + bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)` + format, the bounding boxes will not be updated. + """ + pad_size = get_max_height_width(images, input_data_format=input_data_format) + + annotation_list = annotations if annotations is not None else [None] * len(images) + padded_images = [] + padded_annotations = [] + for image, annotation in zip(images, annotation_list): + padded_image, padded_annotation = self._pad_image( + image, + pad_size, + annotation, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + update_bboxes=update_bboxes, + ) + padded_images.append(padded_image) + padded_annotations.append(padded_annotation) + + data = {"pixel_values": padded_images} + + if return_pixel_mask: + masks = [ + make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) + for image in images + ] + data["pixel_mask"] = masks + + encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) + + if annotations is not None: + encoded_inputs["labels"] = [ + BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations + ] + + return encoded_inputs + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess + def preprocess( + self, + images: ImageInput, + annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, + return_segmentation_masks: bool = None, + masks_path: Optional[Union[str, pathlib.Path]] = None, + do_resize: Optional[bool] = None, + size: Optional[Dict[str, int]] = None, + resample=None, # PILImageResampling + do_rescale: Optional[bool] = None, + rescale_factor: Optional[Union[int, float]] = None, + do_normalize: Optional[bool] = None, + do_convert_annotations: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_pad: Optional[bool] = None, + format: Optional[Union[str, AnnotationFormat]] = None, + return_tensors: Optional[Union[TensorType, str]] = None, + data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> BatchFeature: + """ + Preprocess an image or a batch of images so that it can be used by the model. + + Args: + images (`ImageInput`): + Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging + from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. + annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): + List of annotations associated with the image or batch of images. If annotation is for object + detection, the annotations should be a dictionary with the following keys: + - "image_id" (`int`): The image id. + - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a + dictionary. An image can have no annotations, in which case the list should be empty. 
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys: + - "image_id" (`int`): The image id. + - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. + An image can have no segments, in which case the list should be empty. + - "file_name" (`str`): The file name of the image. + return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): + Whether to return segmentation masks. + masks_path (`str` or `pathlib.Path`, *optional*): + Path to the directory containing the segmentation masks. + do_resize (`bool`, *optional*, defaults to self.do_resize): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to self.size): + Size of the image after resizing. + resample (`PILImageResampling`, *optional*, defaults to self.resample): + Resampling filter to use when resizing the image. + do_rescale (`bool`, *optional*, defaults to self.do_rescale): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to self.rescale_factor): + Rescale factor to use when rescaling the image. + do_normalize (`bool`, *optional*, defaults to self.do_normalize): + Whether to normalize the image. + do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations): + Whether to convert the annotations to the format expected by the model. Converts the bounding + boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)` + and in relative coordinates. + image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): + Mean to use when normalizing the image. + image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): + Standard deviation to use when normalizing the image. + do_pad (`bool`, *optional*, defaults to self.do_pad): + Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch + and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros. + format (`str` or `AnnotationFormat`, *optional*, defaults to self.format): + Format of the annotations. + return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): + Type of tensors to return. If `None`, will return the list of images. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + if "pad_and_return_pixel_mask" in kwargs: + logger.warning_once( + "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " + "use `do_pad` instead." 
+ ) + do_pad = kwargs.pop("pad_and_return_pixel_mask") + + max_size = None + if "max_size" in kwargs: + logger.warning_once( + "The `max_size` argument is deprecated and will be removed in a future version, use" + " `size['longest_edge']` instead." + ) + size = kwargs.pop("max_size") + + do_resize = self.do_resize if do_resize is None else do_resize + size = self.size if size is None else size + size = get_size_dict(size=size, max_size=max_size, default_to_square=False) + resample = self.resample if resample is None else resample + do_rescale = self.do_rescale if do_rescale is None else do_rescale + rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor + do_normalize = self.do_normalize if do_normalize is None else do_normalize + image_mean = self.image_mean if image_mean is None else image_mean + image_std = self.image_std if image_std is None else image_std + do_convert_annotations = ( + self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations + ) + do_pad = self.do_pad if do_pad is None else do_pad + format = self.format if format is None else format + + images = make_list_of_images(images) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) + + # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated. + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_resize=do_resize, + size=size, + resample=resample, + ) + + if annotations is not None and isinstance(annotations, dict): + annotations = [annotations] + + if annotations is not None and len(images) != len(annotations): + raise ValueError( + f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." + ) + + format = AnnotationFormat(format) + if annotations is not None: + validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations) + + if ( + masks_path is not None + and format == AnnotationFormat.COCO_PANOPTIC + and not isinstance(masks_path, (pathlib.Path, str)) + ): + raise ValueError( + "The path to the directory containing the mask PNG files should be provided as a" + f" `pathlib.Path` or string object, but is {type(masks_path)} instead." + ) + + # All transformations expect numpy arrays + images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format. 
+ input_data_format = infer_channel_dimension_format(images[0]) + + # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) + if annotations is not None: + prepared_images = [] + prepared_annotations = [] + for image, target in zip(images, annotations): + target = self.prepare_annotation( + image, + target, + format, + return_segmentation_masks=return_segmentation_masks, + masks_path=masks_path, + input_data_format=input_data_format, + ) + prepared_images.append(image) + prepared_annotations.append(target) + images = prepared_images + annotations = prepared_annotations + del prepared_images, prepared_annotations + + # transformations + if do_resize: + if annotations is not None: + resized_images, resized_annotations = [], [] + for image, target in zip(images, annotations): + orig_size = get_image_size(image, input_data_format) + resized_image = self.resize( + image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format + ) + resized_annotation = self.resize_annotation( + target, orig_size, get_image_size(resized_image, input_data_format) + ) + resized_images.append(resized_image) + resized_annotations.append(resized_annotation) + images = resized_images + annotations = resized_annotations + del resized_images, resized_annotations + else: + images = [ + self.resize(image, size=size, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_rescale: + images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] + + if do_normalize: + images = [ + self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images + ] + + if do_convert_annotations and annotations is not None: + annotations = [ + self.normalize_annotation(annotation, get_image_size(image, input_data_format)) + for annotation, image in zip(annotations, images) + ] + + if do_pad: + # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} + encoded_inputs = self.pad( + images, + annotations=annotations, + return_pixel_mask=True, + data_format=data_format, + input_data_format=input_data_format, + update_bboxes=do_convert_annotations, + return_tensors=return_tensors, + ) + else: + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + for image in images + ] + encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) + if annotations is not None: + encoded_inputs["labels"] = [ + BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations + ] + + return encoded_inputs + + # Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_object_detection with OwlViT->GroundingDino + def post_process_object_detection( + self, outputs, threshold: float = 0.1, target_sizes: Union[TensorType, List[Tuple]] = None + ): + """ + Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, + bottom_right_x, bottom_right_y) format. + + Args: + outputs ([`GroundingDinoObjectDetectionOutput`]): + Raw outputs of the model. + threshold (`float`, *optional*): + Score threshold to keep object detection predictions. + target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): + Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size + `(height, width)` of each image in the batch. 
If unset, predictions will not be resized. + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image + in the batch as predicted by the model. + """ + # TODO: (amy) add support for other frameworks + logits, boxes = outputs.logits, outputs.pred_boxes + + if target_sizes is not None: + if len(logits) != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + + probs = torch.max(logits, dim=-1) + scores = torch.sigmoid(probs.values) + labels = probs.indices + + # Convert to [x0, y0, x1, y1] format + boxes = center_to_corners_format(boxes) + + # Convert from relative [0, 1] to absolute [0, height] coordinates + if target_sizes is not None: + if isinstance(target_sizes, List): + img_h = torch.Tensor([i[0] for i in target_sizes]) + img_w = torch.Tensor([i[1] for i in target_sizes]) + else: + img_h, img_w = target_sizes.unbind(1) + + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) + boxes = boxes * scale_fct[:, None, :] + + results = [] + for s, l, b in zip(scores, labels, boxes): + score = s[s > threshold] + label = l[s > threshold] + box = b[s > threshold] + results.append({"scores": score, "labels": label, "boxes": box}) + + return results diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/modeling_grounding_dino.py b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/modeling_grounding_dino.py new file mode 100644 index 0000000000000000000000000000000000000000..a98901015c94c66c6fa41d642e772945396be1f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/modeling_grounding_dino.py @@ -0,0 +1,3132 @@ +# coding=utf-8 +# Copyright 2024 IDEA Research and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
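As a reference for the `post_process_object_detection` method defined above, the following minimal sketch reproduces the same score and box computation on dummy tensors; the tensor sizes, the `center_to_corners` helper, and the variable names are illustrative assumptions rather than part of the library file that follows.

import torch

def center_to_corners(boxes):
    # (center_x, center_y, width, height) -> (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
    center_x, center_y, width, height = boxes.unbind(-1)
    return torch.stack(
        [center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height], dim=-1
    )

# Dummy outputs: 2 images, 900 queries, 256 text tokens
logits = torch.randn(2, 900, 256)                      # query-to-token matching logits
pred_boxes = torch.rand(2, 900, 4)                     # normalized (center_x, center_y, width, height)
target_sizes = torch.tensor([[480, 640], [600, 800]])  # (height, width) of each original image

probs = torch.max(logits, dim=-1)
scores = torch.sigmoid(probs.values)                   # best-token confidence per query
labels = probs.indices
boxes = center_to_corners(pred_boxes)

# scale relative [0, 1] boxes to absolute pixel coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]

threshold = 0.1
results = [
    {"scores": s[s > threshold], "labels": l[s > threshold], "boxes": b[s > threshold]}
    for s, l, b in zip(scores, labels, boxes)
]

The per-image dictionaries mirror what the method returns, so the sketch can be compared line by line with the implementation above.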
+""" PyTorch Grounding DINO model.""" + +import copy +import math +import os +import warnings +from dataclasses import dataclass +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch import Tensor, nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +from ...activations import ACT2FN +from ...file_utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_scipy_available, + is_timm_available, + is_torch_cuda_available, + is_vision_available, + replace_return_docstrings, + requires_backends, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import meshgrid +from ...utils import is_accelerate_available, is_ninja_available, logging +from ...utils.backbone_utils import load_backbone +from ..auto import AutoModel +from .configuration_grounding_dino import GroundingDinoConfig + + +if is_vision_available(): + from transformers.image_transforms import center_to_corners_format + +if is_accelerate_available(): + from accelerate import PartialState + from accelerate.utils import reduce + +if is_scipy_available(): + from scipy.optimize import linear_sum_assignment + +if is_timm_available(): + from timm import create_model + + +logger = logging.get_logger(__name__) + +MultiScaleDeformableAttention = None + + +# Copied from models.deformable_detr.load_cuda_kernels +def load_cuda_kernels(): + from torch.utils.cpp_extension import load + + global MultiScaleDeformableAttention + + root = Path(__file__).resolve().parent.parent.parent / "kernels" / "grounding_dino" + src_files = [ + root / filename + for filename in [ + "vision.cpp", + os.path.join("cpu", "ms_deform_attn_cpu.cpp"), + os.path.join("cuda", "ms_deform_attn_cuda.cu"), + ] + ] + + MultiScaleDeformableAttention = load( + "MultiScaleDeformableAttention", + src_files, + with_cuda=True, + extra_include_paths=[str(root)], + extra_cflags=["-DWITH_CUDA=1"], + extra_cuda_cflags=[ + "-DCUDA_HAS_FP16=1", + "-D__CUDA_NO_HALF_OPERATORS__", + "-D__CUDA_NO_HALF_CONVERSIONS__", + "-D__CUDA_NO_HALF2_OPERATORS__", + ], + ) + + +# Copied from transformers.models.deformable_detr.modeling_deformable_detr.MultiScaleDeformableAttentionFunction +class MultiScaleDeformableAttentionFunction(Function): + @staticmethod + def forward( + context, + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + im2col_step, + ): + context.im2col_step = im2col_step + output = MultiScaleDeformableAttention.ms_deform_attn_forward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + context.im2col_step, + ) + context.save_for_backward( + value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights + ) + return output + + @staticmethod + @once_differentiable + def backward(context, grad_output): + ( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + ) = context.saved_tensors + grad_value, grad_sampling_loc, grad_attn_weight = MultiScaleDeformableAttention.ms_deform_attn_backward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + grad_output, + context.im2col_step, + ) + + return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "GroundingDinoConfig" +_CHECKPOINT_FOR_DOC = 
"IDEA-Research/grounding-dino-tiny" + +GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "IDEA-Research/grounding-dino-tiny", + # See all Grounding DINO models at https://huggingface.co/models?filter=grounding-dino +] + + +@dataclass +class GroundingDinoDecoderOutput(ModelOutput): + """ + Base class for outputs of the GroundingDinoDecoder. This class adds two attributes to + BaseModelOutputWithCrossAttentions, namely: + - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer) + - a stacked tensor of intermediate reference points. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): + Stacked intermediate hidden states (output of each layer of the decoder). + intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`): + Stacked intermediate reference points (reference points of each layer of the decoder). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer + plus the initial embedding outputs. + attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads, + sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the + weighted average in the self-attention, cross-attention and multi-scale deformable attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + intermediate_hidden_states: torch.FloatTensor = None + intermediate_reference_points: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + + +@dataclass +class GroundingDinoEncoderOutput(ModelOutput): + """ + Base class for outputs of the GroundingDinoEncoder. This class extends BaseModelOutput, due to: + - vision and text last hidden states + - vision and text intermediate hidden states + + Args: + last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the vision encoder. + last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the text encoder. + vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each + layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the + output of each layer plus the initial embedding outputs. 
+ text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer) + of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of + each layer plus the initial embedding outputs. + attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads, + sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the + weighted average in the text-vision attention, vision-text attention, text-enhancer (self-attention) and + multi-scale deformable attention heads. + """ + + last_hidden_state_vision: torch.FloatTensor = None + last_hidden_state_text: torch.FloatTensor = None + vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + text_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + + +@dataclass +class GroundingDinoModelOutput(ModelOutput): + """ + Base class for outputs of the Grounding DINO encoder-decoder model. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Initial reference points sent through the Transformer decoder. + intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): + Stacked intermediate hidden states (output of each layer of the decoder). + intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): + Stacked intermediate reference points (reference points of each layer of the decoder). + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer + plus the initial embedding outputs. + decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads, + sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the + weighted average in the self-attention, cross-attention and multi-scale deformable attention heads. + encoder_last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. 
+ encoder_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each
+ layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the
+ output of each layer plus the initial embedding outputs.
+ encoder_text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer)
+ of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of
+ each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
+ sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
+ weighted average in the text-vision attention, vision-text attention, text-enhancer (self-attention) and
+ multi-scale deformable attention heads.
+ enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
+ Predicted bounding boxes scores where the top `config.num_queries` scoring bounding boxes are picked as
+ region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and
+ background).
+ enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
+ Logits of predicted bounding boxes coordinates in the first stage.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ init_reference_points: torch.FloatTensor = None
+ intermediate_hidden_states: torch.FloatTensor = None
+ intermediate_reference_points: torch.FloatTensor = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ encoder_last_hidden_state_vision: Optional[torch.FloatTensor] = None
+ encoder_last_hidden_state_text: Optional[torch.FloatTensor] = None
+ encoder_vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_text_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ enc_outputs_class: Optional[torch.FloatTensor] = None
+ enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
+
+
+@dataclass
+class GroundingDinoObjectDetectionOutput(ModelOutput):
+ """
+ Output type of [`GroundingDinoForObjectDetection`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
+ Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
+ bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
+ scale-invariant IoU loss.
+ loss_dict (`Dict`, *optional*):
+ A dictionary containing the individual losses. Useful for logging.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): + Classification logits (including no-object) for all queries. + pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These + values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding + possible padding). You can use [`~GroundingDinoProcessor.post_process_object_detection`] to retrieve the + unnormalized bounding boxes. + auxiliary_outputs (`List[Dict]`, *optional*): + Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) + and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and + `pred_boxes`) for each decoder layer. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer + plus the initial embedding outputs. + decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads, + sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the + weighted average in the self-attention, cross-attention and multi-scale deformable attention heads. + encoder_last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each + layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the + output of each layer plus the initial embedding outputs. + encoder_text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer) + of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of + each layer plus the initial embedding outputs. 
+ encoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads, + sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the + weighted average in the text-vision attention, vision-text attention, text-enhancer (self-attention) and + multi-scale deformable attention heads. + intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): + Stacked intermediate hidden states (output of each layer of the decoder). + intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): + Stacked intermediate reference points (reference points of each layer of the decoder). + init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Initial reference points sent through the Transformer decoder. + enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`): + Predicted bounding boxes scores where the top `config.num_queries` scoring bounding boxes are picked as + region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and + background). + enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`): + Logits of predicted bounding boxes coordinates in the first stage. + """ + + loss: Optional[torch.FloatTensor] = None + loss_dict: Optional[Dict] = None + logits: torch.FloatTensor = None + pred_boxes: torch.FloatTensor = None + auxiliary_outputs: Optional[List[Dict]] = None + last_hidden_state: Optional[torch.FloatTensor] = None + init_reference_points: Optional[torch.FloatTensor] = None + intermediate_hidden_states: Optional[torch.FloatTensor] = None + intermediate_reference_points: Optional[torch.FloatTensor] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + encoder_last_hidden_state_vision: Optional[torch.FloatTensor] = None + encoder_last_hidden_state_text: Optional[torch.FloatTensor] = None + encoder_vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_text_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + enc_outputs_class: Optional[torch.FloatTensor] = None + enc_outputs_coord_logits: Optional[torch.FloatTensor] = None + + +# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->GroundingDino +class GroundingDinoFrozenBatchNorm2d(nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than + torchvision.models.resnet[18,34,50,101] produce nans. 
+ """ + + def __init__(self, n): + super().__init__() + self.register_buffer("weight", torch.ones(n)) + self.register_buffer("bias", torch.zeros(n)) + self.register_buffer("running_mean", torch.zeros(n)) + self.register_buffer("running_var", torch.ones(n)) + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + num_batches_tracked_key = prefix + "num_batches_tracked" + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ) + + def forward(self, x): + # move reshapes to the beginning + # to make it user-friendly + weight = self.weight.reshape(1, -1, 1, 1) + bias = self.bias.reshape(1, -1, 1, 1) + running_var = self.running_var.reshape(1, -1, 1, 1) + running_mean = self.running_mean.reshape(1, -1, 1, 1) + epsilon = 1e-5 + scale = weight * (running_var + epsilon).rsqrt() + bias = bias - running_mean * scale + return x * scale + bias + + +# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->GroundingDino +def replace_batch_norm(model): + r""" + Recursively replace all `torch.nn.BatchNorm2d` with `GroundingDinoFrozenBatchNorm2d`. + + Args: + model (torch.nn.Module): + input model + """ + for name, module in model.named_children(): + if isinstance(module, nn.BatchNorm2d): + new_module = GroundingDinoFrozenBatchNorm2d(module.num_features) + + if not module.weight.device == torch.device("meta"): + new_module.weight.data.copy_(module.weight) + new_module.bias.data.copy_(module.bias) + new_module.running_mean.data.copy_(module.running_mean) + new_module.running_var.data.copy_(module.running_var) + + model._modules[name] = new_module + + if len(list(module.children())) > 0: + replace_batch_norm(module) + + +class GroundingDinoConvEncoder(nn.Module): + """ + Convolutional backbone, using either the AutoBackbone API or one from the timm library. + + nn.BatchNorm2d layers are replaced by GroundingDinoFrozenBatchNorm2d as defined above. 
+ + """ + + def __init__(self, config): + super().__init__() + + self.config = config + + if config.use_timm_backbone: + requires_backends(self, ["timm"]) + backbone = create_model( + config.backbone, + pretrained=config.use_pretrained_backbone, + features_only=True, + **config.backbone_kwargs, + ) + else: + backbone = load_backbone(config) + + # replace batch norm by frozen batch norm + with torch.no_grad(): + replace_batch_norm(backbone) + self.model = backbone + self.intermediate_channel_sizes = ( + self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels + ) + + backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type + if "resnet" in backbone_model_type: + for name, parameter in self.model.named_parameters(): + if config.use_timm_backbone: + if "layer2" not in name and "layer3" not in name and "layer4" not in name: + parameter.requires_grad_(False) + else: + if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: + parameter.requires_grad_(False) + + # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder.forward with Detr->GroundingDino + def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): + # send pixel_values through the model to get list of feature maps + features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps + + out = [] + for feature_map in features: + # downsample pixel_mask to match shape of corresponding feature_map + mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] + out.append((feature_map, mask)) + return out + + +# Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->GroundingDino +class GroundingDinoConvModel(nn.Module): + """ + This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. + """ + + def __init__(self, conv_encoder, position_embedding): + super().__init__() + self.conv_encoder = conv_encoder + self.position_embedding = position_embedding + + def forward(self, pixel_values, pixel_mask): + # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples + out = self.conv_encoder(pixel_values, pixel_mask) + pos = [] + for feature_map, mask in out: + # position encoding + pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) + + return out, pos + + +class GroundingDinoSinePositionEmbedding(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one used by the Attention is all you + need paper, generalized to work on images. 
+ """ + + def __init__(self, config): + super().__init__() + self.embedding_dim = config.d_model // 2 + self.temperature = config.positional_embedding_temperature + self.scale = 2 * math.pi + + def forward(self, pixel_values, pixel_mask): + y_embed = pixel_mask.cumsum(1, dtype=torch.float32) + x_embed = pixel_mask.cumsum(2, dtype=torch.float32) + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) + dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +class GroundingDinoLearnedPositionEmbedding(nn.Module): + """ + This module learns positional embeddings up to a fixed maximum size. + """ + + def __init__(self, config): + super().__init__() + + embedding_dim = config.d_model // 2 + self.row_embeddings = nn.Embedding(50, embedding_dim) + self.column_embeddings = nn.Embedding(50, embedding_dim) + + def forward(self, pixel_values, pixel_mask=None): + height, width = pixel_values.shape[-2:] + width_values = torch.arange(width, device=pixel_values.device) + height_values = torch.arange(height, device=pixel_values.device) + x_emb = self.column_embeddings(width_values) + y_emb = self.row_embeddings(height_values) + pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) + pos = pos.permute(2, 0, 1) + pos = pos.unsqueeze(0) + pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) + return pos + + +def build_position_encoding(config): + if config.position_embedding_type == "sine": + position_embedding = GroundingDinoSinePositionEmbedding(config) + elif config.position_embedding_type == "learned": + position_embedding = GroundingDinoLearnedPositionEmbedding(config) + else: + raise ValueError(f"Not supported {config.position_embedding_type}") + + return position_embedding + + +# Copied from transformers.models.deformable_detr.modeling_deformable_detr.multi_scale_deformable_attention +def multi_scale_deformable_attention( + value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor +) -> Tensor: + batch_size, _, num_heads, hidden_dim = value.shape + _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape + value_list = value.split([height.item() * width.item() for height, width in value_spatial_shapes], dim=1) + sampling_grids = 2 * sampling_locations - 1 + sampling_value_list = [] + for level_id, (height, width) in enumerate(value_spatial_shapes): + # batch_size, height*width, num_heads, hidden_dim + # -> batch_size, height*width, num_heads*hidden_dim + # -> batch_size, num_heads*hidden_dim, height*width + # -> batch_size*num_heads, hidden_dim, height, width + value_l_ = ( + value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width) + ) + # batch_size, num_queries, num_heads, num_points, 2 + # -> batch_size, num_heads, num_queries, num_points, 2 + # -> batch_size*num_heads, num_queries, num_points, 2 + sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 
2).flatten(0, 1) + # batch_size*num_heads, hidden_dim, num_queries, num_points + sampling_value_l_ = nn.functional.grid_sample( + value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False + ) + sampling_value_list.append(sampling_value_l_) + # (batch_size, num_queries, num_heads, num_levels, num_points) + # -> (batch_size, num_heads, num_queries, num_levels, num_points) + # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) + attention_weights = attention_weights.transpose(1, 2).reshape( + batch_size * num_heads, 1, num_queries, num_levels * num_points + ) + output = ( + (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) + .sum(-1) + .view(batch_size, num_heads * hidden_dim, num_queries) + ) + return output.transpose(1, 2).contiguous() + + +# Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrMultiscaleDeformableAttention with DeformableDetr->GroundingDino, Deformable DETR->Grounding DINO +class GroundingDinoMultiscaleDeformableAttention(nn.Module): + """ + Multiscale deformable attention as proposed in Deformable DETR. + """ + + def __init__(self, config: GroundingDinoConfig, num_heads: int, n_points: int): + super().__init__() + + kernel_loaded = MultiScaleDeformableAttention is not None + if is_torch_cuda_available() and is_ninja_available() and not kernel_loaded: + try: + load_cuda_kernels() + except Exception as e: + logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}") + + if config.d_model % num_heads != 0: + raise ValueError( + f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}" + ) + dim_per_head = config.d_model // num_heads + # check if dim_per_head is power of 2 + if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): + warnings.warn( + "You'd better set embed_dim (d_model) in GroundingDinoMultiscaleDeformableAttention to make the" + " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" + " implementation." 
+ ) + + self.im2col_step = 64 + + self.d_model = config.d_model + self.n_levels = config.num_feature_levels + self.n_heads = num_heads + self.n_points = n_points + + self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2) + self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points) + self.value_proj = nn.Linear(config.d_model, config.d_model) + self.output_proj = nn.Linear(config.d_model, config.d_model) + + self.disable_custom_kernels = config.disable_custom_kernels + + self._reset_parameters() + + def _reset_parameters(self): + nn.init.constant_(self.sampling_offsets.weight.data, 0.0) + default_dtype = torch.get_default_dtype() + thetas = torch.arange(self.n_heads, dtype=torch.int64).to(default_dtype) * (2.0 * math.pi / self.n_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = ( + (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) + .view(self.n_heads, 1, 1, 2) + .repeat(1, self.n_levels, self.n_points, 1) + ) + for i in range(self.n_points): + grid_init[:, :, i, :] *= i + 1 + with torch.no_grad(): + self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) + nn.init.constant_(self.attention_weights.weight.data, 0.0) + nn.init.constant_(self.attention_weights.bias.data, 0.0) + nn.init.xavier_uniform_(self.value_proj.weight.data) + nn.init.constant_(self.value_proj.bias.data, 0.0) + nn.init.xavier_uniform_(self.output_proj.weight.data) + nn.init.constant_(self.output_proj.bias.data, 0.0) + + def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): + return tensor if position_embeddings is None else tensor + position_embeddings + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states=None, + encoder_attention_mask=None, + position_embeddings: Optional[torch.Tensor] = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + output_attentions: bool = False, + ): + # add position embeddings to the hidden states before projecting to queries and keys + if position_embeddings is not None: + hidden_states = self.with_pos_embed(hidden_states, position_embeddings) + + batch_size, num_queries, _ = hidden_states.shape + batch_size, sequence_length, _ = encoder_hidden_states.shape + if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: + raise ValueError( + "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" + ) + + value = self.value_proj(encoder_hidden_states) + if attention_mask is not None: + # we invert the attention_mask + value = value.masked_fill(~attention_mask[..., None], float(0)) + value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) + sampling_offsets = self.sampling_offsets(hidden_states).view( + batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 + ) + attention_weights = self.attention_weights(hidden_states).view( + batch_size, num_queries, self.n_heads, self.n_levels * self.n_points + ) + attention_weights = F.softmax(attention_weights, -1).view( + batch_size, num_queries, self.n_heads, self.n_levels, self.n_points + ) + # batch_size, num_queries, n_heads, n_levels, n_points, 2 + num_coordinates = reference_points.shape[-1] + if num_coordinates == 2: + offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + sampling_locations = ( + reference_points[:, :, None, :, None, :] + + sampling_offsets / 
offset_normalizer[None, None, None, :, None, :] + ) + elif num_coordinates == 4: + sampling_locations = ( + reference_points[:, :, None, :, None, :2] + + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 + ) + else: + raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") + + if self.disable_custom_kernels: + # PyTorch implementation + output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) + else: + try: + # custom kernel + output = MultiScaleDeformableAttentionFunction.apply( + value, + spatial_shapes, + level_start_index, + sampling_locations, + attention_weights, + self.im2col_step, + ) + except Exception: + # PyTorch implementation + output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) + output = self.output_proj(output) + + return output, attention_weights + + +class GroundingDinoTextEnhancerLayer(nn.Module): + """Vanilla Transformer with text embeddings as input""" + + def __init__(self, config): + super().__init__() + self.self_attn = GroundingDinoMultiheadAttention( + config, num_attention_heads=config.encoder_attention_heads // 2 + ) + + # Implementation of Feedforward model + self.fc1 = nn.Linear(config.d_model, config.encoder_ffn_dim // 2) + self.fc2 = nn.Linear(config.encoder_ffn_dim // 2, config.d_model) + + self.layer_norm_before = nn.LayerNorm(config.d_model, config.layer_norm_eps) + self.layer_norm_after = nn.LayerNorm(config.d_model, config.layer_norm_eps) + + self.activation = ACT2FN[config.activation_function] + self.num_heads = config.encoder_attention_heads // 2 + self.dropout = config.text_enhancer_dropout + + def with_pos_embed(self, hidden_state: Tensor, position_embeddings: Optional[Tensor]): + return hidden_state if position_embeddings is None else hidden_state + position_embeddings + + def forward( + self, + hidden_states: torch.FloatTensor, + attention_masks: Optional[torch.BoolTensor] = None, + position_embeddings: Optional[torch.FloatTensor] = None, + ) -> Tuple[torch.FloatTensor, torch.FloatTensor]: + """Text self-attention to enhance projection of text features generated by + the text encoder (AutoModel based on text_config) within GroundingDinoEncoderLayer + + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`): + Text features generated by the text encoder. + attention_masks (`torch.BoolTensor`, *optional*): + Attention mask for text self-attention. False for real tokens and True for padding tokens. + position_embeddings (`torch.FloatTensor`, *optional*): + Position embeddings to be added to the hidden states. + + Returns: + `tuple(torch.FloatTensor)` comprising two elements: + - **hidden_states** (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) -- + Output of the text self-attention layer. + - **attention_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, + sequence_length)`) -- + Attention weights of the text self-attention layer. 
+ """ + + # repeat attn mask + if attention_masks.dim() == 3 and attention_masks.shape[0] == hidden_states.shape[0]: + # batch_size, num_queries, num_keys + attention_masks = attention_masks[:, None, :, :] + attention_masks = attention_masks.repeat(1, self.num_heads, 1, 1) + + dtype = torch.float16 + attention_masks = attention_masks.to(dtype=dtype) # fp16 compatibility + attention_masks = (1.0 - attention_masks) * torch.finfo(dtype).min + + queries = keys = self.with_pos_embed(hidden_states, position_embeddings) + attention_output, attention_weights = self.self_attn( + queries=queries, + keys=keys, + values=hidden_states, + attention_mask=attention_masks, + output_attentions=True, + ) + attention_output = nn.functional.dropout(attention_output, p=self.dropout, training=self.training) + hidden_states = hidden_states + attention_output + hidden_states = self.layer_norm_before(hidden_states) + + residual = hidden_states + hidden_states = self.activation(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = hidden_states + residual + hidden_states = self.layer_norm_after(hidden_states) + + return hidden_states, attention_weights + + +class GroundingDinoBiMultiHeadAttention(nn.Module): + def __init__(self, config): + super().__init__() + + vision_dim = text_dim = config.d_model + embed_dim = config.encoder_ffn_dim // 2 + num_heads = config.encoder_attention_heads // 2 + dropout = config.fusion_dropout + + self.embed_dim = embed_dim + self.num_heads = num_heads + self.head_dim = embed_dim // num_heads + self.vision_dim = vision_dim + self.text_dim = text_dim + + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"`embed_dim` must be divisible by `num_heads` (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." + ) + self.scale = self.head_dim ** (-0.5) + self.dropout = dropout + + self.vision_proj = nn.Linear(self.vision_dim, self.embed_dim) + self.text_proj = nn.Linear(self.text_dim, self.embed_dim) + self.values_vision_proj = nn.Linear(self.vision_dim, self.embed_dim) + self.values_text_proj = nn.Linear(self.text_dim, self.embed_dim) + + self.out_vision_proj = nn.Linear(self.embed_dim, self.vision_dim) + self.out_text_proj = nn.Linear(self.embed_dim, self.text_dim) + + def _reshape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): + return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + vision_features: torch.FloatTensor, + text_features: torch.FloatTensor, + vision_attention_mask: Optional[torch.BoolTensor] = None, + text_attention_mask: Optional[torch.BoolTensor] = None, + ) -> Tuple[Tuple[torch.FloatTensor, torch.FloatTensor], Tuple[torch.FloatTensor, torch.FloatTensor]]: + """Image-to-text and text-to-image cross-attention + + Args: + vision_features (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_dim)`): + Projected flattened image features generated by the vision backbone. + text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`): + Projected text features generated by the text encoder. + vision_attention_mask (`torch.BoolTensor`, **optional**): + Attention mask for image-to-text cross-attention. False for real tokens and True for padding tokens. 
+ text_attention_mask (`torch.BoolTensor`, **optional**): + Attention mask for text-to-image cross-attention. False for real tokens and True for padding tokens. + + Returns: + `tuple(tuple(torch.FloatTensor), tuple(torch.FloatTensor))` where each inner tuple comprises an attention + output and weights: + - **vision_attn_output** (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_din)`) + -- + Output of the image-to-text cross-attention layer. + - **vision_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, vision_sequence_length, + vision_sequence_length)`) -- + Attention weights of the image-to-text cross-attention layer. + - **text_attn_output** (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`) -- + Output of the text-to-image cross-attention layer. + - **text_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, text_sequence_length, + text_sequence_length)`) -- + Attention weights of the text-to-image cross-attention layer. + """ + batch_size, tgt_len, _ = vision_features.size() + + vision_query_states = self.vision_proj(vision_features) * self.scale + vision_query_states = self._reshape(vision_query_states, tgt_len, batch_size) + + text_key_states = self.text_proj(text_features) + text_key_states = self._reshape(text_key_states, -1, batch_size) + + vision_value_states = self.values_vision_proj(vision_features) + vision_value_states = self._reshape(vision_value_states, -1, batch_size) + + text_value_states = self.values_text_proj(text_features) + text_value_states = self._reshape(text_value_states, -1, batch_size) + + proj_shape = (batch_size * self.num_heads, -1, self.head_dim) + + vision_query_states = vision_query_states.view(*proj_shape) + text_key_states = text_key_states.view(*proj_shape) + vision_value_states = vision_value_states.view(*proj_shape) + text_value_states = text_value_states.view(*proj_shape) + + src_len = text_key_states.size(1) + attn_weights = torch.bmm(vision_query_states, text_key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt + + if attn_weights.size() != (batch_size * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(batch_size * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" + ) + + attn_weights = attn_weights - attn_weights.max() + # Do not increase -50000/50000, data type half has quite limited range + attn_weights = torch.clamp(attn_weights, min=-50000, max=50000) + + attn_weights_transposed = attn_weights.transpose(1, 2) + text_attn_weights = attn_weights_transposed - torch.max(attn_weights_transposed, dim=-1, keepdim=True)[0] + + # Do not increase -50000/50000, data type half has quite limited range + text_attn_weights = torch.clamp(text_attn_weights, min=-50000, max=50000) + + # mask vision for language + if vision_attention_mask is not None: + vision_attention_mask = ( + vision_attention_mask[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) + ) + text_attn_weights.masked_fill_(vision_attention_mask, float("-inf")) + + text_attn_weights = text_attn_weights.softmax(dim=-1) + + # mask language for vision + if text_attention_mask is not None: + text_attention_mask = text_attention_mask[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) + attn_weights.masked_fill_(text_attention_mask, float("-inf")) + vision_attn_weights = attn_weights.softmax(dim=-1) + + vision_attn_probs = F.dropout(vision_attn_weights, p=self.dropout, training=self.training) + text_attn_probs = 
F.dropout(text_attn_weights, p=self.dropout, training=self.training) + + vision_attn_output = torch.bmm(vision_attn_probs, text_value_states) + text_attn_output = torch.bmm(text_attn_probs, vision_value_states) + + if vision_attn_output.size() != (batch_size * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`vision_attn_output` should be of size {(batch_size, self.num_heads, tgt_len, self.head_dim)}, but is {vision_attn_output.size()}" + ) + + if text_attn_output.size() != (batch_size * self.num_heads, src_len, self.head_dim): + raise ValueError( + f"`text_attn_output` should be of size {(batch_size, self.num_heads, src_len, self.head_dim)}, but is {text_attn_output.size()}" + ) + + vision_attn_output = vision_attn_output.view(batch_size, self.num_heads, tgt_len, self.head_dim) + vision_attn_output = vision_attn_output.transpose(1, 2) + vision_attn_output = vision_attn_output.reshape(batch_size, tgt_len, self.embed_dim) + + text_attn_output = text_attn_output.view(batch_size, self.num_heads, src_len, self.head_dim) + text_attn_output = text_attn_output.transpose(1, 2) + text_attn_output = text_attn_output.reshape(batch_size, src_len, self.embed_dim) + + vision_attn_output = self.out_vision_proj(vision_attn_output) + text_attn_output = self.out_text_proj(text_attn_output) + + return (vision_attn_output, vision_attn_weights), (text_attn_output, text_attn_weights) + + +# Copied from transformers.models.beit.modeling_beit.drop_path +def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. 
+ """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + +# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->GroundingDino +class GroundingDinoDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None) -> None: + super().__init__() + self.drop_prob = drop_prob + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + return drop_path(hidden_states, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +class GroundingDinoFusionLayer(nn.Module): + def __init__(self, config): + super().__init__() + drop_path = config.fusion_droppath + + # pre layer norm + self.layer_norm_vision = nn.LayerNorm(config.d_model, config.layer_norm_eps) + self.layer_norm_text = nn.LayerNorm(config.d_model, config.layer_norm_eps) + self.attn = GroundingDinoBiMultiHeadAttention(config) + + # add layer scale for training stability + self.drop_path = GroundingDinoDropPath(drop_path) if drop_path > 0.0 else nn.Identity() + init_values = 1e-4 + self.vision_param = nn.Parameter(init_values * torch.ones((config.d_model)), requires_grad=True) + self.text_param = nn.Parameter(init_values * torch.ones((config.d_model)), requires_grad=True) + + def forward( + self, + vision_features: torch.FloatTensor, + text_features: torch.FloatTensor, + attention_mask_vision: Optional[torch.BoolTensor] = None, + attention_mask_text: Optional[torch.BoolTensor] = None, + ) -> Tuple[Tuple[torch.FloatTensor, torch.FloatTensor], Tuple[torch.FloatTensor, torch.FloatTensor]]: + """Image and text features fusion + + Args: + vision_features (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_dim)`): + Projected flattened image features generated by the vision backbone. + text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`): + Projected text features generated by the text encoder. + attention_mask_vision (`torch.BoolTensor`, **optional**): + Attention mask for image-to-text cross-attention. False for real tokens and True for padding tokens. + attention_mask_text (`torch.BoolTensor`, **optional**): + Attention mask for text-to-image cross-attention. False for real tokens and True for padding tokens. + + Returns: + `tuple(tuple(torch.FloatTensor), tuple(torch.FloatTensor))` where each inner tuple comprises an enhanced + feature and attention output and weights: + - **vision_features** (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, vision_dim)`) -- + Updated vision features with attention output from image-to-text cross-attention layer. + - **vision_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, vision_sequence_length, + vision_sequence_length)`) -- + Attention weights of the image-to-text cross-attention layer. + - **text_features** (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, text_dim)`) -- + Updated text features with attention output from text-to-image cross-attention layer. 
+ - **text_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, text_sequence_length, + text_sequence_length)`) -- + Attention weights of the text-to-image cross-attention layer. + """ + vision_features = self.layer_norm_vision(vision_features) + text_features = self.layer_norm_text(text_features) + (delta_v, vision_attn), (delta_t, text_attn) = self.attn( + vision_features, + text_features, + vision_attention_mask=attention_mask_vision, + text_attention_mask=attention_mask_text, + ) + vision_features = vision_features + self.drop_path(self.vision_param * delta_v) + text_features = text_features + self.drop_path(self.text_param * delta_t) + + return (vision_features, vision_attn), (text_features, text_attn) + + +class GroundingDinoDeformableLayer(nn.Module): + def __init__(self, config: GroundingDinoConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = GroundingDinoMultiscaleDeformableAttention( + config, num_heads=config.encoder_attention_heads, n_points=config.encoder_n_points + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + position_embeddings: torch.Tensor = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + output_attentions: bool = False, + ): + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Input to the layer. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Attention mask. + position_embeddings (`torch.FloatTensor`, *optional*): + Position embeddings, to be added to `hidden_states`. + reference_points (`torch.FloatTensor`, *optional*): + Reference points. + spatial_shapes (`torch.LongTensor`, *optional*): + Spatial shapes of the backbone feature maps. + level_start_index (`torch.LongTensor`, *optional*): + Level start index. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps. 
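+ # Shape reference for the call below (illustrative, following the deformable attention module above):
+ # hidden_states / position_embeddings: (batch_size, sequence_length, d_model) flattened multi-scale features
+ # reference_points: normalized coordinates with last dimension 2 (points) or 4 (boxes)
+ # spatial_shapes: (num_levels, 2) per-level (height, width) whose products sum to sequence_length
+ # level_start_index: (num_levels,) start offset of each level in the flattened sequence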
+ hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=hidden_states, + encoder_attention_mask=attention_mask, + position_embeddings=position_embeddings, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if self.training: + if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + return hidden_states, attn_weights + + +# Based on https://github.com/IDEA-Research/GroundingDINO/blob/2b62f419c292ca9c518daae55512fabc3fead4a4/groundingdino/models/GroundingDINO/utils.py#L24 +def get_sine_pos_embed( + pos_tensor: torch.Tensor, num_pos_feats: int = 128, temperature: int = 10000, exchange_xy: bool = True +) -> Tensor: + """ + Generate sine position embeddings from a position tensor. + + Args: + pos_tensor (torch.Tensor): + Tensor containing positions. Shape: [..., n]. + num_pos_feats (`int`, *optional*, defaults to 128): + Projected shape for each float in the tensor. + temperature (`int`, *optional*, defaults to 10000): + Temperature in the sine/cosine function. + exchange_xy (`bool`, *optional*, defaults to `True`): + Exchange pos x and pos y. For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. + + Returns: + position_embeddings (torch.Tensor): shape: [..., n * hidden_size]. 
+ """ + scale = 2 * math.pi + dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device) + dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats) + + def sine_func(x: torch.Tensor): + sin_x = x * scale / dim_t + sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2) + return sin_x + + pos_tensor = pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1) + position_embeddings = [sine_func(x) for x in pos_tensor] + if exchange_xy: + position_embeddings[0], position_embeddings[1] = position_embeddings[1], position_embeddings[0] + position_embeddings = torch.cat(position_embeddings, dim=-1) + return position_embeddings + + +class GroundingDinoEncoderLayer(nn.Module): + def __init__(self, config) -> None: + super().__init__() + + self.d_model = config.d_model + + self.text_enhancer_layer = GroundingDinoTextEnhancerLayer(config) + self.fusion_layer = GroundingDinoFusionLayer(config) + self.deformable_layer = GroundingDinoDeformableLayer(config) + + def get_text_position_embeddings( + self, + text_features: Tensor, + text_position_embedding: Optional[torch.Tensor], + text_position_ids: Optional[torch.Tensor], + ) -> Tensor: + batch_size, seq_length, _ = text_features.shape + if text_position_embedding is None and text_position_ids is None: + text_position_embedding = torch.arange(seq_length, device=text_features.device) + text_position_embedding = text_position_embedding.float() + text_position_embedding = text_position_embedding.unsqueeze(0).unsqueeze(-1) + text_position_embedding = text_position_embedding.repeat(batch_size, 1, 1) + text_position_embedding = get_sine_pos_embed( + text_position_embedding, num_pos_feats=self.d_model, exchange_xy=False + ) + if text_position_ids is not None: + text_position_embedding = get_sine_pos_embed( + text_position_ids[..., None], num_pos_feats=self.d_model, exchange_xy=False + ) + + return text_position_embedding + + def forward( + self, + vision_features: Tensor, + vision_position_embedding: Tensor, + spatial_shapes: Tensor, + level_start_index: Tensor, + key_padding_mask: Tensor, + reference_points: Tensor, + text_features: Optional[Tensor] = None, + text_attention_mask: Optional[Tensor] = None, + text_position_embedding: Optional[Tensor] = None, + text_self_attention_masks: Optional[Tensor] = None, + text_position_ids: Optional[Tensor] = None, + ): + text_position_embedding = self.get_text_position_embeddings( + text_features, text_position_embedding, text_position_ids + ) + + (vision_features, vision_fused_attn), (text_features, text_fused_attn) = self.fusion_layer( + vision_features=vision_features, + text_features=text_features, + attention_mask_vision=key_padding_mask, + attention_mask_text=text_attention_mask, + ) + + (text_features, text_enhanced_attn) = self.text_enhancer_layer( + hidden_states=text_features, + attention_masks=~text_self_attention_masks, # note we use ~ for mask here + position_embeddings=(text_position_embedding if text_position_embedding is not None else None), + ) + + (vision_features, vision_deformable_attn) = self.deformable_layer( + hidden_states=vision_features, + attention_mask=~key_padding_mask, + position_embeddings=vision_position_embedding, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + ) + + return ( + (vision_features, text_features), + (vision_fused_attn, text_fused_attn, text_enhanced_attn, vision_deformable_attn), + ) + + +class 
GroundingDinoMultiheadAttention(nn.Module): + """Equivalent implementation of nn.MultiheadAttention with `batch_first=True`.""" + + def __init__(self, config, num_attention_heads=None): + super().__init__() + if config.hidden_size % num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({num_attention_heads})" + ) + + self.num_attention_heads = num_attention_heads + self.attention_head_size = int(config.hidden_size / num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.out_proj = nn.Linear(config.hidden_size, config.hidden_size) + + self.dropout = nn.Dropout(config.attention_dropout) + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + queries: torch.Tensor, + keys: torch.Tensor, + values: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + query_layer = self.transpose_for_scores(self.query(queries)) + key_layer = self.transpose_for_scores(self.key(keys)) + value_layer = self.transpose_for_scores(self.value(values)) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in GroundingDinoModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs = self.dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + context_layer = self.out_proj(context_layer) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +class GroundingDinoDecoderLayer(nn.Module): + def __init__(self, config: GroundingDinoConfig): + super().__init__() + self.embed_dim = config.d_model + + # self-attention + self.self_attn = GroundingDinoMultiheadAttention(config, num_attention_heads=config.decoder_attention_heads) + + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) + # cross-attention text + self.encoder_attn_text = GroundingDinoMultiheadAttention( + config, num_attention_heads=config.decoder_attention_heads + ) + self.encoder_attn_text_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) + # cross-attention + self.encoder_attn = GroundingDinoMultiscaleDeformableAttention( + config, + num_heads=config.decoder_attention_heads, + n_points=config.decoder_n_points, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) + # feedforward neural networks + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) + + def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): + return tensor if position_embeddings is None else tensor + position_embeddings + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: Optional[torch.Tensor] = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + vision_encoder_hidden_states: Optional[torch.Tensor] = None, + vision_encoder_attention_mask: Optional[torch.Tensor] = None, + text_encoder_hidden_states: Optional[torch.Tensor] = None, + text_encoder_attention_mask: Optional[torch.Tensor] = None, + self_attn_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ): + residual = hidden_states + + # Self Attention + queries = keys = self.with_pos_embed(hidden_states, position_embeddings) + hidden_states, self_attn_weights = self.self_attn( + queries=queries, + keys=keys, + values=hidden_states, + attention_mask=self_attn_mask, + output_attentions=True, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + second_residual = hidden_states + + # Cross-Attention Text + queries = self.with_pos_embed(hidden_states, position_embeddings) + + hidden_states, text_cross_attn_weights = self.encoder_attn_text( + queries=queries, + keys=text_encoder_hidden_states, + values=text_encoder_hidden_states, + # attention_mask=text_encoder_attention_mask, # TODO fix cross-attention mask here + output_attentions=True, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = second_residual + hidden_states + hidden_states = self.encoder_attn_text_layer_norm(hidden_states) 
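+
+        # At this point each query has been updated by self-attention among queries and by cross-attention
+        # over the text tokens; the block below lets it attend to the multi-scale image features through
+        # deformable cross-attention before the final feed-forward network.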
+ + third_residual = hidden_states + + # Cross-Attention + cross_attn_weights = None + hidden_states, cross_attn_weights = self.encoder_attn( + hidden_states=hidden_states, + attention_mask=vision_encoder_attention_mask, + encoder_hidden_states=vision_encoder_hidden_states, + encoder_attention_mask=vision_encoder_attention_mask, + position_embeddings=position_embeddings, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = third_residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, text_cross_attn_weights, cross_attn_weights) + + return outputs + + +class GroundingDinoContrastiveEmbedding(nn.Module): + def __init__(self, config): + super().__init__() + self.max_text_len = config.max_text_len + + def forward( + self, + vision_hidden_state: torch.FloatTensor, + text_hidden_state: torch.FloatTensor, + text_token_mask: torch.BoolTensor, + ) -> torch.FloatTensor: + output = vision_hidden_state @ text_hidden_state.transpose(-1, -2) + output = output.masked_fill(~text_token_mask[:, None, :], float("-inf")) + + # padding to max_text_len + new_output = torch.full((*output.shape[:-1], self.max_text_len), float("-inf"), device=output.device) + new_output[..., : output.shape[-1]] = output + + return new_output + + +class GroundingDinoPreTrainedModel(PreTrainedModel): + config_class = GroundingDinoConfig + base_model_prefix = "model" + main_input_name = "pixel_values" + + def _init_weights(self, module): + std = self.config.init_std + + if isinstance(module, GroundingDinoLearnedPositionEmbedding): + nn.init.uniform_(module.row_embeddings.weight) + nn.init.uniform_(module.column_embeddings.weight) + elif isinstance(module, GroundingDinoMultiscaleDeformableAttention): + module._reset_parameters() + elif isinstance(module, GroundingDinoBiMultiHeadAttention): + nn.init.xavier_uniform_(module.vision_proj.weight) + module.vision_proj.bias.data.fill_(0) + nn.init.xavier_uniform_(module.text_proj.weight) + module.text_proj.bias.data.fill_(0) + nn.init.xavier_uniform_(module.values_vision_proj.weight) + module.values_vision_proj.bias.data.fill_(0) + nn.init.xavier_uniform_(module.values_text_proj.weight) + module.values_text_proj.bias.data.fill_(0) + nn.init.xavier_uniform_(module.out_vision_proj.weight) + module.out_vision_proj.bias.data.fill_(0) + nn.init.xavier_uniform_(module.out_text_proj.weight) + module.out_text_proj.bias.data.fill_(0) + elif isinstance(module, (GroundingDinoEncoderLayer, GroundingDinoDecoderLayer)): + for p in module.parameters(): + if p.dim() > 1: + nn.init.normal_(p, mean=0.0, std=std) + elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + 
module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, GroundingDinoMLPPredictionHead): + nn.init.constant_(module.layers[-1].weight.data, 0) + nn.init.constant_(module.layers[-1].bias.data, 0) + + if hasattr(module, "reference_points") and not self.config.two_stage: + nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) + nn.init.constant_(module.reference_points.bias.data, 0.0) + if hasattr(module, "level_embed"): + nn.init.normal_(module.level_embed) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, GroundingDinoDecoder): + module.gradient_checkpointing = value + + +GROUNDING_DINO_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`GroundingDinoConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +GROUNDING_DINO_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. + + Pixel values can be obtained using [`AutoImageProcessor`]. See [`GroundingDinoImageProcessor.__call__`] for + details. + + input_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`GroundingDinoTokenizer.__call__`] for details. + + token_type_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: 0 corresponds to a `sentence A` token, 1 corresponds to a `sentence B` token + + [What are token type IDs?](../glossary#token-type-ids) + + attention_mask (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are real (i.e. **not masked**), + - 0 for tokens that are padding (i.e. **masked**). + + [What are attention masks?](../glossary#attention-mask) + + pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): + Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). 
+ + [What are attention masks?](../glossary#attention-mask) + + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state_vision`, *optional*: `last_hidden_state_text`, *optional*: + `vision_hidden_states`, *optional*: `text_hidden_states`, *optional*: `attentions`) + `last_hidden_state_vision` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence + of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the + decoder. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. +""" + + +class GroundingDinoEncoder(GroundingDinoPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a + [`GroundingDinoEncoderLayer`]. + + The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. + + Args: + config: GroundingDinoConfig + """ + + def __init__(self, config: GroundingDinoConfig): + super().__init__(config) + + self.dropout = config.dropout + self.layers = nn.ModuleList([GroundingDinoEncoderLayer(config) for _ in range(config.encoder_layers)]) + + # Initialize weights and apply final processing + self.post_init() + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + """ + Get reference points for each feature map. + + Args: + spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): + Spatial shapes of each feature map. + valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): + Valid ratios of each feature map. + device (`torch.device`): + Device on which to create the tensors. + Returns: + `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` + """ + reference_points_list = [] + for level, (height, width) in enumerate(spatial_shapes): + ref_y, ref_x = meshgrid( + torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), + torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), + indexing="ij", + ) + # TODO: valid_ratios could be useless here. 
check https://github.com/fundamentalvision/Deformable-DETR/issues/36 + ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) + ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def forward( + self, + vision_features: Tensor, + vision_attention_mask: Tensor, + vision_position_embedding: Tensor, + spatial_shapes: Tensor, + level_start_index: Tensor, + valid_ratios=None, + text_features: Optional[Tensor] = None, + text_attention_mask: Optional[Tensor] = None, + text_position_embedding: Optional[Tensor] = None, + text_self_attention_masks: Optional[Tensor] = None, + text_position_ids: Optional[Tensor] = None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + vision_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. + vision_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: + - 0 for pixel features that are real (i.e. **not masked**), + - 1 for pixel features that are padding (i.e. **masked**). + [What are attention masks?](../glossary#attention-mask) + vision_position_embedding (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Position embeddings that are added to the queries and keys in each self-attention layer. + spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): + Spatial shapes of each feature map. + level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): + Starting index of each feature map. + valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): + Ratio of valid area in each feature level. + text_features (`torch.FloatTensor` of shape `(batch_size, text_seq_len, hidden_size)`): + Flattened text features that are passed to the encoder. + text_attention_mask (`torch.Tensor` of shape `(batch_size, text_seq_len)`, *optional*): + Mask to avoid performing attention on padding text features. Mask values selected in `[0, 1]`: + - 0 for text features that are real (i.e. **not masked**), + - 1 for text features that are padding (i.e. **masked**). + [What are attention masks?](../glossary#attention-mask) + text_position_embedding (`torch.FloatTensor` of shape `(batch_size, text_seq_len)`): + Position embeddings that are added to the queries and keys in each self-attention layer. + text_self_attention_masks (`torch.BoolTensor` of shape `(batch_size, text_seq_len, text_seq_len)`): + Masks to avoid performing attention between padding text features. Mask values selected in `[0, 1]`: + - 1 for text features that are real (i.e. **not masked**), + - 0 for text features that are padding (i.e. **masked**). + text_position_ids (`torch.LongTensor` of shape `(batch_size, num_queries)`): + Position ids for text features. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=vision_features.device) + + encoder_vision_states = () if output_hidden_states else None + encoder_text_states = () if output_hidden_states else None + all_attns = () if output_attentions else None + all_attn_fused_text = () if output_attentions else None + all_attn_fused_vision = () if output_attentions else None + all_attn_enhanced_text = () if output_attentions else None + all_attn_deformable = () if output_attentions else None + for i, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_vision_states += (vision_features,) + encoder_text_states += (text_features,) + + (vision_features, text_features), attentions = encoder_layer( + vision_features=vision_features, + vision_position_embedding=vision_position_embedding, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + key_padding_mask=vision_attention_mask, + reference_points=reference_points, + text_features=text_features, + text_attention_mask=text_attention_mask, + text_position_embedding=text_position_embedding, + text_self_attention_masks=text_self_attention_masks, + text_position_ids=text_position_ids, + ) + + if output_attentions: + all_attn_fused_vision += (attentions[0],) + all_attn_fused_text += (attentions[1],) + all_attn_enhanced_text += (attentions[2],) + all_attn_deformable += (attentions[3],) + + if output_hidden_states: + encoder_vision_states += (vision_features,) + encoder_text_states += (text_features,) + + if output_attentions: + all_attns = (all_attn_fused_vision, all_attn_fused_text, all_attn_enhanced_text, all_attn_deformable) + + if not return_dict: + enc_outputs = [vision_features, text_features, encoder_vision_states, encoder_text_states, all_attns] + return tuple(v for v in enc_outputs if v is not None) + return GroundingDinoEncoderOutput( + last_hidden_state_vision=vision_features, + last_hidden_state_text=text_features, + vision_hidden_states=encoder_vision_states, + text_hidden_states=encoder_text_states, + attentions=all_attns, + ) + + +class GroundingDinoDecoder(GroundingDinoPreTrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`GroundingDinoDecoderLayer`]. + + The decoder updates the query embeddings through multiple self-attention and cross-attention layers. + + Some tweaks for Grounding DINO: + + - `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass. + - it also returns a stack of intermediate outputs and reference points from all decoding layers. 
+
+    Args:
+        config: GroundingDinoConfig
+    """
+
+    def __init__(self, config: GroundingDinoConfig):
+        super().__init__(config)
+
+        self.dropout = config.dropout
+        self.layer_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps)
+        self.layers = nn.ModuleList([GroundingDinoDecoderLayer(config) for _ in range(config.decoder_layers)])
+        self.reference_points_head = GroundingDinoMLPPredictionHead(
+            config.query_dim // 2 * config.d_model, config.d_model, config.d_model, 2
+        )
+        self.gradient_checkpointing = False
+
+        # hack implementation for iterative bounding box refinement as in two-stage Deformable DETR
+        self.bbox_embed = None
+        self.class_embed = None
+        self.query_scale = None
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def forward(
+        self,
+        inputs_embeds,
+        vision_encoder_hidden_states,
+        vision_encoder_attention_mask=None,
+        text_encoder_hidden_states=None,
+        text_encoder_attention_mask=None,
+        reference_points=None,
+        spatial_shapes=None,
+        level_start_index=None,
+        valid_ratios=None,
+        self_attn_mask=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+    ):
+        r"""
+        Args:
+            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
+                The query embeddings that are passed into the decoder.
+            vision_encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+                Last hidden state from the encoder related to the vision feature map.
+            vision_encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
+                - 1 for pixel features that are real (i.e. **not masked**),
+                - 0 for pixel features that are padding (i.e. **masked**).
+            text_encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, text_seq_len, hidden_size)`):
+                Last hidden state from the encoder related to the text features.
+            text_encoder_attention_mask (`torch.Tensor` of shape `(batch_size, text_seq_len)`, *optional*):
+                Mask to avoid performing attention on padding text features. Mask values selected in `[0, 1]`:
+                - 0 for text features that are real (i.e. **not masked**),
+                - 1 for text features that are padding (i.e. **masked**).
+            reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` when two-stage is used, `(batch_size, num_queries, 2)` otherwise, *optional*):
+                Reference points in range `[0, 1]`, with top-left (0, 0) and bottom-right (1, 1), including the padding area.
+            spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`):
+                Spatial shapes of the feature maps.
+            level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*):
+                Indexes for the start of each feature level. In range `[0, sequence_length]`.
+            valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*):
+                Ratio of valid area in each feature level.
+            self_attn_mask (`torch.BoolTensor` of shape `(batch_size, text_seq_len)`):
+                Masks to avoid performing self-attention between the vision hidden states. Mask values selected in `[0, 1]`:
+                - 1 for queries that are real (i.e. **not masked**),
+                - 0 for queries that are padding (i.e. **masked**).
+            output_attentions (`bool`, *optional*):
+                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                returned tensors for more detail.
+            output_hidden_states (`bool`, *optional*):
+                Whether or not to return the hidden states of all layers. See `hidden_states` under returned
+                tensors for more detail.
+            return_dict (`bool`, *optional*):
+                Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+        """
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        if inputs_embeds is not None:
+            hidden_states = inputs_embeds
+
+        # decoder layers
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attns = () if output_attentions else None
+        all_attns = () if output_attentions else None
+        all_cross_attns_vision = () if (output_attentions and vision_encoder_hidden_states is not None) else None
+        all_cross_attns_text = () if (output_attentions and text_encoder_hidden_states is not None) else None
+        intermediate = ()
+        intermediate_reference_points = ()
+
+        for idx, decoder_layer in enumerate(self.layers):
+            num_coordinates = reference_points.shape[-1]
+            if num_coordinates == 4:
+                reference_points_input = (
+                    reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None]
+                )
+            elif num_coordinates == 2:
+                reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]
+            else:
+                raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")
+            query_pos = get_sine_pos_embed(reference_points_input[:, :, 0, :], num_pos_feats=self.config.d_model // 2)
+            query_pos = self.reference_points_head(query_pos)
+
+            # In the original implementation, layer norm is applied before outputting intermediate hidden states.
+            # However, that norm is not applied between layers, so each layer consumes the un-normalized output of
+            # the previous layer.
+            if output_hidden_states:
+                all_hidden_states += (self.layer_norm(hidden_states),)
+
+            if self.gradient_checkpointing and self.training:
+
+                def create_custom_forward(module):
+                    def custom_forward(*inputs):
+                        return module(*inputs, output_attentions)
+
+                    return custom_forward
+
+                layer_outputs = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(decoder_layer),
+                    hidden_states,
+                    query_pos,
+                    reference_points_input,
+                    spatial_shapes,
+                    level_start_index,
+                    vision_encoder_hidden_states,
+                    vision_encoder_attention_mask,
+                    text_encoder_hidden_states,
+                    text_encoder_attention_mask,
+                    self_attn_mask,
+                    None,
+                )
+            else:
+                layer_outputs = decoder_layer(
+                    hidden_states=hidden_states,
+                    position_embeddings=query_pos,
+                    reference_points=reference_points_input,
+                    spatial_shapes=spatial_shapes,
+                    level_start_index=level_start_index,
+                    vision_encoder_hidden_states=vision_encoder_hidden_states,
+                    vision_encoder_attention_mask=vision_encoder_attention_mask,
+                    text_encoder_hidden_states=text_encoder_hidden_states,
+                    text_encoder_attention_mask=text_encoder_attention_mask,
+                    self_attn_mask=self_attn_mask,
+                    output_attentions=output_attentions,
+                )
+
+            hidden_states = layer_outputs[0]
+
+            # hack implementation for iterative bounding box refinement
+            if self.bbox_embed is not None:
+                tmp = self.bbox_embed[idx](hidden_states)
+                num_coordinates = reference_points.shape[-1]
+                if num_coordinates == 4:
+                    new_reference_points = tmp + torch.special.logit(reference_points, eps=1e-5)
+                    new_reference_points = new_reference_points.sigmoid()
+                elif num_coordinates == 2:
+                    new_reference_points = tmp
+                    new_reference_points[..., :2] = tmp[..., :2] +
torch.special.logit(reference_points, eps=1e-5) + new_reference_points = new_reference_points.sigmoid() + else: + raise ValueError( + f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}" + ) + reference_points = new_reference_points.detach() + + intermediate += (self.layer_norm(hidden_states),) + intermediate_reference_points += (reference_points,) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if text_encoder_hidden_states is not None: + all_cross_attns_text += (layer_outputs[2],) + + if vision_encoder_hidden_states is not None: + all_cross_attns_vision += (layer_outputs[3],) + + # Keep batch_size as first dimension + intermediate = torch.stack(intermediate, dim=1) + intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1) + hidden_states = self.layer_norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if output_attentions: + all_attns += (all_self_attns, all_cross_attns_text, all_cross_attns_vision) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + intermediate, + intermediate_reference_points, + all_hidden_states, + all_attns, + ] + if v is not None + ) + return GroundingDinoDecoderOutput( + last_hidden_state=hidden_states, + intermediate_hidden_states=intermediate, + intermediate_reference_points=intermediate_reference_points, + hidden_states=all_hidden_states, + attentions=all_attns, + ) + + +# these correspond to [CLS], [SEP], . and ? +SPECIAL_TOKENS = [101, 102, 1012, 1029] + + +def generate_masks_with_special_tokens_and_transfer_map(input_ids: torch.LongTensor) -> Tuple[Tensor, Tensor]: + """Generate attention mask between each pair of special tokens and positional ids. + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + Returns: + `tuple(torch.Tensor)` comprising attention mask between each special tokens and position_ids: + - **attention_mask** (`torch.BoolTensor` of shape `(batch_size, sequence_length, sequence_length)`) + - **position_ids** (`torch.LongTensor` of shape `(batch_size, sequence_length)`) + """ + batch_size, num_token = input_ids.shape + # special_tokens_mask: batch_size, num_token. 1 for special tokens. 
0 for normal tokens + special_tokens_mask = torch.zeros((batch_size, num_token), device=input_ids.device).bool() + for special_token in SPECIAL_TOKENS: + special_tokens_mask |= input_ids == special_token + + # idxs: each row is a list of indices of special tokens + idxs = torch.nonzero(special_tokens_mask) + + # generate attention mask and positional ids + attention_mask = torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(batch_size, 1, 1) + position_ids = torch.zeros((batch_size, num_token), device=input_ids.device) + previous_col = 0 + for i in range(idxs.shape[0]): + row, col = idxs[i] + if (col == 0) or (col == num_token - 1): + attention_mask[row, col, col] = True + position_ids[row, col] = 0 + else: + attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True + position_ids[row, previous_col + 1 : col + 1] = torch.arange( + 0, col - previous_col, device=input_ids.device + ) + + previous_col = col + + return attention_mask, position_ids.to(torch.long) + + +@add_start_docstrings( + """ + The bare Grounding DINO Model (consisting of a backbone and encoder-decoder Transformer) outputting raw + hidden-states without any specific head on top. + """, + GROUNDING_DINO_START_DOCSTRING, +) +class GroundingDinoModel(GroundingDinoPreTrainedModel): + def __init__(self, config: GroundingDinoConfig): + super().__init__(config) + + # Create backbone + positional encoding + backbone = GroundingDinoConvEncoder(config) + position_embeddings = build_position_encoding(config) + self.backbone = GroundingDinoConvModel(backbone, position_embeddings) + + # Create input projection layers + if config.num_feature_levels > 1: + num_backbone_outs = len(backbone.intermediate_channel_sizes) + input_proj_list = [] + for i in range(num_backbone_outs): + in_channels = backbone.intermediate_channel_sizes[i] + input_proj_list.append( + nn.Sequential( + nn.Conv2d(in_channels, config.d_model, kernel_size=1), + nn.GroupNorm(32, config.d_model), + ) + ) + for _ in range(config.num_feature_levels - num_backbone_outs): + input_proj_list.append( + nn.Sequential( + nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), + nn.GroupNorm(32, config.d_model), + ) + ) + in_channels = config.d_model + self.input_proj_vision = nn.ModuleList(input_proj_list) + else: + self.input_proj_vision = nn.ModuleList( + [ + nn.Sequential( + nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1), + nn.GroupNorm(32, config.d_model), + ) + ] + ) + + # Create text backbone + self.text_backbone = AutoModel.from_config(config.text_config, add_pooling_layer=False) + self.text_projection = nn.Linear(config.text_config.hidden_size, config.d_model) + + if config.embedding_init_target or not config.two_stage: + self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) + + self.encoder = GroundingDinoEncoder(config) + self.decoder = GroundingDinoDecoder(config) + + self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model)) + + if config.two_stage: + self.enc_output = nn.Linear(config.d_model, config.d_model) + self.enc_output_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps) + if ( + config.two_stage_bbox_embed_share + and config.decoder_bbox_embed_share + and self.decoder.bbox_embed is not None + ): + self.encoder_output_bbox_embed = self.decoder.bbox_embed + else: + self.encoder_output_bbox_embed = GroundingDinoMLPPredictionHead( + input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, 
num_layers=3 + ) + + self.encoder_output_class_embed = GroundingDinoContrastiveEmbedding(config) + else: + self.reference_points = nn.Embedding(config.num_queries, 4) + + self.post_init() + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + def freeze_backbone(self): + for name, param in self.backbone.conv_encoder.model.named_parameters(): + param.requires_grad_(False) + + def unfreeze_backbone(self): + for name, param in self.backbone.conv_encoder.model.named_parameters(): + param.requires_grad_(True) + + def get_valid_ratio(self, mask): + """Get the valid ratio of all feature maps.""" + + _, height, width = mask.shape + valid_height = torch.sum(mask[:, :, 0], 1) + valid_width = torch.sum(mask[:, 0, :], 1) + valid_ratio_heigth = valid_height.float() / height + valid_ratio_width = valid_width.float() / width + valid_ratio = torch.stack([valid_ratio_width, valid_ratio_heigth], -1) + return valid_ratio + + def generate_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes): + """Generate the encoder output proposals from encoded enc_output. + + Args: + enc_output (`torch.Tensor[batch_size, sequence_length, hidden_size]`): Output of the encoder. + padding_mask (`torch.Tensor[batch_size, sequence_length]`): Padding mask for `enc_output`. + spatial_shapes (`torch.Tensor[num_feature_levels, 2]`): Spatial shapes of the feature maps. + + Returns: + `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction. + - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to + directly predict a bounding box. (without the need of a decoder) + - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse + sigmoid. 
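+
+            The proposals are anchor-like boxes: for each position of each feature level, the box center is the
+            normalized cell center and the width/height are set to `0.05 * 2**level`, so coarser levels propose
+            larger boxes. The inverse sigmoid puts them in logit space, where a bbox head can refine them additively.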
+ """ + batch_size = enc_output.shape[0] + proposals = [] + current_position = 0 + for level, (height, width) in enumerate(spatial_shapes): + mask_flatten_ = padding_mask[:, current_position : (current_position + height * width)] + mask_flatten_ = mask_flatten_.view(batch_size, height, width, 1) + valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1) + valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1) + + grid_y, grid_x = meshgrid( + torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), + torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), + indexing="ij", + ) + grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) + + scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2) + grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale + width_heigth = torch.ones_like(grid) * 0.05 * (2.0**level) + proposal = torch.cat((grid, width_heigth), -1).view(batch_size, -1, 4) + proposals.append(proposal) + current_position += height * width + + output_proposals = torch.cat(proposals, 1) + output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True) + output_proposals = torch.log(output_proposals / (1 - output_proposals)) # inverse sigmoid + output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf")) + output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf")) + + # assign each pixel as an object query + object_query = enc_output + object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0)) + object_query = object_query.masked_fill(~output_proposals_valid, float(0)) + object_query = self.enc_output_norm(self.enc_output(object_query)) + return object_query, output_proposals + + @add_start_docstrings_to_model_forward(GROUNDING_DINO_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=GroundingDinoModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Tensor, + input_ids: Tensor, + token_type_ids: Optional[Tensor] = None, + attention_mask: Optional[Tensor] = None, + pixel_mask: Optional[Tensor] = None, + encoder_outputs=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoProcessor, AutoModel + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + >>> text = "a cat." 
+ + >>> processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-tiny") + >>> model = AutoModel.from_pretrained("IDEA-Research/grounding-dino-tiny") + + >>> inputs = processor(images=image, text=text, return_tensors="pt") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + [1, 900, 256] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + text_self_attention_masks, position_ids = generate_masks_with_special_tokens_and_transfer_map(input_ids) + + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + text_token_mask = attention_mask.bool() # just to avoid renaming everywhere + + max_text_len = self.config.max_text_len + if text_self_attention_masks.shape[1] > max_text_len: + text_self_attention_masks = text_self_attention_masks[:, :max_text_len, :max_text_len] + position_ids = position_ids[:, :max_text_len] + input_ids = input_ids[:, :max_text_len] + token_type_ids = token_type_ids[:, :max_text_len] + text_token_mask = text_token_mask[:, :max_text_len] + + # Extract text features from text backbone + text_outputs = self.text_backbone( + input_ids, text_self_attention_masks, token_type_ids, position_ids, return_dict=return_dict + ) + text_features = text_outputs.last_hidden_state if return_dict else text_outputs[0] + text_features = self.text_projection(text_features) + + batch_size, num_channels, height, width = pixel_values.shape + device = pixel_values.device + + if pixel_mask is None: + pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device) + + # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper) + # First, sent pixel_values + pixel_mask through Backbone to obtain the features + # which is a list of tuples + vision_features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) + + # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) + feature_maps = [] + masks = [] + for level, (source, mask) in enumerate(vision_features): + feature_maps.append(self.input_proj_vision[level](source)) + masks.append(mask) + + # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage + if self.config.num_feature_levels > len(feature_maps): + _len_sources = len(feature_maps) + for level in range(_len_sources, self.config.num_feature_levels): + if level == _len_sources: + source = self.input_proj_vision[level](vision_features[-1][0]) + else: + source = self.input_proj_vision[level](feature_maps[-1]) + mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0] + pos_l = self.backbone.position_embedding(source, mask).to(source.dtype) + feature_maps.append(source) + masks.append(mask) + position_embeddings_list.append(pos_l) + + # Create queries + query_embeds = None + if self.config.embedding_init_target or self.config.two_stage: + query_embeds = self.query_position_embeddings.weight + + # Prepare encoder inputs (by flattening) + source_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for level, (source, mask, 
pos_embed) in enumerate(zip(feature_maps, masks, position_embeddings_list)): + batch_size, num_channels, height, width = source.shape + spatial_shape = (height, width) + spatial_shapes.append(spatial_shape) + source = source.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + source_flatten.append(source) + mask_flatten.append(mask) + source_flatten = torch.cat(source_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) + valid_ratios = valid_ratios.float() + + # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder + # Also provide spatial_shapes, level_start_index and valid_ratios + if encoder_outputs is None: + encoder_outputs = self.encoder( + vision_features=source_flatten, + vision_attention_mask=~mask_flatten, + vision_position_embedding=lvl_pos_embed_flatten, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + text_features=text_features, + text_attention_mask=~text_token_mask, + text_position_embedding=None, + text_self_attention_masks=~text_self_attention_masks, + text_position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a GroundingDinoEncoderOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, GroundingDinoEncoderOutput): + encoder_outputs = GroundingDinoEncoderOutput( + last_hidden_state_vision=encoder_outputs[0], + last_hidden_state_text=encoder_outputs[1], + vision_hidden_states=encoder_outputs[2] if output_hidden_states else None, + text_hidden_states=encoder_outputs[3] if output_hidden_states else None, + attentions=encoder_outputs[-1] if output_attentions else None, + ) + + # Fifth, prepare decoder inputs + enc_outputs_class = None + enc_outputs_coord_logits = None + if self.config.two_stage: + object_query_embedding, output_proposals = self.generate_encoder_output_proposals( + encoder_outputs[0], ~mask_flatten, spatial_shapes + ) + + # hack implementation as in two-stage Deformable DETR + # apply a detection head to each pixel (A.4 in paper) + # linear projection for bounding box binary classification (i.e. 
foreground and background) + enc_outputs_class = self.encoder_output_class_embed( + object_query_embedding, encoder_outputs[1], text_token_mask + ) + # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch) + delta_bbox = self.encoder_output_bbox_embed(object_query_embedding) + enc_outputs_coord_logits = delta_bbox + output_proposals + + # only keep top scoring `config.num_queries` proposals + topk = self.config.num_queries + topk_logits = enc_outputs_class.max(-1)[0] + topk_proposals = torch.topk(topk_logits, topk, dim=1)[1] + topk_coords_logits = torch.gather( + enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) + ) + + topk_coords_logits = topk_coords_logits.detach() + reference_points = topk_coords_logits.sigmoid() + init_reference_points = reference_points + if query_embeds is not None: + target = query_embeds.unsqueeze(0).repeat(batch_size, 1, 1) + else: + target = torch.gather( + object_query_embedding, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model) + ).detach() + else: + target = query_embeds.unsqueeze(0).repeat(batch_size, 1, 1) + reference_points = self.reference_points.weight.unsqueeze(0).repeat(batch_size, 1, 1).sigmoid() + init_reference_points = reference_points + + decoder_outputs = self.decoder( + inputs_embeds=target, + vision_encoder_hidden_states=encoder_outputs[0], + vision_encoder_attention_mask=mask_flatten, + text_encoder_hidden_states=encoder_outputs[1], + text_encoder_attention_mask=~text_token_mask, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + self_attn_mask=None, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None) + tuple_outputs = ( + (decoder_outputs[0], init_reference_points) + decoder_outputs[1:] + encoder_outputs + enc_outputs + ) + + return tuple_outputs + + return GroundingDinoModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + init_reference_points=init_reference_points, + intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, + intermediate_reference_points=decoder_outputs.intermediate_reference_points, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + encoder_last_hidden_state_vision=encoder_outputs.last_hidden_state_vision, + encoder_last_hidden_state_text=encoder_outputs.last_hidden_state_text, + encoder_vision_hidden_states=encoder_outputs.vision_hidden_states, + encoder_text_hidden_states=encoder_outputs.text_hidden_states, + encoder_attentions=encoder_outputs.attentions, + enc_outputs_class=enc_outputs_class, + enc_outputs_coord_logits=enc_outputs_coord_logits, + ) + + +# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead +class GroundingDinoMLPPredictionHead(nn.Module): + """ + Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, + height and width of a bounding box w.r.t. an image. 
+ + Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py + + """ + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + + +# Copied from transformers.models.detr.modeling_detr._upcast +def _upcast(t: Tensor) -> Tensor: + # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type + if t.is_floating_point(): + return t if t.dtype in (torch.float32, torch.float64) else t.float() + else: + return t if t.dtype in (torch.int32, torch.int64) else t.int() + + +# Copied from transformers.models.detr.modeling_detr.box_area +def box_area(boxes: Tensor) -> Tensor: + """ + Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. + + Args: + boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): + Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 + < x2` and `0 <= y1 < y2`. + + Returns: + `torch.FloatTensor`: a tensor containing the area for each box. + """ + boxes = _upcast(boxes) + return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) + + +# Copied from transformers.models.detr.modeling_detr.box_iou +def box_iou(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] + inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / union + return iou, union + + +# Copied from transformers.models.detr.modeling_detr.generalized_box_iou +def generalized_box_iou(boxes1, boxes2): + """ + Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. + + Returns: + `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) + """ + # degenerate boxes gives inf / nan results + # so do an early check + if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): + raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") + if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): + raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") + iou, union = box_iou(boxes1, boxes2) + + top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] + area = width_height[:, :, 0] * width_height[:, :, 1] + + return iou - (area - union) / area + + +# Copied from transformers.models.detr.modeling_detr._max_by_axis +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + +# Copied from transformers.models.detr.modeling_detr.dice_loss +def dice_loss(inputs, targets, num_boxes): + """ + Compute the DICE loss, similar to generalized IOU for masks + + Args: + inputs: A float tensor of arbitrary shape. 
+ The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs (0 for the negative class and 1 for the positive + class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * (inputs * targets).sum(1) + denominator = inputs.sum(-1) + targets.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + return loss.sum() / num_boxes + + +# Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss +def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): + """ + Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. + + Args: + inputs (`torch.FloatTensor` of arbitrary shape): + The predictions for each example. + targets (`torch.FloatTensor` with the same shape as `inputs`) + A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class + and 1 for the positive class). + alpha (`float`, *optional*, defaults to `0.25`): + Optional weighting factor in the range (0,1) to balance positive vs. negative examples. + gamma (`int`, *optional*, defaults to `2`): + Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. + + Returns: + Loss tensor + """ + prob = inputs.sigmoid() + ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + # add modulating factor + p_t = prob * targets + (1 - prob) * (1 - targets) + loss = ce_loss * ((1 - p_t) ** gamma) + + if alpha >= 0: + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss = alpha_t * loss + + return loss.mean(1).sum() / num_boxes + + +# Copied from transformers.models.detr.modeling_detr.NestedTensor +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device): + cast_tensor = self.tensors.to(device) + mask = self.mask + if mask is not None: + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +# Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + if tensor_list[0].ndim == 3: + max_size = _max_by_axis([list(img.shape) for img in tensor_list]) + batch_shape = [len(tensor_list)] + max_size + batch_size, num_channels, height, width = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], : img.shape[2]] = False + else: + raise ValueError("Only 3-dimensional tensors are supported") + return NestedTensor(tensor, mask) + + +# Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrHungarianMatcher with DeformableDetr->GroundingDino +class GroundingDinoHungarianMatcher(nn.Module): + """ + This class computes an assignment between the targets and the predictions of the network. + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more + predictions than targets. 
In this case, we do a 1-to-1 matching of the best predictions, while the others are + un-matched (and thus treated as non-objects). + + Args: + class_cost: + The relative weight of the classification error in the matching cost. + bbox_cost: + The relative weight of the L1 error of the bounding box coordinates in the matching cost. + giou_cost: + The relative weight of the giou loss of the bounding box in the matching cost. + """ + + def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): + super().__init__() + requires_backends(self, ["scipy"]) + + self.class_cost = class_cost + self.bbox_cost = bbox_cost + self.giou_cost = giou_cost + if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: + raise ValueError("All costs of the Matcher can't be 0") + + @torch.no_grad() + def forward(self, outputs, targets): + """ + Args: + outputs (`dict`): + A dictionary that contains at least these entries: + * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. + targets (`List[dict]`): + A list of targets (len(targets) = batch_size), where each target is a dict containing: + * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of + ground-truth + objects in the target) containing the class labels + * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. + + Returns: + `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + batch_size, num_queries = outputs["logits"].shape[:2] + + # We flatten to compute the cost matrices in a batch + out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] + out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] + + # Also concat the target labels and boxes + target_ids = torch.cat([v["class_labels"] for v in targets]) + target_bbox = torch.cat([v["boxes"] for v in targets]) + + # Compute the classification cost. 
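+ # The cost below mirrors the sigmoid focal loss used at training time: `pos_cost_class` is the focal
+ # penalty a query would pay if its target class were treated as positive, `neg_cost_class` the penalty if it
+ # were negative, and their difference (gathered at the ground-truth class ids) is lowest for queries that
+ # already score the target class highly, which is what the Hungarian assignment below tries to exploit.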
+ alpha = 0.25 + gamma = 2.0 + neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) + pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) + class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] + + # Compute the L1 cost between boxes + bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) + + # Compute the giou cost between boxes + giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) + + # Final cost matrix + cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost + cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() + + sizes = [len(v["boxes"]) for v in targets] + indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] + return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] + + +# Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss with DeformableDetr->GroundingDino +class GroundingDinoLoss(nn.Module): + """ + This class computes the losses for `GroundingDinoForObjectDetection`. The process happens in two steps: 1) we + compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of + matched ground-truth / prediction (supervise class and box). + + Args: + matcher (`GroundingDinoHungarianMatcher`): + Module able to compute a matching between targets and proposals. + num_classes (`int`): + Number of object categories, omitting the special no-object category. + focal_alpha (`float`): + Alpha parameter in focal loss. + losses (`List[str]`): + List of all the losses to be applied. See `get_loss` for a list of all available losses. + """ + + def __init__(self, matcher, num_classes, focal_alpha, losses): + super().__init__() + self.matcher = matcher + self.num_classes = num_classes + self.focal_alpha = focal_alpha + self.losses = losses + + # removed logging parameter, which was part of the original implementation + def loss_labels(self, outputs, targets, indices, num_boxes): + """ + Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor + of dim [nb_target_boxes] + """ + if "logits" not in outputs: + raise KeyError("No logits were found in the outputs") + source_logits = outputs["logits"] + + idx = self._get_source_permutation_idx(indices) + target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) + target_classes = torch.full( + source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device + ) + target_classes[idx] = target_classes_o + + target_classes_onehot = torch.zeros( + [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], + dtype=source_logits.dtype, + layout=source_logits.layout, + device=source_logits.device, + ) + target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) + + target_classes_onehot = target_classes_onehot[:, :, :-1] + loss_ce = ( + sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) + * source_logits.shape[1] + ) + losses = {"loss_ce": loss_ce} + + return losses + + @torch.no_grad() + # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_cardinality + def loss_cardinality(self, outputs, targets, indices, num_boxes): + """ + Compute the cardinality error, i.e. 
the absolute error in the number of predicted non-empty boxes. + + This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. + """ + logits = outputs["logits"] + device = logits.device + target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) + # Count the number of predictions that are NOT "no-object" (which is the last class) + card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) + card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) + losses = {"cardinality_error": card_err} + return losses + + # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_boxes + def loss_boxes(self, outputs, targets, indices, num_boxes): + """ + Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. + + Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes + are expected in format (center_x, center_y, w, h), normalized by the image size. + """ + if "pred_boxes" not in outputs: + raise KeyError("No predicted boxes found in outputs") + idx = self._get_source_permutation_idx(indices) + source_boxes = outputs["pred_boxes"][idx] + target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) + + loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") + + losses = {} + losses["loss_bbox"] = loss_bbox.sum() / num_boxes + + loss_giou = 1 - torch.diag( + generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) + ) + losses["loss_giou"] = loss_giou.sum() / num_boxes + return losses + + # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_source_permutation_idx + def _get_source_permutation_idx(self, indices): + # permute predictions following indices + batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) + source_idx = torch.cat([source for (source, _) in indices]) + return batch_idx, source_idx + + # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_target_permutation_idx + def _get_target_permutation_idx(self, indices): + # permute targets following indices + batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) + target_idx = torch.cat([target for (_, target) in indices]) + return batch_idx, target_idx + + def get_loss(self, loss, outputs, targets, indices, num_boxes): + loss_map = { + "labels": self.loss_labels, + "cardinality": self.loss_cardinality, + "boxes": self.loss_boxes, + } + if loss not in loss_map: + raise ValueError(f"Loss {loss} not supported") + return loss_map[loss](outputs, targets, indices, num_boxes) + + def forward(self, outputs, targets): + """ + This performs the loss computation. + + Args: + outputs (`dict`, *optional*): + Dictionary of tensors, see the output specification of the model for the format. + targets (`List[dict]`, *optional*): + List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the + losses applied, see each loss' doc. 
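+
+ Returns:
+ `Dict[str, torch.Tensor]`: a dictionary mapping each computed loss name (e.g. `"loss_ce"`, `"loss_bbox"`,
+ `"loss_giou"`, plus `_{i}`-suffixed entries for auxiliary decoder layers and `_enc`-suffixed entries for
+ encoder outputs, when present) to its value.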
+ """
+ outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs" and k != "enc_outputs"}
+
+ # Retrieve the matching between the outputs of the last layer and the targets
+ indices = self.matcher(outputs_without_aux, targets)
+
+ # Compute the average number of target boxes across all nodes, for normalization purposes
+ num_boxes = sum(len(t["class_labels"]) for t in targets)
+ num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
+ world_size = 1
+ if is_accelerate_available():
+ if PartialState._shared_state != {}:
+ num_boxes = reduce(num_boxes)
+ world_size = PartialState().num_processes
+ num_boxes = torch.clamp(num_boxes / world_size, min=1).item()
+
+ # Compute all the requested losses
+ losses = {}
+ for loss in self.losses:
+ losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
+
+ # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
+ if "auxiliary_outputs" in outputs:
+ for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]):
+ indices = self.matcher(auxiliary_outputs, targets)
+ for loss in self.losses:
+ l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
+ l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
+ losses.update(l_dict)
+
+ if "enc_outputs" in outputs:
+ enc_outputs = outputs["enc_outputs"]
+ bin_targets = copy.deepcopy(targets)
+ for bt in bin_targets:
+ bt["class_labels"] = torch.zeros_like(bt["class_labels"])
+ indices = self.matcher(enc_outputs, bin_targets)
+ for loss in self.losses:
+ l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes)
+ l_dict = {k + "_enc": v for k, v in l_dict.items()}
+ losses.update(l_dict)
+
+ return losses
+
+
+@add_start_docstrings(
+ """
+ Grounding DINO Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top,
+ for tasks such as COCO detection.
+ """, + GROUNDING_DINO_START_DOCSTRING, +) +class GroundingDinoForObjectDetection(GroundingDinoPreTrainedModel): + # When using clones, all layers > 0 will be clones, but layer 0 *is* required + # the bbox_embed in the decoder are all clones though + _tied_weights_keys = [r"bbox_embed\.[1-9]\d*", r"model\.decoder\.bbox_embed\.[0-9]\d*"] + + def __init__(self, config: GroundingDinoConfig): + super().__init__(config) + + self.model = GroundingDinoModel(config) + _class_embed = GroundingDinoContrastiveEmbedding(config) + + if config.decoder_bbox_embed_share: + _bbox_embed = GroundingDinoMLPPredictionHead( + input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 + ) + self.bbox_embed = nn.ModuleList([_bbox_embed for _ in range(config.decoder_layers)]) + else: + for _ in range(config.decoder_layers): + _bbox_embed = GroundingDinoMLPPredictionHead( + input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 + ) + self.bbox_embed = nn.ModuleList([_bbox_embed for _ in range(config.decoder_layers)]) + self.class_embed = nn.ModuleList([_class_embed for _ in range(config.decoder_layers)]) + # hack for box-refinement + self.model.decoder.bbox_embed = self.bbox_embed + # hack implementation for two-stage + self.model.decoder.class_embed = self.class_embed + + # Initialize weights and apply final processing + self.post_init() + + # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py + @torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_coord): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. + return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] + + @add_start_docstrings_to_model_forward(GROUNDING_DINO_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=GroundingDinoObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: torch.FloatTensor, + input_ids: torch.LongTensor, + token_type_ids: torch.LongTensor = None, + attention_mask: torch.LongTensor = None, + pixel_mask: Optional[torch.BoolTensor] = None, + encoder_outputs: Optional[Union[GroundingDinoEncoderOutput, Tuple]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: List[Dict[str, Union[torch.LongTensor, torch.FloatTensor]]] = None, + ): + r""" + labels (`List[Dict]` of len `(batch_size,)`, *optional*): + Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the + following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch + respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes + in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. + + Returns: + + Examples: + + ```python + >>> from transformers import AutoProcessor, GroundingDinoForObjectDetection + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + >>> text = "a cat." 
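+ >>> # Grounding DINO text queries are conventionally lowercased and end with a dot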
+ + >>> processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-tiny") + >>> model = GroundingDinoForObjectDetection.from_pretrained("IDEA-Research/grounding-dino-tiny") + + >>> inputs = processor(images=image, text=text, return_tensors="pt") + >>> outputs = model(**inputs) + + >>> # convert outputs (bounding boxes and class logits) to COCO API + >>> target_sizes = torch.tensor([image.size[::-1]]) + >>> results = processor.image_processor.post_process_object_detection( + ... outputs, threshold=0.35, target_sizes=target_sizes + ... )[0] + >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): + ... box = [round(i, 1) for i in box.tolist()] + ... print(f"Detected {label.item()} with confidence " f"{round(score.item(), 2)} at location {box}") + Detected 1 with confidence 0.45 at location [344.8, 23.2, 637.4, 373.8] + Detected 1 with confidence 0.41 at location [11.9, 51.6, 316.6, 472.9] + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + + # First, sent images through Grounding DINO base model to obtain encoder + decoder outputs + outputs = self.model( + pixel_values=pixel_values, + input_ids=input_ids, + token_type_ids=token_type_ids, + attention_mask=attention_mask, + pixel_mask=pixel_mask, + encoder_outputs=encoder_outputs, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + idx = 5 + (1 if output_attentions else 0) + (1 if output_hidden_states else 0) + enc_text_hidden_state = outputs.encoder_last_hidden_state_text if return_dict else outputs[idx] + hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2] + init_reference_points = outputs.init_reference_points if return_dict else outputs[1] + inter_references_points = outputs.intermediate_reference_points if return_dict else outputs[3] + + # class logits + predicted bounding boxes + outputs_classes = [] + outputs_coords = [] + + # hidden_states are of shape (batch_size, num_stages, height, width) + # predict class and bounding box deltas for each stage + num_levels = hidden_states.shape[1] + for level in range(num_levels): + if level == 0: + reference = init_reference_points + else: + reference = inter_references_points[:, level - 1] + reference = torch.special.logit(reference, eps=1e-5) + outputs_class = self.class_embed[level]( + vision_hidden_state=hidden_states[:, level], + text_hidden_state=enc_text_hidden_state, + text_token_mask=attention_mask.bool(), + ) + delta_bbox = self.bbox_embed[level](hidden_states[:, level]) + + reference_coordinates = reference.shape[-1] + if reference_coordinates == 4: + outputs_coord_logits = delta_bbox + reference + elif reference_coordinates == 2: + delta_bbox[..., :2] += reference + outputs_coord_logits = delta_bbox + else: + raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}") + outputs_coord = outputs_coord_logits.sigmoid() + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + outputs_class = torch.stack(outputs_classes) + outputs_coord = torch.stack(outputs_coords) + + logits = outputs_class[-1] + pred_boxes = outputs_coord[-1] + + loss, loss_dict, auxiliary_outputs = None, None, None + if labels is not None: + # First: create the matcher + matcher = GroundingDinoHungarianMatcher( + class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, 
giou_cost=self.config.giou_cost + ) + # Second: create the criterion + losses = ["labels", "boxes", "cardinality"] + criterion = GroundingDinoLoss( + matcher=matcher, + num_classes=self.config.num_labels, + focal_alpha=self.config.focal_alpha, + losses=losses, + ) + criterion.to(self.device) + # Third: compute the losses, based on outputs and labels + outputs_loss = {} + outputs_loss["logits"] = logits + outputs_loss["pred_boxes"] = pred_boxes + if self.config.auxiliary_loss: + auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) + outputs_loss["auxiliary_outputs"] = auxiliary_outputs + if self.config.two_stage: + enc_outputs_coord = outputs[-1].sigmoid() + outputs_loss["enc_outputs"] = {"logits": outputs[-2], "pred_boxes": enc_outputs_coord} + + loss_dict = criterion(outputs_loss, labels) + # Fourth: compute total loss, as a weighted sum of the various losses + weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} + weight_dict["loss_giou"] = self.config.giou_loss_coefficient + if self.config.auxiliary_loss: + aux_weight_dict = {} + for i in range(self.config.decoder_layers - 1): + aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) + weight_dict.update(aux_weight_dict) + loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) + + if not return_dict: + if auxiliary_outputs is not None: + output = (logits, pred_boxes) + auxiliary_outputs + outputs + else: + output = (logits, pred_boxes) + outputs + tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output + + return tuple_outputs + + dict_outputs = GroundingDinoObjectDetectionOutput( + loss=loss, + loss_dict=loss_dict, + logits=logits, + pred_boxes=pred_boxes, + last_hidden_state=outputs.last_hidden_state, + auxiliary_outputs=auxiliary_outputs, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + encoder_last_hidden_state_vision=outputs.encoder_last_hidden_state_vision, + encoder_last_hidden_state_text=outputs.encoder_last_hidden_state_text, + encoder_vision_hidden_states=outputs.encoder_vision_hidden_states, + encoder_text_hidden_states=outputs.encoder_text_hidden_states, + encoder_attentions=outputs.encoder_attentions, + intermediate_hidden_states=outputs.intermediate_hidden_states, + intermediate_reference_points=outputs.intermediate_reference_points, + init_reference_points=outputs.init_reference_points, + enc_outputs_class=outputs.enc_outputs_class, + enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, + ) + + return dict_outputs diff --git a/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/processing_grounding_dino.py b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/processing_grounding_dino.py new file mode 100644 index 0000000000000000000000000000000000000000..44b99811d931ce5876b469fc20bc066730d5b63b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/grounding_dino/processing_grounding_dino.py @@ -0,0 +1,228 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Processor class for Grounding DINO. +""" + +from typing import List, Optional, Tuple, Union + +from ...image_processing_utils import BatchFeature +from ...image_transforms import center_to_corners_format +from ...image_utils import ImageInput +from ...processing_utils import ProcessorMixin +from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy +from ...utils import TensorType, is_torch_available + + +if is_torch_available(): + import torch + + +def get_phrases_from_posmap(posmaps, input_ids): + """Get token ids of phrases from posmaps and input_ids. + + Args: + posmaps (`torch.BoolTensor` of shape `(num_boxes, hidden_size)`): + A boolean tensor of text-thresholded logits related to the detected bounding boxes. + input_ids (`torch.LongTensor`) of shape `(sequence_length, )`): + A tensor of token ids. + """ + left_idx = 0 + right_idx = posmaps.shape[-1] - 1 + + # Avoiding altering the input tensor + posmaps = posmaps.clone() + + posmaps[:, 0 : left_idx + 1] = False + posmaps[:, right_idx:] = False + + token_ids = [] + for posmap in posmaps: + non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist() + token_ids.append([input_ids[i] for i in non_zero_idx]) + + return token_ids + + +class GroundingDinoProcessor(ProcessorMixin): + r""" + Constructs a Grounding DINO processor which wraps a Deformable DETR image processor and a BERT tokenizer into a + single processor. + + [`GroundingDinoProcessor`] offers all the functionalities of [`GroundingDinoImageProcessor`] and + [`AutoTokenizer`]. See the docstring of [`~GroundingDinoProcessor.__call__`] and [`~GroundingDinoProcessor.decode`] + for more information. + + Args: + image_processor (`GroundingDinoImageProcessor`): + An instance of [`GroundingDinoImageProcessor`]. The image processor is a required input. + tokenizer (`AutoTokenizer`): + An instance of ['PreTrainedTokenizer`]. The tokenizer is a required input. + """ + + attributes = ["image_processor", "tokenizer"] + image_processor_class = "GroundingDinoImageProcessor" + tokenizer_class = "AutoTokenizer" + + def __init__(self, image_processor, tokenizer): + super().__init__(image_processor, tokenizer) + + def __call__( + self, + images: ImageInput = None, + text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_token_type_ids: bool = True, + return_length: bool = False, + verbose: bool = True, + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs, + ) -> BatchEncoding: + """ + This method uses [`GroundingDinoImageProcessor.__call__`] method to prepare image(s) for the model, and + [`BertTokenizerFast.__call__`] to prepare text for the model. 
+ + Please refer to the docstring of the above two methods for more information. + """ + if images is None and text is None: + raise ValueError("You have to specify either images or text.") + + # Get only text + if images is not None: + encoding_image_processor = self.image_processor(images, return_tensors=return_tensors) + else: + encoding_image_processor = BatchFeature() + + if text is not None: + text_encoding = self.tokenizer( + text=text, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_token_type_ids=return_token_type_ids, + return_length=return_length, + verbose=verbose, + return_tensors=return_tensors, + **kwargs, + ) + else: + text_encoding = BatchEncoding() + + text_encoding.update(encoding_image_processor) + + return text_encoding + + # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + @property + # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + def post_process_grounded_object_detection( + self, + outputs, + input_ids, + box_threshold: float = 0.25, + text_threshold: float = 0.25, + target_sizes: Union[TensorType, List[Tuple]] = None, + ): + """ + Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, + bottom_right_x, bottom_right_y) format and get the associated text label. + + Args: + outputs ([`GroundingDinoObjectDetectionOutput`]): + Raw outputs of the model. + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The token ids of the input text. + box_threshold (`float`, *optional*, defaults to 0.25): + Score threshold to keep object detection predictions. + text_threshold (`float`, *optional*, defaults to 0.25): + Score threshold to keep text detection predictions. + target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): + Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size + `(height, width)` of each image in the batch. If unset, predictions will not be resized. + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image + in the batch as predicted by the model. 
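+
+ Example (an illustrative sketch; assumes `outputs`, `inputs` and `image` were obtained as in the
+ [`GroundingDinoForObjectDetection`] forward docstring):
+
+ ```python
+ >>> results = processor.post_process_grounded_object_detection(
+ ... outputs, inputs.input_ids, box_threshold=0.4, text_threshold=0.3, target_sizes=[image.size[::-1]]
+ ... )
+ >>> sorted(results[0].keys())
+ ['boxes', 'labels', 'scores']
+ ```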
+ """ + logits, boxes = outputs.logits, outputs.pred_boxes + + if target_sizes is not None: + if len(logits) != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + + probs = torch.sigmoid(logits) # (batch_size, num_queries, 256) + scores = torch.max(probs, dim=-1)[0] # (batch_size, num_queries) + + # Convert to [x0, y0, x1, y1] format + boxes = center_to_corners_format(boxes) + + # Convert from relative [0, 1] to absolute [0, height] coordinates + if target_sizes is not None: + if isinstance(target_sizes, List): + img_h = torch.Tensor([i[0] for i in target_sizes]) + img_w = torch.Tensor([i[1] for i in target_sizes]) + else: + img_h, img_w = target_sizes.unbind(1) + + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) + boxes = boxes * scale_fct[:, None, :] + + results = [] + for idx, (s, b, p) in enumerate(zip(scores, boxes, probs)): + score = s[s > box_threshold] + box = b[s > box_threshold] + prob = p[s > box_threshold] + label_ids = get_phrases_from_posmap(prob > text_threshold, input_ids[idx]) + label = self.batch_decode(label_ids) + results.append({"scores": score, "labels": label, "boxes": box}) + + return results diff --git a/venv/lib/python3.10/site-packages/transformers/models/seggpt/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..49649c92865da68401a34d21ded541f1faa6c663 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__init__.py @@ -0,0 +1,71 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
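+
+# This module follows the standard transformers lazy-import layout: `_import_structure` declares the public
+# objects, the guards below drop entries whose optional backends (torch / vision) are missing, and at runtime
+# the module is replaced by a `_LazyModule` so the heavy submodules are only imported on first access.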
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_seggpt": ["SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegGptConfig", "SegGptOnnxConfig"] +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_seggpt"] = [ + "SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "SegGptModel", + "SegGptPreTrainedModel", + "SegGptForImageSegmentation", + ] + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_seggpt"] = ["SegGptImageProcessor"] + +if TYPE_CHECKING: + from .configuration_seggpt import SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, SegGptConfig, SegGptOnnxConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_seggpt import ( + SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST, + SegGptForImageSegmentation, + SegGptModel, + SegGptPreTrainedModel, + ) + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_seggpt import SegGptImageProcessor + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..209375eba1da91868698d9244c77c378f8c5ccde Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/configuration_seggpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/configuration_seggpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0eb6a41e60b7b8da80c9cf1a96de0651b58ed488 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/configuration_seggpt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/convert_seggpt_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/convert_seggpt_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1d4de05d497d77acbf8ac60ed14a1a355325526 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/convert_seggpt_to_hf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/image_processing_seggpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/image_processing_seggpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6da27cde48ca2859cb7d920da64702de9a0d1c6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/image_processing_seggpt.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/modeling_seggpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/modeling_seggpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d209f1263f2462f68e16b078a08dc81f5d640ab9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/modeling_seggpt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/seggpt/configuration_seggpt.py b/venv/lib/python3.10/site-packages/transformers/models/seggpt/configuration_seggpt.py new file mode 100644 index 0000000000000000000000000000000000000000..38607d775a65826e1f2753aa5aed91303aa7a38d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/seggpt/configuration_seggpt.py @@ -0,0 +1,144 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" SegGpt model configuration""" + + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class SegGptConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`SegGptModel`]. It is used to instantiate a SegGPT + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the SegGPT + [BAAI/seggpt-vit-large](https://huggingface.co/BAAI/seggpt-vit-large) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + hidden_size (`int`, *optional*, defaults to 1024): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers. + image_size (`List[int]`, *optional*, defaults to `[896, 448]`): + The size (resolution) of each image. 
+ patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether to add a bias to the queries, keys and values. + mlp_dim (`int`, *optional*): + The dimensionality of the MLP layer in the Transformer encoder. If unset, defaults to + `hidden_size` * 4. + drop_path_rate (`float`, *optional*, defaults to 0.1): + The drop path rate for the dropout layers. + pretrain_image_size (`int`, *optional*, defaults to 224): + The pretrained size of the absolute position embeddings. + decoder_hidden_size (`int`, *optional*, defaults to 64): + Hidden size for decoder. + use_relative_position_embeddings (`bool`, *optional*, defaults to `True`): + Whether to use relative position embeddings in the attention layers. + merge_index (`int`, *optional*, defaults to 2): + The index of the encoder layer to merge the embeddings. + intermediate_hidden_state_indices (`List[int]`, *optional*, defaults to `[5, 11, 17, 23]`): + The indices of the encoder layers which we store as features for the decoder. + beta (`float`, *optional*, defaults to 0.01): + Regularization factor for SegGptLoss (smooth-l1 loss). + + Example: + + ```python + >>> from transformers import SegGptConfig, SegGptModel + + >>> # Initializing a SegGPT seggpt-vit-large style configuration + >>> configuration = SegGptConfig() + + >>> # Initializing a model (with random weights) from the seggpt-vit-large style configuration + >>> model = SegGptModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "seggpt" + + def __init__( + self, + hidden_size=1024, + num_hidden_layers=24, + num_attention_heads=16, + hidden_act="gelu", + hidden_dropout_prob=0.0, + initializer_range=0.02, + layer_norm_eps=1e-6, + image_size=[896, 448], + patch_size=16, + num_channels=3, + qkv_bias=True, + mlp_dim=None, + drop_path_rate=0.1, + pretrain_image_size=224, + decoder_hidden_size=64, + use_relative_position_embeddings=True, + merge_index=2, + intermediate_hidden_state_indices=[5, 11, 17, 23], + beta=0.01, + **kwargs, + ): + super().__init__(**kwargs) + + if merge_index > min(intermediate_hidden_state_indices): + raise ValueError( + f"Merge index must be less than the minimum encoder output index, but got {merge_index=} and {intermediate_hidden_state_indices=}" + ) + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.qkv_bias = qkv_bias + self.drop_path_rate = drop_path_rate + self.pretrain_image_size = pretrain_image_size + self.decoder_hidden_size = decoder_hidden_size + self.use_relative_position_embeddings = use_relative_position_embeddings + self.merge_index = merge_index + self.intermediate_hidden_state_indices = intermediate_hidden_state_indices + self.beta = beta + self.mlp_dim = int(hidden_size * 4) if mlp_dim is None else mlp_dim diff --git a/venv/lib/python3.10/site-packages/transformers/models/seggpt/convert_seggpt_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/seggpt/convert_seggpt_to_hf.py new file mode 100644 index 
0000000000000000000000000000000000000000..a13372dfbb1db1f8de27a1fa70745f30f71a20a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/seggpt/convert_seggpt_to_hf.py @@ -0,0 +1,222 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert SegGPT checkpoints from the original repository. + +URL: https://github.com/baaivision/Painter/tree/main/SegGPT +""" + + +import argparse + +import requests +import torch +from PIL import Image + +from transformers import SegGptConfig, SegGptForImageSegmentation, SegGptImageProcessor +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +# here we list all keys to be renamed (original name on the left, our name on the right) +def create_rename_keys(config): + rename_keys = [] + + # fmt: off + + # rename embedding and its parameters + rename_keys.append(("patch_embed.proj.weight", "model.embeddings.patch_embeddings.projection.weight")) + rename_keys.append(("patch_embed.proj.bias", "model.embeddings.patch_embeddings.projection.bias")) + rename_keys.append(("mask_token", "model.embeddings.mask_token")) + rename_keys.append(("segment_token_x", "model.embeddings.segment_token_input")) + rename_keys.append(("segment_token_y", "model.embeddings.segment_token_prompt")) + rename_keys.append(("type_token_cls", "model.embeddings.type_token_semantic")) + rename_keys.append(("type_token_ins", "model.embeddings.type_token_instance")) + rename_keys.append(("pos_embed", "model.embeddings.position_embeddings")) + + # rename decoder and other + rename_keys.append(("norm.weight", "model.encoder.layernorm.weight")) + rename_keys.append(("norm.bias", "model.encoder.layernorm.bias")) + rename_keys.append(("decoder_embed.weight", "decoder.decoder_embed.weight")) + rename_keys.append(("decoder_embed.bias", "decoder.decoder_embed.bias")) + rename_keys.append(("decoder_pred.0.weight", "decoder.decoder_pred.conv.weight")) + rename_keys.append(("decoder_pred.0.bias", "decoder.decoder_pred.conv.bias")) + rename_keys.append(("decoder_pred.1.weight", "decoder.decoder_pred.layernorm.weight")) + rename_keys.append(("decoder_pred.1.bias", "decoder.decoder_pred.layernorm.bias")) + rename_keys.append(("decoder_pred.3.weight", "decoder.decoder_pred.head.weight")) + rename_keys.append(("decoder_pred.3.bias", "decoder.decoder_pred.head.bias")) + + # rename blocks + for i in range(config.num_hidden_layers): + rename_keys.append((f"blocks.{i}.attn.qkv.weight", f"model.encoder.layers.{i}.attention.qkv.weight")) + rename_keys.append((f"blocks.{i}.attn.qkv.bias", f"model.encoder.layers.{i}.attention.qkv.bias")) + rename_keys.append((f"blocks.{i}.attn.proj.weight", f"model.encoder.layers.{i}.attention.proj.weight")) + rename_keys.append((f"blocks.{i}.attn.proj.bias", f"model.encoder.layers.{i}.attention.proj.bias")) + rename_keys.append((f"blocks.{i}.attn.rel_pos_h", f"model.encoder.layers.{i}.attention.rel_pos_h")) + 
rename_keys.append((f"blocks.{i}.attn.rel_pos_w", f"model.encoder.layers.{i}.attention.rel_pos_w")) + + rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"model.encoder.layers.{i}.mlp.lin1.weight")) + rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"model.encoder.layers.{i}.mlp.lin1.bias")) + rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"model.encoder.layers.{i}.mlp.lin2.weight")) + rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"model.encoder.layers.{i}.mlp.lin2.bias")) + + rename_keys.append((f"blocks.{i}.norm1.weight", f"model.encoder.layers.{i}.layernorm_before.weight")) + rename_keys.append((f"blocks.{i}.norm1.bias", f"model.encoder.layers.{i}.layernorm_before.bias")) + rename_keys.append((f"blocks.{i}.norm2.weight", f"model.encoder.layers.{i}.layernorm_after.weight")) + rename_keys.append((f"blocks.{i}.norm2.bias", f"model.encoder.layers.{i}.layernorm_after.bias")) + + # fmt: on + + return rename_keys + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +# We will verify our results on spongebob images +def prepare_input(): + image_input_url = ( + "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_2.jpg" + ) + image_prompt_url = ( + "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1.jpg" + ) + mask_prompt_url = ( + "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1_target.png" + ) + + image_input = Image.open(requests.get(image_input_url, stream=True).raw) + image_prompt = Image.open(requests.get(image_prompt_url, stream=True).raw) + mask_prompt = Image.open(requests.get(mask_prompt_url, stream=True).raw) + + return image_input, image_prompt, mask_prompt + + +@torch.no_grad() +def convert_seggpt_checkpoint(args): + model_name = args.model_name + pytorch_dump_folder_path = args.pytorch_dump_folder_path + verify_logits = args.verify_logits + push_to_hub = args.push_to_hub + + # Define default GroundingDINO configuation + config = SegGptConfig() + + # Load original checkpoint + checkpoint_url = "https://huggingface.co/BAAI/SegGpt/blob/main/seggpt_vit_large.pth" + original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"] + + # # Rename keys + new_state_dict = original_state_dict.copy() + rename_keys = create_rename_keys(config) + + for src, dest in rename_keys: + rename_key(new_state_dict, src, dest) + + # Load HF model + model = SegGptForImageSegmentation(config) + model.eval() + missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) + print("Missing keys:", missing_keys) + print("Unexpected keys:", unexpected_keys) + + input_img, prompt_img, prompt_mask = prepare_input() + image_processor = SegGptImageProcessor() + inputs = image_processor(images=input_img, prompt_images=prompt_img, prompt_masks=prompt_mask, return_tensors="pt") + + expected_prompt_pixel_values = torch.tensor( + [ + [[-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965]], + [[1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583]], + [[2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088]], + ] + ) + + expected_pixel_values = torch.tensor( + [ + [[1.6324, 1.6153, 1.5810], [1.6153, 1.5982, 1.5810], [1.5810, 1.5639, 1.5639]], + [[1.2731, 1.2556, 1.2206], [1.2556, 1.2381, 1.2031], [1.2206, 1.2031, 1.1681]], + [[1.6465, 1.6465, 1.6465], [1.6465, 1.6465, 1.6465], [1.6291, 1.6291, 1.6291]], + ] + ) + + 
expected_prompt_masks = torch.tensor( + [ + [[-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179]], + [[-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357]], + [[-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044]], + ] + ) + + assert torch.allclose(inputs.pixel_values[0, :, :3, :3], expected_pixel_values, atol=1e-4) + assert torch.allclose(inputs.prompt_pixel_values[0, :, :3, :3], expected_prompt_pixel_values, atol=1e-4) + assert torch.allclose(inputs.prompt_masks[0, :, :3, :3], expected_prompt_masks, atol=1e-4) + + torch.manual_seed(2) + outputs = model(**inputs) + print(outputs) + + if verify_logits: + expected_output = torch.tensor( + [ + [[-2.1208, -2.1190, -2.1198], [-2.1237, -2.1228, -2.1227], [-2.1232, -2.1226, -2.1228]], + [[-2.0405, -2.0396, -2.0403], [-2.0434, -2.0434, -2.0433], [-2.0428, -2.0432, -2.0434]], + [[-1.8102, -1.8088, -1.8099], [-1.8131, -1.8126, -1.8129], [-1.8130, -1.8128, -1.8131]], + ] + ) + assert torch.allclose(outputs.pred_masks[0, :, :3, :3], expected_output, atol=1e-4) + print("Looks good!") + else: + print("Converted without verifying logits") + + if pytorch_dump_folder_path is not None: + print(f"Saving model and processor for {model_name} to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + image_processor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + print(f"Pushing model and processor for {model_name} to hub") + model.push_to_hub(f"EduardoPacheco/{model_name}") + image_processor.push_to_hub(f"EduardoPacheco/{model_name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="seggpt-vit-large", + type=str, + choices=["seggpt-vit-large"], + help="Name of the SegGpt model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + parser.add_argument( + "--verify_logits", + action="store_false", + help="Whether or not to verify the logits against the original implementation.", + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + + args = parser.parse_args() + convert_seggpt_checkpoint(args) diff --git a/venv/lib/python3.10/site-packages/transformers/models/seggpt/image_processing_seggpt.py b/venv/lib/python3.10/site-packages/transformers/models/seggpt/image_processing_seggpt.py new file mode 100644 index 0000000000000000000000000000000000000000..80fb94cdc7aaf4aaa69acf80361655748bf8f321 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/seggpt/image_processing_seggpt.py @@ -0,0 +1,626 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Image processor class for SegGPT.""" + +from typing import Dict, List, Optional, Tuple, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import resize, to_channel_dimension_format +from ...image_utils import ( + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + get_channel_dimension_axis, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, +) +from ...utils import TensorType, is_torch_available, logging, requires_backends + + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +# See https://arxiv.org/pdf/2212.02499.pdf at 3.1 Redefining Output Spaces as "Images" - Semantic Segmentation from PAINTER paper +# Taken from https://github.com/Abdullah-Meda/Painter/blob/main/Painter/data/coco_semseg/gen_color_coco_panoptic_segm.py#L31 +def build_palette(num_labels: int) -> List[Tuple[int, int]]: + base = int(num_labels ** (1 / 3)) + 1 + margin = 256 // base + + # we assume that class_idx 0 is the background which is mapped to black + color_list = [(0, 0, 0)] + for location in range(num_labels): + num_seq_r = location // base**2 + num_seq_g = (location % base**2) // base + num_seq_b = location % base + + R = 255 - num_seq_r * margin + G = 255 - num_seq_g * margin + B = 255 - num_seq_b * margin + + color_list.append((R, G, B)) + + return color_list + + +def get_num_channels(image: np.ndarray, input_data_format: ChannelDimension) -> int: + if image.ndim == 2: + return 0 + + channel_idx = get_channel_dimension_axis(image, input_data_format) + return image.shape[channel_idx] + + +def mask_to_rgb( + mask: np.ndarray, + palette: Optional[List[Tuple[int, int]]] = None, + input_data_format: Optional[ChannelDimension] = None, + data_format: Optional[ChannelDimension] = None, +) -> np.ndarray: + if input_data_format is None and mask.ndim > 2: + input_data_format = infer_channel_dimension_format(mask) + + data_format = data_format if data_format is not None else input_data_format + + num_channels = get_num_channels(mask, input_data_format) + + if num_channels == 3: + return to_channel_dimension_format(mask, data_format, input_data_format) if data_format is not None else mask + + if palette is not None: + height, width = mask.shape + + rgb_mask = np.zeros((3, height, width), dtype=np.uint8) + + classes_in_mask = np.unique(mask) + + for class_idx in classes_in_mask: + rgb_value = palette[class_idx] + class_mask = (mask == class_idx).astype(np.uint8) + class_mask = np.expand_dims(class_mask, axis=-1) + class_rgb_mask = class_mask * np.array(rgb_value) + class_rgb_mask = np.moveaxis(class_rgb_mask, -1, 0) + rgb_mask += class_rgb_mask.astype(np.uint8) + + rgb_mask = np.clip(rgb_mask, 0, 255).astype(np.uint8) + + else: + rgb_mask = np.repeat(mask[None, ...], 3, axis=0) + + return ( + to_channel_dimension_format(rgb_mask, data_format, input_data_format) if data_format is not None else rgb_mask + ) + + +class SegGptImageProcessor(BaseImageProcessor): + r""" + Constructs a SegGpt image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `(size["height"], + size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. + size (`dict`, *optional*, defaults to `{"height": 448, "width": 448}`): + Size of the output image after resizing. 
Can be overridden by the `size` parameter in the `preprocess` + method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): + Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the + `preprocess` method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` + parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the + `preprocess` method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` + method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Optional[Dict[str, int]] = None, + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"height": 448, "width": 448} + size = get_size_dict(size) + self.do_resize = do_resize + self.do_rescale = do_rescale + self.do_normalize = do_normalize + self.size = size + self.resample = resample + self.rescale_factor = rescale_factor + self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD + + def get_palette(self, num_labels: int) -> List[Tuple[int, int]]: + """Build a palette to map the prompt mask from a single channel to a 3 channel RGB. + + Args: + num_labels (`int`): + Number of classes in the segmentation task (excluding the background). + + Returns: + `List[Tuple[int, int]]`: Palette to map the prompt mask from a single channel to a 3 channel RGB. + """ + return build_palette(num_labels) + + def mask_to_rgb( + self, + image: np.ndarray, + palette: Optional[List[Tuple[int, int]]] = None, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """Convert a mask to RGB format. + + Args: + image (`np.ndarray`): + Mask to convert to RGB format. If the mask is already in RGB format, it will be passed through. + palette (`List[Tuple[int, int]]`, *optional*, defaults to `None`): + Palette to use to convert the mask to RGB format. If unset, the mask is duplicated across the channel + dimension. + data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. 
If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + + Returns: + `np.ndarray`: The mask in RGB format. + """ + return mask_to_rgb( + image, + palette=palette, + data_format=data_format, + input_data_format=input_data_format, + ) + + # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image to `(size["height"], size["width"])`. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. + data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + + Returns: + `np.ndarray`: The resized image. + """ + size = get_size_dict(size) + if "height" not in size or "width" not in size: + raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}") + output_size = (size["height"], size["width"]) + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def _preprocess_step( + self, + images: ImageInput, + is_mask: bool = False, + do_resize: Optional[bool] = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + num_labels: Optional[int] = None, + **kwargs, + ): + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + is_mask (`bool`, *optional*, defaults to `False`): + Whether the image is a mask. If True, the image is converted to RGB using the palette if + `self.num_labels` is specified otherwise RGB is achieved by duplicating the channel. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after + resizing. + resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): + `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has + an effect if `do_resize` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use if `do_normalize` is set to `True`. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. 
Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + num_labels: (`int`, *optional*): + Number of classes in the segmentation task (excluding the background). If specified, a palette will be + built, assuming that class_idx 0 is the background, to map the prompt mask from a single class_idx + channel to a 3 channel RGB. Not specifying this will result in the prompt mask either being passed + through as is if it is already in RGB format or being duplicated across the channel dimension. + """ + do_resize = do_resize if do_resize is not None else self.do_resize + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + resample = resample if resample is not None else self.resample + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + + size = size if size is not None else self.size + size_dict = get_size_dict(size) + + images = make_list_of_images(images) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + if do_resize and size is None: + raise ValueError("Size must be specified if do_resize is True.") + + if do_rescale and rescale_factor is None: + raise ValueError("Rescale factor must be specified if do_rescale is True.") + + if do_normalize and (image_mean is None or image_std is None): + raise ValueError("Image mean and std must be specified if do_normalize is True.") + + # All transformations expect numpy arrays. + images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None and not is_mask: + # We assume that all images have the same channel dimension format. 
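+            # Prompt masks are deliberately excluded from this inference: the `is_mask` branch
+            # below converts them to 3-channel RGB in channels-first layout and then sets
+            # `input_data_format` explicitly, so inferring it from the raw mask is unnecessary.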
+ input_data_format = infer_channel_dimension_format(images[0]) + + if is_mask: + palette = self.get_palette(num_labels) if num_labels is not None else None + # Since this is the input for the next transformations its format should be the same as the input_data_format + images = [ + self.mask_to_rgb(image=image, palette=palette, data_format=ChannelDimension.FIRST) for image in images + ] + input_data_format = ChannelDimension.FIRST + + if do_resize: + images = [ + self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images + ] + + return images + + def preprocess( + self, + images: Optional[ImageInput] = None, + prompt_images: Optional[ImageInput] = None, + prompt_masks: Optional[ImageInput] = None, + do_resize: Optional[bool] = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + num_labels: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ): + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + prompt_images (`ImageInput`): + Prompt image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + prompt_masks (`ImageInput`): + Prompt mask from prompt image to _preprocess. Expects a single or batch of masks. If the mask masks are + a single channel then it will be converted to RGB using the palette if `self.num_labels` is specified + or by just repeating the channel if not. If the mask is already in RGB format, it will be passed through. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after + resizing. + resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): + `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has + an effect if `do_resize` is set to `True`. Doesn't apply to prompt mask as it is resized using nearest. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. 
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use if `do_normalize` is set to `True`. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + num_labels: (`int`, *optional*): + Number of classes in the segmentation task (excluding the background). If specified, a palette will be + built, assuming that class_idx 0 is the background, to map the prompt mask from a single class_idx + channel to a 3 channel RGB. Not specifying this will result in the prompt mask either being passed + through as is if it is already in RGB format or being duplicated across the channel dimension. 
+ """ + if all(v is None for v in [images, prompt_images, prompt_masks]): + raise ValueError("At least one of images, prompt_images, prompt_masks must be specified.") + + data = {} + + if images is not None: + images = self._preprocess_step( + images, + is_mask=False, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + data["pixel_values"] = images + + if prompt_images is not None: + prompt_images = self._preprocess_step( + prompt_images, + is_mask=False, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + data["prompt_pixel_values"] = prompt_images + + if prompt_masks is not None: + prompt_masks = self._preprocess_step( + prompt_masks, + is_mask=True, + do_resize=do_resize, + size=size, + resample=PILImageResampling.NEAREST, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + data_format=data_format, + input_data_format=input_data_format, + num_labels=num_labels, + **kwargs, + ) + + data["prompt_masks"] = prompt_masks + + return BatchFeature(data=data, tensor_type=return_tensors) + + def post_process_semantic_segmentation( + self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None, num_labels: Optional[int] = None + ): + """ + Converts the output of [`SegGptImageSegmentationOutput`] into segmentation maps. Only supports + PyTorch. + + Args: + outputs ([`SegGptImageSegmentationOutput`]): + Raw outputs of the model. + target_sizes (`List[Tuple[int, int]]`, *optional*): + List of length (batch_size), where each list item (`Tuple[int, int]`) corresponds to the requested + final size (height, width) of each prediction. If left to None, predictions will not be resized. + num_labels (`int`, *optional*): + Number of classes in the segmentation task (excluding the background). If specified, a palette will be + built, assuming that class_idx 0 is the background, to map prediction masks from RGB values to class + indices. This value should be the same used when preprocessing inputs. + Returns: + semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic + segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is + specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
+ """ + requires_backends(self, ["torch"]) + # batch_size x num_channels x 2*height x width + masks = outputs.pred_masks + + # Predicted mask and prompt are concatenated in the height dimension + # batch_size x num_channels x height x width + masks = masks[:, :, masks.shape[2] // 2 :, :] + + # To unnormalize we need to permute to channel last + # batch_size x height x width x num_channels + std = torch.tensor(self.image_std).to(masks.device) + mean = torch.tensor(self.image_mean).to(masks.device) + + masks = masks.permute(0, 2, 3, 1) * std + mean + + # batch_size x num_channels x height x width + masks = masks.permute(0, 3, 1, 2) + + # Clip to match with palette if specified + masks = torch.clip(masks * 255, 0, 255) + + semantic_segmentation = [] + palette_tensor = None + palette = self.get_palette(num_labels) if num_labels is not None else None + if palette is not None: + palette_tensor = torch.tensor(palette).float().to(masks.device) + _, num_channels, _, _ = masks.shape + palette_tensor = palette_tensor.view(1, 1, num_labels + 1, num_channels) + + for idx, mask in enumerate(masks): + if target_sizes is not None: + mask = torch.nn.functional.interpolate( + mask.unsqueeze(0), + size=target_sizes[idx], + mode="nearest", + )[0] + + if num_labels is not None: + channels, height, width = mask.shape + dist = mask.permute(1, 2, 0).view(height, width, 1, channels) + dist = dist - palette_tensor + dist = torch.pow(dist, 2) + dist = torch.sum(dist, dim=-1) + pred = dist.argmin(dim=-1) + + else: + # If no palette is specified SegGpt will try to paint using the mask class idx as RGB + pred = mask.mean(dim=0).int() + + semantic_segmentation.append(pred) + + return semantic_segmentation diff --git a/venv/lib/python3.10/site-packages/transformers/models/seggpt/modeling_seggpt.py b/venv/lib/python3.10/site-packages/transformers/models/seggpt/modeling_seggpt.py new file mode 100644 index 0000000000000000000000000000000000000000..79fd309eaf808f0e79fda3a35d8b845414748531 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/seggpt/modeling_seggpt.py @@ -0,0 +1,1011 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
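+# Editorial sketch (not part of the upstream file): the `SegGptImageProcessor` defined earlier
+# in this diff and the `SegGptForImageSegmentation` model defined below are meant to be used
+# together. `image`, `prompt_image`, `prompt_mask`, and `num_classes` are illustrative
+# placeholders; a minimal end-to-end call would look roughly like:
+#
+#     processor = SegGptImageProcessor.from_pretrained("BAAI/seggpt-vit-large")
+#     model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large")
+#     inputs = processor(
+#         images=image, prompt_images=prompt_image, prompt_masks=prompt_mask,
+#         num_labels=num_classes, return_tensors="pt",
+#     )
+#     outputs = model(**inputs)
+#     segmentation_map = processor.post_process_semantic_segmentation(
+#         outputs, target_sizes=[image.size[::-1]], num_labels=num_classes
+#     )[0]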
+""" PyTorch SegGpt model.""" + + +import collections.abc +import math +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import functional as F + +from ...activations import ACT2FN +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_seggpt import SegGptConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "SegGptConfig" + +# Base docstring +_CHECKPOINT_FOR_DOC = "BAAI/seggpt-vit-large" +_EXPECTED_OUTPUT_SHAPE = [3, 896, 448] + + +from ..deprecated._archive_maps import SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +@dataclass +class SegGptEncoderOutput(ModelOutput): + """ + Output type of [`SegGptEncoderOutput`]. + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, patch_height, patch_width, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) + of shape `(batch_size, patch_height, patch_width, hidden_size)`. + attentions (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_attentions=True`): + Tuple of *torch.FloatTensor* (one for each layer) of shape + `(batch_size, num_heads, seq_len, seq_len)`. + intermediate_hidden_states (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.intermediate_hidden_state_indices` is set): + Tuple of `torch.FloatTensor` of shape `(batch_size, patch_height, patch_width, hidden_size)`. + Each element in the Tuple corresponds to the output of the layer specified in `config.intermediate_hidden_state_indices`. + Additionaly, each feature passes through a LayerNorm. + """ + + last_hidden_state: torch.FloatTensor + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + intermediate_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class SegGptImageSegmentationOutput(ModelOutput): + """ + Output type of [`SegGptImageSegmentationOutput`]. + + Args: + loss (`torch.FloatTensor`, `optional`, returned when `labels` is provided): + The loss value. + pred_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + The predicted masks. + hidden_states (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) + of shape `(batch_size, patch_height, patch_width, hidden_size)`. + attentions (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape + `(batch_size, num_heads, seq_len, seq_len)`. 
+ """ + + loss: Optional[torch.FloatTensor] = None + pred_masks: Optional[torch.FloatTensor] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +# Copied from transformers.models.sam.modeling_sam.SamPatchEmbeddings with Sam->SegGpt +class SegGptPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. + """ + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.hidden_size + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + + self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) + + def forward(self, pixel_values): + batch_size, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + if height != self.image_size[0] or width != self.image_size[1]: + raise ValueError( + f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." + ) + embeddings = self.projection(pixel_values).permute(0, 2, 3, 1) + return embeddings + + +class SegGptEmbeddings(nn.Module): + """ + Construct the embeddings from patch, position embeddings for input and prompt. 
+ """ + + def __init__(self, config: SegGptConfig) -> None: + super().__init__() + + self.mask_token = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) + self.segment_token_input = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) + self.segment_token_prompt = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) + # token for seg types + self.type_token_semantic = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) + self.type_token_instance = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) + + self.patch_embeddings = SegGptPatchEmbeddings(config) + + num_positions = (config.pretrain_image_size // config.patch_size) ** 2 + 1 + self.position_embeddings = nn.Parameter(torch.randn(1, num_positions, config.hidden_size)) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def interpolate_pos_encoding(self, height: int, width: int) -> torch.Tensor: + patch_pos_embed = self.position_embeddings[:, 1:] + num_patches = patch_pos_embed.shape[1] + pretrain_patch_size = int(math.sqrt(num_patches)) + + if pretrain_patch_size != height or pretrain_patch_size != width: + patch_pos_embed = F.interpolate( + patch_pos_embed.reshape(1, pretrain_patch_size, pretrain_patch_size, -1).permute(0, 3, 1, 2), + size=(height, width), + mode="bicubic", + align_corners=False, + ) + + return patch_pos_embed.permute(0, 2, 3, 1) + else: + return patch_pos_embed.reshape(1, height, width, -1) + + def forward( + self, + pixel_values: torch.Tensor, + prompt_pixel_values: torch.Tensor, + bool_masked_pos: Optional[torch.BoolTensor] = None, + embedding_type: Optional[str] = None, + ) -> torch.Tensor: + input_embeddings = self.patch_embeddings(pixel_values) + prompt_embeddings = self.patch_embeddings(prompt_pixel_values) + + batch_size, patch_height, patch_width, _ = input_embeddings.shape + + mask_token = self.mask_token.expand(batch_size, patch_height, patch_width, -1) + # replace the masked visual tokens by mask_token + w = bool_masked_pos.unsqueeze(-1).type_as(mask_token).reshape(-1, patch_height, patch_width, 1) + prompt_embeddings = prompt_embeddings * (1 - w) + mask_token * w + + embedding_type = embedding_type if embedding_type is not None else "instance" + + # add positional encoding to each token + pos_embed = self.interpolate_pos_encoding(patch_height, patch_width) + + # add segment token + input_embeddings = input_embeddings + self.segment_token_input + prompt_embeddings = prompt_embeddings + self.segment_token_prompt + + # add position embedding skipping CLS + input_embeddings = input_embeddings + pos_embed + prompt_embeddings = prompt_embeddings + pos_embed + + # add type embedding to each token + if embedding_type == "semantic": + type_embedding = self.type_token_semantic + elif embedding_type == "instance": + type_embedding = self.type_token_instance + else: + raise ValueError(f"Embedding type should be either 'semantic' or 'instance', but got {embedding_type}") + + input_embeddings = input_embeddings + type_embedding + prompt_embeddings = prompt_embeddings + type_embedding + + embeddings = torch.cat((input_embeddings, prompt_embeddings), dim=0) + + return embeddings + + +class SegGptAttention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else 
(patch_size, patch_size) + + input_size = (image_size[0] // config.patch_size, image_size[1] // config.patch_size) + head_dim = config.hidden_size // config.num_attention_heads + + self.num_attention_heads = config.num_attention_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias) + self.proj = nn.Linear(config.hidden_size, config.hidden_size) + + self.use_relative_position_embeddings = config.use_relative_position_embeddings + if self.use_relative_position_embeddings: + if input_size is None: + raise ValueError("Input size must be provided if using relative positional encoding.") + + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) + + def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + + Args: + q_size (int): + size of the query. + k_size (int): + size of key k. + rel_pos (`torch.Tensor`): + relative position embeddings (L, channel). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + def add_decomposed_rel_pos( + self, + attn: torch.Tensor, + query: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], + ) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py + + Args: + attn (`torch.Tensor`): + attention map. + query (`torch.Tensor`): + query q in the attention layer with shape (batch_size, query_height * query_width, channel). + rel_pos_h (`torch.Tensor`): + relative position embeddings (Lh, channel) for height axis. + rel_pos_w (`torch.Tensor`): + relative position embeddings (Lw, channel) for width axis. + q_size (tuple): + spatial sequence size of query q with (query_height, query_width). + k_size (tuple): + spatial sequence size of key k with (key_height, key_width). + + Returns: + attn (`torch.Tensor`): + attention map with added relative positional embeddings. 
+ """ + query_height, query_width = q_size + key_height, key_width = k_size + relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) + relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) + + batch_size, _, dim = query.shape + reshaped_query = query.reshape(batch_size, query_height, query_width, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) + rel_w = torch.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) + attn = attn.reshape(batch_size, query_height, query_width, key_height, key_width) + attn = attn + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] + attn = attn.reshape(batch_size, query_height * query_width, key_height * key_width) + return attn + + def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor: + batch_size, height, width, _ = hidden_states.shape + # qkv with shape (3, batch_size, nHead, height * width, channel) + qkv = ( + self.qkv(hidden_states) + .reshape(batch_size, height * width, 3, self.num_attention_heads, -1) + .permute(2, 0, 3, 1, 4) + ) + # q, k, v with shape (batch_size * nHead, height * width, channel) + query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0) + + attn_weights = (query * self.scale) @ key.transpose(-2, -1) + + if self.use_relative_position_embeddings: + attn_weights = self.add_decomposed_rel_pos( + attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) + ) + + attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(batch_size, self.num_attention_heads, height * width, -1) + attn_weights = attn_weights_reshaped.view(batch_size * self.num_attention_heads, height * width, -1) + else: + attn_weights_reshaped = None + + attn_output = (attn_weights @ value).reshape(batch_size, self.num_attention_heads, height, width, -1) + attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1) + + attn_output = self.proj(attn_output) + + return (attn_output, attn_weights_reshaped) + + +# Copied from transformers.models.sam.modeling_sam.SamMLPBlock with SamMLPBlock->SegGptMlp +class SegGptMlp(nn.Module): + def __init__(self, config): + super().__init__() + self.lin1 = nn.Linear(config.hidden_size, config.mlp_dim) + self.lin2 = nn.Linear(config.mlp_dim, config.hidden_size) + self.act = ACT2FN[config.hidden_act] + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.lin1(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.lin2(hidden_states) + return hidden_states + + +# Copied from transformers.models.beit.modeling_beit.drop_path +def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... 
I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. + """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + +# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->SegGpt +class SegGptDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None) -> None: + super().__init__() + self.drop_prob = drop_prob + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + return drop_path(hidden_states, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +class SegGptLayer(nn.Module): + def __init__(self, config: SegGptConfig, drop_path_rate: float) -> None: + super().__init__() + self.attention = SegGptAttention(config) + self.mlp = SegGptMlp(config) + self.drop_path = SegGptDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() + self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + ensemble_cond: int, + feature_ensemble: bool = False, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_attention_outputs = self.attention( + self.layernorm_before(hidden_states), # in SegGpt, layernorm is applied before self-attention + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + if feature_ensemble and attention_output.shape[0] // 2 >= ensemble_cond: + prompt, inputs = attention_output.split(attention_output.shape[1] // 2, dim=1) + if ensemble_cond == 2: + num_prompts = attention_output.shape[0] // 2 + inputs = inputs.reshape(2, num_prompts, -1) + inputs = inputs.mean(dim=1, keepdim=True).expand_as(inputs) + inputs = inputs.reshape(*prompt.shape) + else: + inputs = inputs.mean(dim=0, keepdim=True).expand_as(inputs) + attention_output = torch.cat([prompt, inputs], dim=1) + + # first residual connection + hidden_states = self.drop_path(attention_output) + hidden_states + residual = hidden_states + + hidden_states = self.layernorm_after(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + self.drop_path(hidden_states) + + outputs = (hidden_states,) + outputs + + return outputs + + +class SegGptEncoder(nn.Module): + def __init__(self, config: SegGptConfig) -> None: + super().__init__() + self.config = config + dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)] + self.layers = nn.ModuleList([SegGptLayer(config, dpr[i]) for i in range(config.num_hidden_layers)]) + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + feature_ensemble: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + 
return_dict: bool = True, + ) -> Union[tuple, SegGptEncoderOutput]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + intermediate_hidden_states = [] + + for i, layer_module in enumerate(self.layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + # Condition to check if we have the appropriate number of prompts to ensemble + ensemble_cond = 2 if self.config.merge_index > i else 1 + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + ensemble_cond, + feature_ensemble, + output_attentions, + ) + else: + layer_outputs = layer_module(hidden_states, ensemble_cond, feature_ensemble, output_attentions) + + hidden_states = layer_outputs[0] + + if i == self.config.merge_index: + hidden_states = ( + hidden_states[: hidden_states.shape[0] // 2] + hidden_states[hidden_states.shape[0] // 2 :] + ) * 0.5 + + if i in self.config.intermediate_hidden_state_indices: + intermediate_hidden_states.append(self.layernorm(hidden_states)) + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [hidden_states, all_hidden_states, all_self_attentions, intermediate_hidden_states] + if v is not None + ) + return SegGptEncoderOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + intermediate_hidden_states=intermediate_hidden_states, + ) + + +# Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->SegGpt +class SegGptLayerNorm(nn.Module): + r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, + width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). 
+ """ + + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError(f"Unsupported data format: {self.data_format}") + self.normalized_shape = (normalized_shape,) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.data_format == "channels_last": + x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + elif self.data_format == "channels_first": + input_dtype = x.dtype + x = x.float() + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = x.to(dtype=input_dtype) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class SegGptDecoderHead(nn.Module): + def __init__(self, config): + super().__init__() + self.conv = nn.Conv2d( + config.decoder_hidden_size, + config.decoder_hidden_size, + kernel_size=3, + padding=1, + ) + self.layernorm = SegGptLayerNorm( + normalized_shape=config.decoder_hidden_size, eps=config.layer_norm_eps, data_format="channels_first" + ) + self.act_fct = ACT2FN[config.hidden_act] + self.head = nn.Conv2d(config.decoder_hidden_size, 3, kernel_size=1, bias=True) # decoder to patch + + def forward(self, hidden_states: torch.FloatTensor): + hidden_states = self.conv(hidden_states) + hidden_states = self.layernorm(hidden_states) + hidden_states = self.act_fct(hidden_states) + hidden_states = self.head(hidden_states) + + return hidden_states + + +class SegGptDecoder(nn.Module): + def __init__(self, config): + super().__init__() + self.decoder_embed = nn.Linear( + config.hidden_size * len(config.intermediate_hidden_state_indices), + config.patch_size**2 * config.decoder_hidden_size, + bias=True, + ) + self.decoder_pred = SegGptDecoderHead(config) + self.patch_size = config.patch_size + self.decoder_hidden_size = config.decoder_hidden_size + self.config = config + + def _reshape_hidden_states(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + batch_size, patch_height, patch_width, _ = hidden_states.shape + hidden_states = hidden_states.reshape( + batch_size, patch_height, patch_width, self.patch_size, self.patch_size, self.decoder_hidden_size + ) + hidden_states = hidden_states.permute(0, 5, 1, 3, 2, 4) + hidden_states = hidden_states.reshape( + shape=(batch_size, -1, patch_height * self.patch_size, patch_width * self.patch_size) + ) + + return hidden_states + + def forward(self, hidden_states: torch.FloatTensor): + hidden_states = self.decoder_embed(hidden_states) + hidden_states = self._reshape_hidden_states(hidden_states) + hidden_states = self.decoder_pred(hidden_states) + + return hidden_states + + +class SegGptPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = SegGptConfig + base_model_prefix = "model" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + _no_split_modules = ["SegGptEmbeddings", "SegGptLayer"] + + def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: + """Initialize the weights""" + std = self.config.initializer_range + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid + # `trunc_normal_cpu` not implemented in `half` issues + module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=std).to( + module.weight.dtype + ) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, SegGptAttention): + module.rel_pos_h.data = nn.init.trunc_normal_( + module.rel_pos_h.data.to(torch.float32), + mean=0.0, + std=std, + ).to(module.rel_pos_h.dtype) + + module.rel_pos_w.data = nn.init.trunc_normal_( + module.rel_pos_w.data.to(torch.float32), + mean=0.0, + std=std, + ).to(module.rel_pos_w.dtype) + + elif isinstance(module, SegGptEmbeddings): + module.position_embeddings.data = nn.init.trunc_normal_( + module.position_embeddings.data.to(torch.float32), + mean=0.0, + std=std, + ).to(module.position_embeddings.dtype) + + torch.nn.init.normal_(module.mask_token, std=std) + torch.nn.init.normal_(module.segment_token_input, std=std) + torch.nn.init.normal_(module.segment_token_prompt, std=std) + torch.nn.init.normal_(module.type_token_semantic, std=std) + torch.nn.init.normal_(module.type_token_instance, std=std) + + +SEGGPT_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`SegGptConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +SEGGPT_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`SegGptImageProcessor.__call__`] + for details. + + prompt_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Prompt pixel values. Prompt pixel values can be obtained using [`AutoImageProcessor`]. See + [`SegGptImageProcessor.__call__`] for details. + + prompt_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Prompt mask. Prompt mask can be obtained using [`AutoImageProcessor`]. See [`SegGptImageProcessor.__call__`] for + details. + + bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): + Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). + + feature_ensemble (`bool`, *optional*): + Boolean indicating whether to use feature ensemble or not. If `True`, the model will use feature ensemble + if we have at least two prompts. If `False`, the model will not use feature ensemble. This argument should + be considered when doing few-shot inference on an input image i.e. more than one prompt for the same image. 
+ + embedding_type (`str`, *optional*): + Embedding type. Indicates whether the prompt is a semantic or instance embedding. Can be either + instance or semantic. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare SegGpt Model transformer outputting raw hidden-states without any specific head on top.", + SEGGPT_START_DOCSTRING, +) +class SegGptModel(SegGptPreTrainedModel): + def __init__(self, config: SegGptConfig): + super().__init__(config) + self.config = config + + self.embeddings = SegGptEmbeddings(config) + self.encoder = SegGptEncoder(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> SegGptPatchEmbeddings: + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(SEGGPT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=SegGptEncoderOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: torch.Tensor, + prompt_pixel_values: torch.Tensor, + prompt_masks: torch.Tensor, + bool_masked_pos: Optional[torch.BoolTensor] = None, + feature_ensemble: Optional[bool] = None, + embedding_type: Optional[str] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SegGptEncoderOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import SegGptImageProcessor, SegGptModel + >>> from PIL import Image + >>> import requests + + >>> image_input_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_2.jpg" + >>> image_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1.jpg" + >>> mask_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1_target.png" + + >>> image_input = Image.open(requests.get(image_input_url, stream=True).raw) + >>> image_prompt = Image.open(requests.get(image_prompt_url, stream=True).raw) + >>> mask_prompt = Image.open(requests.get(mask_prompt_url, stream=True).raw).convert("L") + + >>> checkpoint = "BAAI/seggpt-vit-large" + >>> model = SegGptModel.from_pretrained(checkpoint) + >>> image_processor = SegGptImageProcessor.from_pretrained(checkpoint) + + >>> inputs = image_processor(images=image_input, prompt_images=image_prompt, prompt_masks=mask_prompt, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> list(outputs.last_hidden_state.shape) + [1, 56, 28, 1024] + ``` + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + 
return_dict = return_dict if return_dict is not None else self.config.use_return_dict + feature_ensemble = feature_ensemble if feature_ensemble is not None else False + + expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype + pixel_values = pixel_values.to(expected_dtype) + prompt_pixel_values = prompt_pixel_values.to(expected_dtype) + + # Prepare inputs + pixel_values = torch.cat((prompt_pixel_values, pixel_values), dim=2) + prompt_pixel_values = torch.cat((prompt_masks, prompt_masks), dim=2) + + # We concat on height axis so SegGPT can handle as a single image, hence we need to mask the portion + # of the prompt pixels that will be destinated to the prediction as they don't add any information. + if bool_masked_pos is None: + num_patches = self.embeddings.patch_embeddings.num_patches + bool_masked_pos = torch.zeros(num_patches, dtype=torch.bool).to(pixel_values.device) + bool_masked_pos[num_patches // 2 :] = 1 + bool_masked_pos = bool_masked_pos.unsqueeze(0) + + embedding_output = self.embeddings( + pixel_values, prompt_pixel_values, embedding_type=embedding_type, bool_masked_pos=bool_masked_pos + ) + + encoder_outputs = self.encoder( + embedding_output, + feature_ensemble=feature_ensemble, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + return encoder_outputs + + +def patchify(tensor: torch.Tensor, patch_size: int) -> torch.Tensor: + batch_size, num_channels, height, width = tensor.shape + patch_height = height // patch_size + patch_width = width // patch_size + + tensor = tensor.reshape(shape=(batch_size, num_channels, patch_height, patch_size, patch_width, patch_size)) + tensor = tensor.permute(0, 2, 4, 3, 5, 1) + tensor = tensor.reshape(shape=(batch_size, patch_height * patch_width, patch_size**2 * 3)) + + return tensor + + +def unpatchify(tensor: torch.Tensor, patch_height: int, patch_width: int) -> torch.Tensor: + batch_size = tensor.shape[0] + patch_size = int((tensor.shape[-1] / 3) ** 0.5) + if patch_height * patch_width != tensor.shape[1]: + raise ValueError(f"Number of patches {tensor.shape[1]} does not match patch height and width.") + + tensor = tensor.reshape(shape=(batch_size, patch_height, patch_width, patch_size, patch_size, 3)) + tensor = tensor.permute(0, 5, 1, 3, 2, 4) + tensor = tensor.reshape(shape=(batch_size, 3, patch_height * patch_size, patch_width * patch_size)) + + return tensor + + +class SegGptLoss(nn.Module): + def __init__(self, config): + super().__init__() + self.beta = config.beta + self.patch_size = config.patch_size + + def forward( + self, + pixel_values: torch.FloatTensor, + prompt_pixel_values: torch.FloatTensor, + pred_masks: torch.FloatTensor, + labels: torch.FloatTensor, + bool_masked_pos: torch.BoolTensor, + ): + """Computes the L1 loss between the predicted masks and the ground truth masks. + + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, 2*height, width)`): + Concatenated pixel values from prompt and input images. + + prompt_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, 2*height, width)`): + Concatenated pixel values from mask prompt. + + pred_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, 2*height, width)`): + Predicted masks. + + labels (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Ground truth mask for input images. + + bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): + Boolean masked positions. 
Indicates which patches are masked (1) and which aren't (0). + + Returns: + `torch.FloatTensor`: The mean L1 loss between the predicted masks and the ground truth masks. + """ + mask = bool_masked_pos[:, :, None].repeat(1, 1, self.patch_size**2 * 3) + mask = unpatchify(mask, pixel_values.shape[1] // self.patch_size, pixel_values.shape[2] // self.patch_size) + # Changing dummy mask in prompt_pixel_values to labels values + prompt_pixel_values = prompt_pixel_values.clone() + prompt_pixel_values[:, :, prompt_pixel_values.shape[2] // 2 :, :] = labels + loss = F.smooth_l1_loss(pred_masks, prompt_pixel_values, reduction="none", beta=self.beta) + loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches + + return loss + + +@add_start_docstrings( + "SegGpt model with a decoder on top for one-shot image segmentation.", + SEGGPT_START_DOCSTRING, +) +class SegGptForImageSegmentation(SegGptPreTrainedModel): + def __init__(self, config: SegGptConfig): + super().__init__(config) + self.config = config + + self.model = SegGptModel(config) + self.decoder = SegGptDecoder(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(SEGGPT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=SegGptImageSegmentationOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: torch.Tensor, + prompt_pixel_values: torch.Tensor, + prompt_masks: torch.Tensor, + bool_masked_pos: Optional[torch.BoolTensor] = None, + feature_ensemble: Optional[bool] = None, + embedding_type: Optional[str] = None, + labels: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SegGptImageSegmentationOutput]: + r""" + labels (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`, `optional`): + Ground truth mask for input images. 
+ + Returns: + + Examples: + + ```python + >>> from transformers import SegGptImageProcessor, SegGptForImageSegmentation + >>> from PIL import Image + >>> import requests + + >>> image_input_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_2.jpg" + >>> image_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1.jpg" + >>> mask_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1_target.png" + + >>> image_input = Image.open(requests.get(image_input_url, stream=True).raw) + >>> image_prompt = Image.open(requests.get(image_prompt_url, stream=True).raw) + >>> mask_prompt = Image.open(requests.get(mask_prompt_url, stream=True).raw).convert("L") + + >>> checkpoint = "BAAI/seggpt-vit-large" + >>> model = SegGptForImageSegmentation.from_pretrained(checkpoint) + >>> image_processor = SegGptImageProcessor.from_pretrained(checkpoint) + + >>> inputs = image_processor(images=image_input, prompt_images=image_prompt, prompt_masks=mask_prompt, return_tensors="pt") + >>> outputs = model(**inputs) + >>> result = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image_input.size[::-1]])[0] + >>> print(list(result.shape)) + [170, 297] + ``` + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if bool_masked_pos is None: + num_patches = self.model.embeddings.patch_embeddings.num_patches + bool_masked_pos = torch.zeros(num_patches, dtype=torch.bool).to(pixel_values.device) + bool_masked_pos[num_patches // 2 :] = 1 + bool_masked_pos = bool_masked_pos.unsqueeze(0) + + outputs = self.model( + pixel_values=pixel_values, + prompt_pixel_values=prompt_pixel_values, + prompt_masks=prompt_masks, + bool_masked_pos=bool_masked_pos, + feature_ensemble=feature_ensemble, + embedding_type=embedding_type, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + intermediate_hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[-1] + intermediate_hidden_states = torch.cat(intermediate_hidden_states, dim=-1) + pred_masks = self.decoder(intermediate_hidden_states) + + loss = None + if labels is not None: + loss_fn = SegGptLoss(self.config) + loss = loss_fn(pixel_values, prompt_pixel_values, pred_masks, labels, bool_masked_pos) + + if not return_dict: + output = (pred_masks,) + if output_hidden_states: + output = output + (outputs[1],) + + if output_attentions: + idx = 2 if output_hidden_states else 1 + output = output + (outputs[idx],) + + if loss is not None: + output = (loss,) + output + return output + + return SegGptImageSegmentationOutput( + loss=loss, + pred_masks=pred_masks, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_msn/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/vit_msn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c36cb750cfa4e6273de0a8a2646236ee14b516d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vit_msn/__init__.py @@ -0,0 +1,53 @@ +# Copyright 2020 The HuggingFace Team. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_vit_msn"] = [ + "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST", + "ViTMSNModel", + "ViTMSNForImageClassification", + "ViTMSNPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_vit_msn import ( + VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, + ViTMSNForImageClassification, + ViTMSNModel, + ViTMSNPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/modeling_vit_msn.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/modeling_vit_msn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e98d10466082bba1ef0bb53d231fc353c5277628 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/modeling_vit_msn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_msn/configuration_vit_msn.py b/venv/lib/python3.10/site-packages/transformers/models/vit_msn/configuration_vit_msn.py new file mode 100644 index 0000000000000000000000000000000000000000..296434346625f56e265787ad7e567188d45d02dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vit_msn/configuration_vit_msn.py @@ -0,0 +1,116 @@ +# coding=utf-8 +# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
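The `__init__.py` above wires `ViTMSNConfig` and the torch-backed modeling classes into a `_LazyModule`, so importing the package stays cheap and submodules are only loaded on first attribute access. A minimal sketch of that behaviour (illustrative usage, not part of the vendored file; it assumes a transformers install with ViT MSN support):

```python
# Sketch: the _LazyModule registration above defers heavy imports until an
# attribute is actually touched.
import importlib

vit_msn = importlib.import_module("transformers.models.vit_msn")

# Accessing an attribute triggers the lazy import of the submodule defining it.
config_cls = getattr(vit_msn, "ViTMSNConfig")  # loads configuration_vit_msn on demand
print(config_cls().model_type)                 # -> "vit_msn"
```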
+""" ViT MSN model configuration""" + + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class ViTMSNConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ViTMSNModel`]. It is used to instantiate an ViT + MSN model according to the specified arguments, defining the model architecture. Instantiating a configuration with + the defaults will yield a similar configuration to that of the ViT + [facebook/vit_msn_base](https://huggingface.co/facebook/vit_msn_base) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether to add a bias to the queries, keys and values. 
+ + Example: + + ```python + >>> from transformers import ViTMSNModel, ViTMSNConfig + + >>> # Initializing a ViT MSN vit-msn-base style configuration + >>> configuration = ViTMSNConfig() + + >>> # Initializing a model from the vit-msn-base style configuration + >>> model = ViTMSNModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "vit_msn" + + def __init__( + self, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + initializer_range=0.02, + layer_norm_eps=1e-06, + image_size=224, + patch_size=16, + num_channels=3, + qkv_bias=True, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.qkv_bias = qkv_bias diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_msn/convert_msn_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/vit_msn/convert_msn_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..899c74f183205e9fdc18984a1f15e877bc64fe31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vit_msn/convert_msn_to_pytorch.py @@ -0,0 +1,245 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
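The `ViTMSNConfig` defined above is a plain container of architecture hyperparameters, so smaller variants can be built by overriding them. A hedged sketch (the specific sizes here are illustrative, not from the vendored file):

```python
# Sketch: build a small, randomly initialized ViT MSN by overriding config defaults.
from transformers import ViTMSNConfig, ViTMSNModel

config = ViTMSNConfig(
    hidden_size=384,         # width of every encoder layer
    num_hidden_layers=6,     # depth of the Transformer encoder
    num_attention_heads=6,   # hidden_size must be divisible by this
    intermediate_size=1536,  # feed-forward width
    image_size=224,
    patch_size=16,
)
model = ViTMSNModel(config)       # randomly initialized, no pretrained weights loaded
print(model.config.hidden_size)   # -> 384
```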
+"""Convert ViT MSN checkpoints from the original repository: https://github.com/facebookresearch/msn""" + +import argparse +import json + +import requests +import torch +from huggingface_hub import hf_hub_download +from PIL import Image + +from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel +from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +torch.set_grad_enabled(False) + + +# here we list all keys to be renamed (original name on the left, our name on the right) +def create_rename_keys(config, base_model=False): + rename_keys = [] + for i in range(config.num_hidden_layers): + # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms + rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight")) + rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias")) + rename_keys.append( + (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") + ) + rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias")) + rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight")) + rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias")) + rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight")) + rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias")) + rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight")) + rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias")) + + # projection layer + position embeddings + rename_keys.extend( + [ + ("module.cls_token", "vit.embeddings.cls_token"), + ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), + ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), + ("module.pos_embed", "vit.embeddings.position_embeddings"), + ] + ) + + if base_model: + # layernorm + pooler + rename_keys.extend( + [ + ("module.norm.weight", "layernorm.weight"), + ("module.norm.bias", "layernorm.bias"), + ] + ) + + # if just the base model, we should remove "vit" from all keys that start with "vit" + rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys] + else: + # layernorm + classification head + rename_keys.extend( + [ + ("norm.weight", "vit.layernorm.weight"), + ("norm.bias", "vit.layernorm.bias"), + ("head.weight", "classifier.weight"), + ("head.bias", "classifier.bias"), + ] + ) + + return rename_keys + + +# we split up the matrix of each encoder layer into queries, keys and values +def read_in_q_k_v(state_dict, config, base_model=False): + for i in range(config.num_hidden_layers): + if base_model: + prefix = "" + else: + prefix = "vit." 
+ # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) + in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight") + in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias") + # next, add query, keys and values (in that order) to the state dict + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ + : config.hidden_size, : + ] + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ + config.hidden_size : config.hidden_size * 2, : + ] + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ + config.hidden_size : config.hidden_size * 2 + ] + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ + -config.hidden_size :, : + ] + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] + + +def remove_classification_head_(state_dict): + ignore_keys = ["head.weight", "head.bias"] + for k in ignore_keys: + state_dict.pop(k, None) + + +def remove_projection_head(state_dict): + # projection head is used in the self-supervised pre-training in MSN, + # for downstream task it's not needed. + ignore_keys = [ + "module.fc.fc1.weight", + "module.fc.fc1.bias", + "module.fc.bn1.weight", + "module.fc.bn1.bias", + "module.fc.bn1.running_mean", + "module.fc.bn1.running_var", + "module.fc.bn1.num_batches_tracked", + "module.fc.fc2.weight", + "module.fc.fc2.bias", + "module.fc.bn2.weight", + "module.fc.bn2.bias", + "module.fc.bn2.running_mean", + "module.fc.bn2.running_var", + "module.fc.bn2.num_batches_tracked", + "module.fc.fc3.weight", + "module.fc.fc3.bias", + ] + for k in ignore_keys: + state_dict.pop(k, None) + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path): + config = ViTMSNConfig() + config.num_labels = 1000 + + repo_id = "datasets/huggingface/label-files" + filename = "imagenet-1k-id2label.json" + id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = {int(k): v for k, v in id2label.items()} + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + + if "s16" in checkpoint_url: + config.hidden_size = 384 + config.intermediate_size = 1536 + config.num_attention_heads = 6 + elif "l16" in checkpoint_url: + config.hidden_size = 1024 + config.intermediate_size = 4096 + config.num_hidden_layers = 24 + config.num_attention_heads = 16 + config.hidden_dropout_prob = 0.1 + elif "b4" in checkpoint_url: + config.patch_size = 4 + elif "l7" in checkpoint_url: + config.patch_size = 7 + config.hidden_size = 1024 + config.intermediate_size = 4096 + config.num_hidden_layers = 24 + config.num_attention_heads = 16 + config.hidden_dropout_prob = 0.1 + + model = ViTMSNModel(config) + + state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"] + + image_processor = ViTImageProcessor(size=config.image_size) + + remove_projection_head(state_dict) + rename_keys = create_rename_keys(config, base_model=True) + + for src, dest in rename_keys: + rename_key(state_dict, src, dest) + read_in_q_k_v(state_dict, config, base_model=True) + + model.load_state_dict(state_dict) + model.eval() + + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + + image = 
Image.open(requests.get(url, stream=True).raw) + image_processor = ViTImageProcessor( + size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD + ) + inputs = image_processor(images=image, return_tensors="pt") + + # forward pass + torch.manual_seed(2) + outputs = model(**inputs) + last_hidden_state = outputs.last_hidden_state + + # The following Colab Notebook was used to generate these outputs: + # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb + if "s16" in checkpoint_url: + expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]]) + elif "b16" in checkpoint_url: + expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]]) + elif "l16" in checkpoint_url: + expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]]) + elif "b4" in checkpoint_url: + expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]]) + else: + expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]]) + + # verify logits + assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4) + + print(f"Saving model to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + + print(f"Saving image processor to {pytorch_dump_folder_path}") + image_processor.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--checkpoint_url", + default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar", + type=str, + help="URL of the checkpoint you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + + args = parser.parse_args() + convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_msn/modeling_vit_msn.py b/venv/lib/python3.10/site-packages/transformers/models/vit_msn/modeling_vit_msn.py new file mode 100644 index 0000000000000000000000000000000000000000..9c2269a3ae546fde6b04d05350b9c7b33481aa54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vit_msn/modeling_vit_msn.py @@ -0,0 +1,688 @@ +# coding=utf-8 +# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
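For reference, the slicing done by `read_in_q_k_v` above amounts to cutting the fused `qkv` projection of each block into three row blocks (query, key, value, in that order). A toy sketch with hypothetical tensors, not tied to any real checkpoint:

```python
# Sketch: MSN checkpoints store one fused qkv matrix per block; the HF model
# expects separate query/key/value weights, so the matrix is split row-wise.
import torch

hidden_size = 8
fused_qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # rows: [q | k | v]

query_weight = fused_qkv_weight[:hidden_size, :]
key_weight = fused_qkv_weight[hidden_size : 2 * hidden_size, :]
value_weight = fused_qkv_weight[-hidden_size:, :]

# Stacking the three slices back together recovers the original fused matrix.
assert torch.equal(torch.cat([query_weight, key_weight, value_weight], dim=0), fused_qkv_weight)
```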
+""" PyTorch ViT MSN (masked siamese network) model.""" + + +import collections.abc +import math +from typing import Dict, List, Optional, Set, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_vit_msn import ViTMSNConfig + + +logger = logging.get_logger(__name__) + + +_CONFIG_FOR_DOC = "ViTMSNConfig" +_CHECKPOINT_FOR_DOC = "facebook/vit-msn-small" + +from ..deprecated._archive_maps import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +class ViTMSNEmbeddings(nn.Module): + """ + Construct the CLS token, position and patch embeddings. Optionally, also the mask token. + """ + + def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False) -> None: + super().__init__() + + self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) + self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None + self.patch_embeddings = ViTMSNPatchEmbeddings(config) + num_patches = self.patch_embeddings.num_patches + self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.config = config + + def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: + """ + This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher + resolution images. 
+ + Source: + https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 + """ + + num_patches = embeddings.shape[1] - 1 + num_positions = self.position_embeddings.shape[1] - 1 + if num_patches == num_positions and height == width: + return self.position_embeddings + class_pos_embed = self.position_embeddings[:, 0] + patch_pos_embed = self.position_embeddings[:, 1:] + dim = embeddings.shape[-1] + patch_window_height = height // self.config.patch_size + patch_window_width = width // self.config.patch_size + # we add a small number to avoid floating point error in the interpolation + # see discussion at https://github.com/facebookresearch/dino/issues/8 + patch_window_height, patch_window_width = patch_window_height + 0.1, patch_window_width + 0.1 + patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) + patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) + patch_pos_embed = nn.functional.interpolate( + patch_pos_embed, + scale_factor=( + patch_window_height / math.sqrt(num_positions), + patch_window_width / math.sqrt(num_positions), + ), + mode="bicubic", + align_corners=False, + ) + patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) + + def forward( + self, + pixel_values: torch.Tensor, + bool_masked_pos: Optional[torch.BoolTensor] = None, + interpolate_pos_encoding: bool = False, + ) -> torch.Tensor: + batch_size, num_channels, height, width = pixel_values.shape + embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) + + if bool_masked_pos is not None: + seq_length = embeddings.shape[1] + mask_tokens = self.mask_token.expand(batch_size, seq_length, -1) + # replace the masked visual tokens by mask_tokens + mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) + embeddings = embeddings * (1.0 - mask) + mask_tokens * mask + + # add the [CLS] token to the embedded patch tokens + cls_tokens = self.cls_token.expand(batch_size, -1, -1) + embeddings = torch.cat((cls_tokens, embeddings), dim=1) + + # add positional encoding to each token + if interpolate_pos_encoding: + embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) + else: + embeddings = embeddings + self.position_embeddings + + embeddings = self.dropout(embeddings) + + return embeddings + + +# Copied from transformers.models.vit.modeling_vit.ViTPatchEmbeddings with ViT->ViTMSN +class ViTMSNPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. 
+ """ + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.hidden_size + + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + + self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) + + def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: + batch_size, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + f" Expected {self.num_channels} but got {num_channels}." + ) + if not interpolate_pos_encoding: + if height != self.image_size[0] or width != self.image_size[1]: + raise ValueError( + f"Input image size ({height}*{width}) doesn't match model" + f" ({self.image_size[0]}*{self.image_size[1]})." + ) + embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) + return embeddings + + +# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->ViTMSN +class ViTMSNSelfAttention(nn.Module): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " + f"heads {config.num_attention_heads}." + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + mixed_query_layer = self.query(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + + # Normalize the attention scores to probabilities. 
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->ViTMSN +class ViTMSNSelfOutput(nn.Module): + """ + The residual connection is defined in ViTMSNLayer instead of here (as is the case with other models), due to the + layernorm applied before each block. + """ + + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->ViTMSN +class ViTMSNAttention(nn.Module): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.attention = ViTMSNSelfAttention(config) + self.output = ViTMSNSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads: Set[int]) -> None: + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.attention.query = prune_linear_layer(self.attention.query, index) + self.attention.key = prune_linear_layer(self.attention.key, index) + self.attention.value = prune_linear_layer(self.attention.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) + self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_outputs = self.attention(hidden_states, head_mask, output_attentions) + + attention_output = self.output(self_outputs[0], hidden_states) + + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->ViTMSN +class ViTMSNIntermediate(nn.Module): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + 
hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->ViTMSN +class ViTMSNOutput(nn.Module): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + hidden_states = hidden_states + input_tensor + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->ViTMSN +class ViTMSNLayer(nn.Module): + """This corresponds to the Block class in the timm implementation.""" + + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = ViTMSNAttention(config) + self.intermediate = ViTMSNIntermediate(config) + self.output = ViTMSNOutput(config) + self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_attention_outputs = self.attention( + self.layernorm_before(hidden_states), # in ViTMSN, layernorm is applied before self-attention + head_mask, + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + # first residual connection + hidden_states = attention_output + hidden_states + + # in ViTMSN, layernorm is also applied after self-attention + layer_output = self.layernorm_after(hidden_states) + layer_output = self.intermediate(layer_output) + + # second residual connection is done here + layer_output = self.output(layer_output, hidden_states) + + outputs = (layer_output,) + outputs + + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->ViTMSN +class ViTMSNEncoder(nn.Module): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.config = config + self.layer = nn.ModuleList([ViTMSNLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ) -> Union[tuple, BaseModelOutput]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + layer_head_mask, + output_attentions, + ) + else: + layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) + + hidden_states = layer_outputs[0] + + if output_attentions: + 
all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class ViTMSNPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = ViTMSNConfig + base_model_prefix = "vit" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + # todo: Resort to https://github.com/facebookresearch/msn/blob/main/src/deit.py#L200-#L211 + # when creating pre-training scripts. + def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +VIT_MSN_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`ViTMSNConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +VIT_MSN_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] + for details. + + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + interpolate_pos_encoding (`bool`, *optional*): + Whether to interpolate the pre-trained position encodings. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +@add_start_docstrings( + "The bare ViTMSN Model outputting raw hidden-states without any specific head on top.", + VIT_MSN_START_DOCSTRING, +) +class ViTMSNModel(ViTMSNPreTrainedModel): + def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False): + super().__init__(config) + self.config = config + + self.embeddings = ViTMSNEmbeddings(config, use_mask_token=use_mask_token) + self.encoder = ViTMSNEncoder(config) + + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> ViTMSNPatchEmbeddings: + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(VIT_MSN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + bool_masked_pos: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + interpolate_pos_encoding: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, BaseModelOutput]: + r""" + bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): + Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). + + Returns: + + Examples: + + ```python + >>> from transformers import AutoImageProcessor, ViTMSNModel + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-msn-small") + >>> model = ViTMSNModel.from_pretrained("facebook/vit-msn-small") + >>> inputs = image_processor(images=image, return_tensors="pt") + >>> with torch.no_grad(): + ... 
outputs = model(**inputs) + >>> last_hidden_states = outputs.last_hidden_state + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding + ) + + encoder_outputs = self.encoder( + embedding_output, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + sequence_output = self.layernorm(sequence_output) + + if not return_dict: + head_outputs = (sequence_output,) + return head_outputs + encoder_outputs[1:] + + return BaseModelOutput( + last_hidden_state=sequence_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +# Caution: We don't have the weights for the classification head yet. This class +# is here for the users that are interested to fine-tune the base model (ViTMSNModel). +@add_start_docstrings( + """ + ViTMSN Model with an image classification head on top e.g. for ImageNet. + """, + VIT_MSN_START_DOCSTRING, +) +class ViTMSNForImageClassification(ViTMSNPreTrainedModel): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.vit = ViTMSNModel(config) + + # Classifier head + self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(VIT_MSN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + interpolate_pos_encoding: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, ImageClassifierOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoImageProcessor, ViTMSNForImageClassification + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> torch.manual_seed(2) # doctest: +IGNORE_RESULT + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-msn-small") + >>> model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small") + + >>> inputs = image_processor(images=image, return_tensors="pt") + >>> with torch.no_grad(): + ... 
logits = model(**inputs).logits + >>> # model predicts one of the 1000 ImageNet classes + >>> predicted_label = logits.argmax(-1).item() + >>> print(model.config.id2label[predicted_label]) + tusker + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.vit( + pixel_values, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + interpolate_pos_encoding=interpolate_pos_encoding, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.classifier(sequence_output[:, 0, :]) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return ImageClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + )
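Since the comment above notes that no classification-head weights are released and `ViTMSNForImageClassification` is meant for fine-tuning, here is a hedged sketch of a single training step driven by the `problem_type` branch in the forward pass. All sizes, the dummy batch, and the optimizer choice are assumptions made so the snippet runs without downloading any weights:

```python
# Sketch: one fine-tuning step on random data with a tiny, randomly initialized model.
import torch
from transformers import ViTMSNConfig, ViTMSNForImageClassification

config = ViTMSNConfig(
    hidden_size=96, num_hidden_layers=2, num_attention_heads=3,
    intermediate_size=384, image_size=32, patch_size=8, num_labels=10,
)
model = ViTMSNForImageClassification(config)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

pixel_values = torch.randn(4, 3, 32, 32)   # one dummy batch of images
labels = torch.randint(0, 10, (4,))        # integer ids -> single_label_classification

outputs = model(pixel_values=pixel_values, labels=labels)
outputs.loss.backward()                    # CrossEntropyLoss, per the problem_type logic above
optimizer.step()
```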