diff --git a/.gitattributes b/.gitattributes index 85103d2149302bd68a4f6784dfd094bee3529ec9..a3b08a51d24df209d88f27e14e30826e411219c3 100644 --- a/.gitattributes +++ b/.gitattributes @@ -106,3 +106,4 @@ venv/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64-linux- venv/lib/python3.10/site-packages/pandas/_libs/interval.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolver.so.11 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so filter=lfs diff=lfs merge=lfs -text diff --git a/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 
+smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..770bd3a3bacd57b25320b34641708cdf1cd2fa1d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715704623 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/output.log b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..48e2468e5e54a8b278b392de7c92424fc209eb09 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/output.log @@ -0,0 +1,33 @@ + +2024-05-14:16:37:04,095 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:37:08,625 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:16:37:08,627 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:37:08,627 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step20'} +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module> + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in
_get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 369, in cached_file + raise EnvironmentError( +OSError: /data/cronscript/ckpts//hf_ckpt//global_step20 does not appear to have a file named config.json. Checkout 'https://huggingface.co//data/cronscript/ckpts//hf_ckpt//global_step20/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 
+regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..c69dc10ed4a221c67b375b5be816460f3411870d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:37:03.962034", + "startedAt": "2024-05-14T16:37:03.522848", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3392.650802631579, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, 
+ "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.003, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3399.996, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 
+ }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3215.606, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3213.19, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4235305786133 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..d269869abc3140615f2059bf0cbcbb49012a648c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-05-14 16:37:03,534 INFO StreamThr :127582 [internal.py:wandb_internal():85] W&B internal server running at pid: 127582, started at: 2024-05-14 16:37:03.533613 +2024-05-14 16:37:03,536 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:37:03,537 INFO WriterThread:127582 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/run-bz9fpvey.wandb +2024-05-14 16:37:03,538 DEBUG SenderThread:127582 [sender.py:send():378] send: header +2024-05-14 16:37:03,548 DEBUG SenderThread:127582 [sender.py:send():378] send: run +2024-05-14 16:37:03,817 INFO SenderThread:127582 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files +2024-05-14 16:37:03,817 INFO SenderThread:127582 [sender.py:_start_run_threads():1123] run started: bz9fpvey with start time 1715704623.533121 +2024-05-14 16:37:03,823 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:37:03,824 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:37:03,913 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:37:03,915 DEBUG HandlerThread:127582 
[system_info.py:__init__():26] System info init +2024-05-14 16:37:03,915 DEBUG HandlerThread:127582 [system_info.py:__init__():41] System info init done +2024-05-14 16:37:03,915 INFO HandlerThread:127582 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:37:03,915 INFO SystemMonitor:127582 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:37:03,915 INFO HandlerThread:127582 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:37:03,916 INFO SystemMonitor:127582 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:37:03,916 INFO SystemMonitor:127582 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:37:03,916 INFO SystemMonitor:127582 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:37:03,916 INFO SystemMonitor:127582 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:37:03,961 DEBUG HandlerThread:127582 [system_info.py:probe():150] Probing system +2024-05-14 16:37:03,970 DEBUG HandlerThread:127582 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:37:03,990 ERROR HandlerThread:127582 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:37:03,990 DEBUG HandlerThread:127582 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:37:03,990 DEBUG HandlerThread:127582 [system_info.py:probe():198] Probing system done +2024-05-14 16:37:03,990 DEBUG HandlerThread:127582 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:37:03.962034', 'startedAt': '2024-05-14T16:37:03.522848', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3392.650802631579, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.003, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.996, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3215.606, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3213.19, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4235305786133}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:37:03,990 INFO HandlerThread:127582 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:37:03,990 INFO HandlerThread:127582 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:37:03,991 INFO HandlerThread:127582 
[system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:37:03,995 DEBUG SenderThread:127582 [sender.py:send():378] send: files +2024-05-14 16:37:03,995 INFO SenderThread:127582 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:37:04,092 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:37:04,092 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:37:04,092 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:37:04,093 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:37:04,325 DEBUG SenderThread:127582 [sender.py:send():378] send: telemetry +2024-05-14 16:37:04,501 INFO wandb-upload_0:127582 [upload_job.py:push():130] Uploaded file /tmp/tmphsgr8lbvwandb/h7un6xlm-wandb-metadata.json +2024-05-14 16:37:04,818 INFO Thread-12 :127582 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/output.log +2024-05-14 16:37:04,818 INFO Thread-12 :127582 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-metadata.json +2024-05-14 16:37:04,818 INFO Thread-12 :127582 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/requirements.txt +2024-05-14 16:37:06,818 INFO Thread-12 :127582 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/output.log +2024-05-14 16:37:08,626 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:37:09,686 DEBUG SenderThread:127582 [sender.py:send():378] send: exit +2024-05-14 16:37:09,686 INFO SenderThread:127582 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 16:37:09,686 INFO SenderThread:127582 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 16:37:09,687 INFO SenderThread:127582 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:37:09,687 INFO SenderThread:127582 [sender.py:send_exit():593] send defer +2024-05-14 16:37:09,687 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:09,688 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:37:09,688 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:09,688 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:37:09,688 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:37:09,688 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:09,688 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:37:09,688 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:09,688 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:37:09,688 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:37:09,688 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer 
+2024-05-14 16:37:09,688 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:37:09,688 INFO HandlerThread:127582 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:37:09,688 DEBUG SystemMonitor:127582 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:37:09,689 INFO HandlerThread:127582 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:37:09,689 DEBUG SystemMonitor:127582 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:37:09,689 INFO HandlerThread:127582 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:37:09,689 DEBUG SystemMonitor:127582 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:37:09,689 INFO HandlerThread:127582 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:37:09,690 INFO HandlerThread:127582 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:37:09,691 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:09,691 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:37:09,691 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:37:09,691 DEBUG SenderThread:127582 [sender.py:send():378] send: stats +2024-05-14 16:37:09,691 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:09,692 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:37:09,692 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:09,692 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:37:09,692 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:37:09,692 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:09,692 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:37:09,692 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:09,692 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:37:09,692 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:37:09,692 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:09,693 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:37:09,693 DEBUG SenderThread:127582 [sender.py:send():378] send: summary +2024-05-14 16:37:09,693 INFO SenderThread:127582 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:37:09,694 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:09,694 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:37:09,694 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:37:09,694 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:09,694 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:37:09,694 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:09,694 INFO SenderThread:127582 
[sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:37:09,699 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:37:09,783 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:37:09,783 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:09,783 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:37:09,783 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:09,784 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:37:09,820 INFO Thread-12 :127582 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/config.yaml +2024-05-14 16:37:09,820 INFO Thread-12 :127582 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-summary.json +2024-05-14 16:37:10,338 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:37:10,338 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:10,338 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:37:10,338 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,338 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:37:10,339 INFO SenderThread:127582 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:37:10,339 INFO SenderThread:127582 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:37:10,339 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:37:10,339 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:10,339 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:37:10,339 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,339 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 16:37:10,339 INFO SenderThread:127582 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:37:10,686 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:10,821 INFO SenderThread:127582 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/output.log +2024-05-14 16:37:10,821 INFO SenderThread:127582 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files +2024-05-14 16:37:10,822 INFO SenderThread:127582 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/config.yaml config.yaml +2024-05-14 16:37:10,822 INFO SenderThread:127582 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/output.log output.log +2024-05-14 16:37:10,822 INFO SenderThread:127582 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/requirements.txt 
requirements.txt +2024-05-14 16:37:10,822 INFO SenderThread:127582 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:37:10,822 INFO SenderThread:127582 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-summary.json wandb-summary.json +2024-05-14 16:37:10,822 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:37:10,823 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:10,824 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:10,825 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:37:10,827 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,827 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:37:10,827 INFO SenderThread:127582 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:37:11,056 INFO wandb-upload_1:127582 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/output.log +2024-05-14 16:37:11,257 INFO wandb-upload_0:127582 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/config.yaml +2024-05-14 16:37:11,311 INFO wandb-upload_3:127582 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/wandb-summary.json +2024-05-14 16:37:11,332 INFO wandb-upload_2:127582 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/files/requirements.txt +2024-05-14 16:37:11,532 INFO Thread-11 (_thread_body):127582 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:37:11,533 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:11,533 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:37:11,533 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:11,533 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:37:11,533 INFO SenderThread:127582 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 16:37:11,533 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:37:11,533 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:11,533 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:37:11,534 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:11,534 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:37:11,534 INFO SenderThread:127582 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:37:11,686 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:11,756 INFO SenderThread:127582 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:37:11,756 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:37:11,756 
DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:11,756 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:11,756 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:37:11,756 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:11,756 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:37:11,756 INFO SenderThread:127582 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:37:11,757 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:11,757 DEBUG SenderThread:127582 [sender.py:send():378] send: final +2024-05-14 16:37:11,757 INFO HandlerThread:127582 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:37:11,757 DEBUG SenderThread:127582 [sender.py:send():378] send: footer +2024-05-14 16:37:11,757 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:11,757 INFO SenderThread:127582 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:37:11,757 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:11,757 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:11,757 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:11,757 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:11,758 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:37:11,758 DEBUG SenderThread:127582 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:37:11,759 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:37:11,759 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:37:11,759 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:37:11,821 INFO MainThread:127582 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:37:11,821 INFO MainThread:127582 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:37:11,821 INFO MainThread:127582 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:37:11,821 DEBUG HandlerThread:127582 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:37:11,821 INFO HandlerThread:127582 [handler.py:finish():882] shutting down handler +2024-05-14 16:37:12,758 INFO WriterThread:127582 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/run-bz9fpvey.wandb +2024-05-14 16:37:12,821 INFO SenderThread:127582 [sender.py:finish():1545] shutting down sender +2024-05-14 16:37:12,821 INFO SenderThread:127582 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:37:12,821 INFO SenderThread:127582 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..302ae6efd07bb2f764508c68daed8166d3674db0 --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_setup.py:_flush():76] Configure stats pid to 126390 +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:37:03,530 WARNING MainThread:126390 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/logs/debug.log +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/logs/debug-internal.log +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_init.py:init():610] starting backend +2024-05-14 16:37:03,530 INFO MainThread:126390 [wandb_init.py:init():614] setting up manager +2024-05-14 16:37:03,532 INFO MainThread:126390 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:37:03,532 INFO MainThread:126390 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:37:03,536 INFO MainThread:126390 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:37:03,547 INFO MainThread:126390 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:37:03,823 INFO MainThread:126390 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:37:03,909 INFO MainThread:126390 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:37:03,910 INFO MainThread:126390 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:37:04,092 INFO MainThread:126390 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:37:04,092 INFO MainThread:126390 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:37:04,092 INFO MainThread:126390 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:37:04,092 INFO MainThread:126390 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 16:37:04,093 INFO MainThread:126390 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:37:12,822 WARNING MsgRouterThr:126390 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/run-bz9fpvey.wandb b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/run-bz9fpvey.wandb new file mode 100644 index 0000000000000000000000000000000000000000..380b0502fea71175780d9fa2a08755a1d21bbb06 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_163703-bz9fpvey/run-bz9fpvey.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c9f6c8298d7f6388c49b502891050f1a4aef6d7c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715705031 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..9caf66c897adba0a72606df2732138cdd2c2f5bf --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:16:43:52,047 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:43:56,680 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:16:43:56,683 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:43:56,683 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch integration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an efficient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`.
This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:16:44:05,545 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:16:44:05,545 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +[2024-05-14 16:44:05,088] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:16:44:06,741 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:16:44:06,741 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:16:44:06,762 INFO [task.py:395] Building contexts for indiccopa-hi on rank 4... +100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 102433.94it/s] +2024-05-14:16:44:08,634 INFO [evaluator.py:379] Running loglikelihood requests +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an efficient way. + warnings.warn( +Passed argument batch_size = auto:1.
Detecting largest batch size +Determined largest batch size: 64 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 
+word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..68588d5eade3c367a04bb468c3bf733006a6477c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:43:51.903342", + "startedAt": "2024-05-14T16:43:51.420635", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3390.232085526316, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.998, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.998, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.945, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 
+ }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3288.056, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.001, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.003, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4306678771973 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..91df0012cef27fbd76437f2803da1fd4192acd69 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 24}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..806ffe09671b2e41f892dd40f323c98348aa51a9 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/logs/debug-internal.log @@ -0,0 +1,193 @@ +2024-05-14 16:43:51,431 INFO StreamThr :130114 [internal.py:wandb_internal():85] W&B internal server running at pid: 130114, started at: 2024-05-14 16:43:51.431247 +2024-05-14 16:43:51,434 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:43:51,435 INFO WriterThread:130114 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/run-yt38ue1u.wandb +2024-05-14 16:43:51,436 DEBUG SenderThread:130114 [sender.py:send():378] send: header +2024-05-14 16:43:51,445 DEBUG SenderThread:130114 [sender.py:send():378] send: run +2024-05-14 16:43:51,720 INFO SenderThread:130114 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files +2024-05-14 16:43:51,720 INFO SenderThread:130114 [sender.py:_start_run_threads():1123] run started: yt38ue1u with start time 1715705031.43136 +2024-05-14 16:43:51,728 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:43:51,728 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:43:51,813 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:43:51,815 DEBUG HandlerThread:130114 [system_info.py:__init__():26] System info init +2024-05-14 16:43:51,815 DEBUG HandlerThread:130114 [system_info.py:__init__():41] System info init done +2024-05-14 16:43:51,815 INFO HandlerThread:130114 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:43:51,815 INFO SystemMonitor:130114 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:43:51,815 INFO HandlerThread:130114 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:43:51,815 INFO SystemMonitor:130114 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:43:51,815 INFO SystemMonitor:130114 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:43:51,816 INFO SystemMonitor:130114 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:43:51,817 INFO SystemMonitor:130114 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:43:51,903 DEBUG 
HandlerThread:130114 [system_info.py:probe():150] Probing system +2024-05-14 16:43:51,911 DEBUG HandlerThread:130114 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:43:51,931 ERROR HandlerThread:130114 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:43:51,931 DEBUG HandlerThread:130114 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:43:51,931 DEBUG HandlerThread:130114 [system_info.py:probe():198] Probing system done +2024-05-14 16:43:51,931 DEBUG HandlerThread:130114 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:43:51.903342', 'startedAt': '2024-05-14T16:43:51.420635', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3390.232085526316, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.998, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.998, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.945, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3288.056, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 
'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.001, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.003, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4306678771973}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:43:51,932 INFO HandlerThread:130114 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:43:51,932 INFO HandlerThread:130114 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:43:51,934 INFO HandlerThread:130114 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:43:51,940 DEBUG SenderThread:130114 [sender.py:send():378] send: files +2024-05-14 16:43:51,940 INFO SenderThread:130114 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:43:52,039 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:43:52,040 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:43:52,040 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:43:52,040 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:43:52,209 DEBUG SenderThread:130114 [sender.py:send():378] send: telemetry +2024-05-14 16:43:52,487 INFO wandb-upload_0:130114 [upload_job.py:push():130] Uploaded file 
/tmp/tmpbmv6m6ulwandb/csucs1yj-wandb-metadata.json +2024-05-14 16:43:52,721 INFO Thread-12 :130114 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/requirements.txt +2024-05-14 16:43:52,721 INFO Thread-12 :130114 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-metadata.json +2024-05-14 16:43:52,721 INFO Thread-12 :130114 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:43:54,721 INFO Thread-12 :130114 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:43:56,682 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:43:58,723 INFO Thread-12 :130114 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:44:01,684 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:04,728 INFO Thread-12 :130114 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:44:06,729 INFO Thread-12 :130114 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:44:06,742 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:07,040 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:44:07,041 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:44:07,730 INFO Thread-12 :130114 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:44:08,731 INFO Thread-12 :130114 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:44:10,732 INFO Thread-12 :130114 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:44:11,734 INFO Thread-12 :130114 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:44:12,151 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:15,882 DEBUG SenderThread:130114 [sender.py:send():378] send: exit +2024-05-14 16:44:15,882 INFO SenderThread:130114 [sender.py:send_exit():585] handling exit code: 0 +2024-05-14 16:44:15,883 INFO SenderThread:130114 [sender.py:send_exit():587] handling runtime: 24 +2024-05-14 16:44:15,885 INFO SenderThread:130114 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:44:15,885 INFO SenderThread:130114 [sender.py:send_exit():593] send defer +2024-05-14 16:44:15,885 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 
16:44:15,885 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:44:15,885 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,885 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:44:15,885 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:44:15,885 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,886 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:44:15,886 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,886 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:44:15,886 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:44:15,886 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,886 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:44:15,886 INFO HandlerThread:130114 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:44:15,886 DEBUG SystemMonitor:130114 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:44:15,887 INFO HandlerThread:130114 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:44:15,887 DEBUG SystemMonitor:130114 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:44:15,887 INFO HandlerThread:130114 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:44:15,887 DEBUG SystemMonitor:130114 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:44:15,887 INFO HandlerThread:130114 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:44:15,889 INFO HandlerThread:130114 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:44:15,889 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,889 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:44:15,889 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:44:15,889 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,889 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:44:15,889 DEBUG SenderThread:130114 [sender.py:send():378] send: stats +2024-05-14 16:44:15,890 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,890 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:44:15,890 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:44:15,890 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,890 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:44:15,890 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,891 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:44:15,891 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:44:15,891 DEBUG HandlerThread:130114 
[handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,891 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:44:15,891 DEBUG SenderThread:130114 [sender.py:send():378] send: summary +2024-05-14 16:44:15,891 INFO SenderThread:130114 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:44:15,892 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,892 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:44:15,892 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:44:15,892 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,892 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:44:15,892 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,892 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:44:15,894 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:15,963 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:44:15,964 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,964 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:44:15,964 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,964 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:44:16,737 INFO Thread-12 :130114 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/config.yaml +2024-05-14 16:44:16,737 INFO Thread-12 :130114 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-summary.json +2024-05-14 16:44:16,882 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:17,100 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:44:17,100 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:17,100 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:17,100 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:44:17,101 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:17,101 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:44:17,101 INFO SenderThread:130114 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:44:17,101 INFO SenderThread:130114 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:44:17,101 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:44:17,101 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:17,101 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:44:17,101 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 
16:44:17,101 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 16:44:17,101 INFO SenderThread:130114 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:44:17,738 INFO SenderThread:130114 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:44:17,738 INFO SenderThread:130114 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files +2024-05-14 16:44:17,738 INFO SenderThread:130114 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log output.log +2024-05-14 16:44:17,739 INFO SenderThread:130114 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/config.yaml config.yaml +2024-05-14 16:44:17,739 INFO SenderThread:130114 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:44:17,739 INFO SenderThread:130114 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/requirements.txt requirements.txt +2024-05-14 16:44:17,739 INFO SenderThread:130114 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-summary.json wandb-summary.json +2024-05-14 16:44:17,739 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:44:17,740 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:17,741 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:44:17,741 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:17,743 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:44:17,743 INFO SenderThread:130114 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:44:17,882 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:17,883 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:17,986 INFO wandb-upload_0:130114 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/output.log +2024-05-14 16:44:18,139 INFO wandb-upload_1:130114 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/config.yaml +2024-05-14 16:44:18,219 INFO wandb-upload_3:130114 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/wandb-summary.json +2024-05-14 16:44:18,235 INFO wandb-upload_2:130114 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/files/requirements.txt +2024-05-14 16:44:18,435 INFO Thread-11 (_thread_body):130114 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:44:18,435 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,435 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:44:18,436 DEBUG 
SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,436 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:44:18,436 INFO SenderThread:130114 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 16:44:18,436 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:44:18,436 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,436 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:44:18,436 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,436 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:44:18,437 INFO SenderThread:130114 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:44:18,514 INFO SenderThread:130114 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:44:18,514 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:44:18,514 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,514 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:44:18,514 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,514 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:44:18,514 INFO SenderThread:130114 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:44:18,515 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,515 INFO HandlerThread:130114 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:44:18,515 DEBUG SenderThread:130114 [sender.py:send():378] send: final +2024-05-14 16:44:18,515 DEBUG SenderThread:130114 [sender.py:send():378] send: footer +2024-05-14 16:44:18,515 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,515 INFO SenderThread:130114 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:44:18,515 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:18,516 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:18,516 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:18,516 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:44:18,516 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:18,516 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:44:18,517 DEBUG SenderThread:130114 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:44:18,518 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:44:18,518 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:44:18,582 INFO MainThread:130114 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:44:18,582 INFO MainThread:130114 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:44:18,583 INFO MainThread:130114 
[wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:44:18,583 DEBUG HandlerThread:130114 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:44:18,583 INFO HandlerThread:130114 [handler.py:finish():882] shutting down handler +2024-05-14 16:44:19,517 INFO WriterThread:130114 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/run-yt38ue1u.wandb +2024-05-14 16:44:19,582 INFO SenderThread:130114 [sender.py:finish():1545] shutting down sender +2024-05-14 16:44:19,582 INFO SenderThread:130114 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:44:19,582 INFO SenderThread:130114 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..1dd7c19cabf5e92e3a4e7440102bde087f71adbc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_setup.py:_flush():76] Configure stats pid to 128891 +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:43:51,428 WARNING MainThread:128891 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/logs/debug.log +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/logs/debug-internal.log +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_init.py:init():610] starting backend +2024-05-14 16:43:51,428 INFO MainThread:128891 [wandb_init.py:init():614] setting up manager +2024-05-14 16:43:51,430 INFO MainThread:128891 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:43:51,431 INFO MainThread:128891 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:43:51,434 INFO MainThread:128891 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:43:51,444 INFO MainThread:128891 [wandb_init.py:init():744] communicating run to 
backend with 90.0 second timeout +2024-05-14 16:43:51,727 INFO MainThread:128891 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:43:51,806 INFO MainThread:128891 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:43:51,807 INFO MainThread:128891 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:43:52,040 INFO MainThread:128891 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:43:52,040 INFO MainThread:128891 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:43:52,041 INFO MainThread:128891 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:43:52,041 INFO MainThread:128891 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 16:43:52,043 INFO MainThread:128891 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:44:19,584 WARNING MsgRouterThr:128891 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/run-yt38ue1u.wandb b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/run-yt38ue1u.wandb new file mode 100644 index 0000000000000000000000000000000000000000..0cb4fda512bcd5699d7704bf337d5a108a0f7431 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_164351-yt38ue1u/run-yt38ue1u.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..46cf6f00b3bd7716effe94e46d60c979920d0178 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715705121 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/output.log b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..0f5ab506e5336c7135e9bb491d8ec7cfb6800db8 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/output.log @@ -0,0 +1,33 @@ + +2024-05-14:16:45:22,335 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:45:26,996 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:16:45:26,999 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:45:26,999 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step120'} +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File 
"/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 369, in cached_file + raise EnvironmentError( +OSError: /data/cronscript/ckpts//hf_ckpt//global_step120 does not appear to have a file named config.json. Checkout 'https://huggingface.co//data/cronscript/ckpts//hf_ckpt//global_step120/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 
+nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..3d9068b2f4bb4b286b933110310000da6709b231 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:45:22.198954", + "startedAt": "2024-05-14T16:45:21.730662", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3394.2020460526314, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.437, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3153.322, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3328.65, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3328.689, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3328.958, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3211.006, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3305.459, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3210.713, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3212.468, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4299659729004 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..6954e262a614fd82329ed79f50fad2b37ca83f11 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-14 16:45:21,742 INFO StreamThr :137238 [internal.py:wandb_internal():85] W&B internal server running at pid: 137238, started at: 2024-05-14 16:45:21.741887 +2024-05-14 16:45:21,744 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:45:21,745 INFO WriterThread:137238 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/run-67q89u0m.wandb +2024-05-14 16:45:21,746 DEBUG SenderThread:137238 [sender.py:send():378] send: header +2024-05-14 16:45:21,755 DEBUG SenderThread:137238 [sender.py:send():378] send: run +2024-05-14 16:45:22,023 INFO SenderThread:137238 [dir_watcher.py:__init__():211] watching files in: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files +2024-05-14 16:45:22,023 INFO SenderThread:137238 [sender.py:_start_run_threads():1123] run started: 67q89u0m with start time 1715705121.741618 +2024-05-14 16:45:22,029 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:45:22,030 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:45:22,112 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:45:22,114 DEBUG HandlerThread:137238 [system_info.py:__init__():26] System info init +2024-05-14 16:45:22,114 DEBUG HandlerThread:137238 [system_info.py:__init__():41] System info init done +2024-05-14 16:45:22,114 INFO HandlerThread:137238 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:45:22,114 INFO SystemMonitor:137238 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:45:22,114 INFO HandlerThread:137238 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:45:22,114 INFO SystemMonitor:137238 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:45:22,114 INFO SystemMonitor:137238 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:45:22,115 INFO SystemMonitor:137238 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:45:22,115 INFO SystemMonitor:137238 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:45:22,198 DEBUG HandlerThread:137238 [system_info.py:probe():150] Probing system +2024-05-14 16:45:22,207 DEBUG HandlerThread:137238 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:45:22,226 ERROR HandlerThread:137238 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:45:22,226 DEBUG HandlerThread:137238 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:45:22,226 DEBUG HandlerThread:137238 [system_info.py:probe():198] Probing system done +2024-05-14 16:45:22,226 DEBUG HandlerThread:137238 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:45:22.198954', 'startedAt': '2024-05-14T16:45:21.730662', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3394.2020460526314, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.437, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 
'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3153.322, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3328.65, 'min': 800.0, 'max': 3400.0}, {'current': 3328.689, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3328.958, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3211.006, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3305.459, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3210.713, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3212.468, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4299659729004}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:45:22,226 INFO HandlerThread:137238 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:45:22,226 INFO HandlerThread:137238 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:45:22,228 INFO HandlerThread:137238 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:45:22,231 DEBUG SenderThread:137238 [sender.py:send():378] send: files +2024-05-14 16:45:22,231 INFO SenderThread:137238 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:45:22,331 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:45:22,332 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:45:22,332 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:45:22,332 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:45:22,492 DEBUG SenderThread:137238 [sender.py:send():378] send: telemetry +2024-05-14 16:45:22,763 INFO wandb-upload_0:137238 [upload_job.py:push():130] Uploaded file /tmp/tmptjfwh3oqwandb/rkmyee6c-wandb-metadata.json +2024-05-14 16:45:23,025 INFO Thread-12 :137238 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/output.log +2024-05-14 16:45:23,025 INFO Thread-12 :137238 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-metadata.json +2024-05-14 16:45:23,025 INFO Thread-12 :137238 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/requirements.txt +2024-05-14 16:45:25,025 INFO Thread-12 :137238 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/output.log +2024-05-14 16:45:26,997 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:45:28,083 DEBUG SenderThread:137238 [sender.py:send():378] send: exit +2024-05-14 16:45:28,083 INFO SenderThread:137238 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 16:45:28,084 INFO SenderThread:137238 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 16:45:28,084 INFO SenderThread:137238 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:45:28,085 INFO SenderThread:137238 [sender.py:send_exit():593] send defer +2024-05-14 16:45:28,085 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,085 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:45:28,085 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,085 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:45:28,085 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:45:28,085 DEBUG 
HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,085 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:45:28,085 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,085 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:45:28,085 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:45:28,085 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,085 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:45:28,085 INFO HandlerThread:137238 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:45:28,085 DEBUG SystemMonitor:137238 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:45:28,086 INFO HandlerThread:137238 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:45:28,086 DEBUG SystemMonitor:137238 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:45:28,086 INFO HandlerThread:137238 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:45:28,086 DEBUG SystemMonitor:137238 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:45:28,086 INFO HandlerThread:137238 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:45:28,088 INFO HandlerThread:137238 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:45:28,088 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,088 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:45:28,088 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:45:28,088 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,088 DEBUG SenderThread:137238 [sender.py:send():378] send: stats +2024-05-14 16:45:28,088 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:45:28,089 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,089 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:45:28,089 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:45:28,089 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,089 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:45:28,089 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,089 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:45:28,089 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:45:28,089 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,089 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:45:28,089 DEBUG SenderThread:137238 [sender.py:send():378] send: summary +2024-05-14 16:45:28,090 INFO SenderThread:137238 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:45:28,090 DEBUG SenderThread:137238 [sender.py:send_request():405] 
send_request: defer +2024-05-14 16:45:28,090 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:45:28,090 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:45:28,090 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,090 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:45:28,090 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,090 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:45:28,092 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:45:28,234 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:45:28,235 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,235 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:45:28,235 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,235 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:45:29,027 INFO Thread-12 :137238 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/config.yaml +2024-05-14 16:45:29,027 INFO Thread-12 :137238 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/output.log +2024-05-14 16:45:29,028 INFO Thread-12 :137238 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-summary.json +2024-05-14 16:45:29,083 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:45:30,512 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:45:30,512 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:45:30,512 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:30,512 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:45:30,512 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:30,512 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:45:30,512 INFO SenderThread:137238 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:45:30,513 INFO SenderThread:137238 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:45:30,513 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:45:30,513 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:30,513 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:45:30,513 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:30,513 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 16:45:30,513 INFO SenderThread:137238 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:45:31,029 INFO SenderThread:137238 
[dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/output.log +2024-05-14 16:45:31,030 INFO SenderThread:137238 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files +2024-05-14 16:45:31,030 INFO SenderThread:137238 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-summary.json wandb-summary.json +2024-05-14 16:45:31,030 INFO SenderThread:137238 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/config.yaml config.yaml +2024-05-14 16:45:31,030 INFO SenderThread:137238 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/requirements.txt requirements.txt +2024-05-14 16:45:31,030 INFO SenderThread:137238 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/output.log output.log +2024-05-14 16:45:31,030 INFO SenderThread:137238 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:45:31,030 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:45:31,033 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:31,033 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:45:31,035 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:31,035 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:45:31,035 INFO SenderThread:137238 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:45:31,086 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:45:31,086 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:45:31,276 INFO wandb-upload_1:137238 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/config.yaml +2024-05-14 16:45:31,432 INFO wandb-upload_0:137238 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/wandb-summary.json +2024-05-14 16:45:31,503 INFO wandb-upload_2:137238 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/requirements.txt +2024-05-14 16:45:31,526 INFO wandb-upload_3:137238 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/files/output.log +2024-05-14 16:45:31,726 INFO Thread-11 (_thread_body):137238 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:45:31,727 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:31,727 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:45:31,727 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:31,728 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:45:31,728 INFO SenderThread:137238 [file_pusher.py:join():175] 
waiting for file pusher +2024-05-14 16:45:31,728 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:45:31,728 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:31,728 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:45:31,728 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:31,728 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:45:31,728 INFO SenderThread:137238 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:45:31,946 INFO SenderThread:137238 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:45:31,946 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:45:31,946 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:31,946 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:45:31,946 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:31,946 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:45:31,947 INFO SenderThread:137238 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:45:31,947 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:31,947 INFO HandlerThread:137238 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:45:31,947 DEBUG SenderThread:137238 [sender.py:send():378] send: final +2024-05-14 16:45:31,947 DEBUG SenderThread:137238 [sender.py:send():378] send: footer +2024-05-14 16:45:31,947 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:31,947 INFO SenderThread:137238 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:45:31,947 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:45:31,948 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:45:31,948 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:45:31,948 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:45:31,948 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:45:31,948 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:45:31,948 DEBUG SenderThread:137238 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:45:31,950 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:45:31,950 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:45:32,011 INFO MainThread:137238 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:45:32,011 INFO MainThread:137238 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:45:32,011 INFO MainThread:137238 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:45:32,011 DEBUG HandlerThread:137238 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:45:32,011 INFO HandlerThread:137238 [handler.py:finish():882] shutting down 
handler +2024-05-14 16:45:32,948 INFO WriterThread:137238 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/run-67q89u0m.wandb +2024-05-14 16:45:33,011 INFO SenderThread:137238 [sender.py:finish():1545] shutting down sender +2024-05-14 16:45:33,011 INFO SenderThread:137238 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:45:33,011 INFO SenderThread:137238 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..6a52fe97204421eb72a3c1e1515fd1cea8814cbe --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:45:21,738 INFO MainThread:136020 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:45:21,738 INFO MainThread:136020 [wandb_setup.py:_flush():76] Configure stats pid to 136020 +2024-05-14 16:45:21,738 INFO MainThread:136020 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:45:21,738 INFO MainThread:136020 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:45:21,738 INFO MainThread:136020 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:45:21,738 INFO MainThread:136020 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:45:21,738 WARNING MainThread:136020 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:45:21,738 INFO MainThread:136020 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:45:21,738 INFO MainThread:136020 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:45:21,738 INFO MainThread:136020 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/logs/debug.log +2024-05-14 16:45:21,738 INFO MainThread:136020 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/logs/debug-internal.log +2024-05-14 16:45:21,739 INFO MainThread:136020 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:45:21,739 INFO MainThread:136020 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:45:21,739 INFO MainThread:136020 [wandb_init.py:init():610] starting backend +2024-05-14 16:45:21,739 INFO MainThread:136020 [wandb_init.py:init():614] setting up manager +2024-05-14 16:45:21,740 INFO MainThread:136020 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:45:21,741 INFO MainThread:136020 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:45:21,744 INFO MainThread:136020 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:45:21,755 INFO MainThread:136020 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:45:22,029 INFO MainThread:136020 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:45:22,108 INFO MainThread:136020 [wandb_run.py:_on_init():2405] got version response +2024-05-14 
16:45:22,108 INFO MainThread:136020 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:45:22,332 INFO MainThread:136020 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:45:22,332 INFO MainThread:136020 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:45:22,332 INFO MainThread:136020 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:45:22,332 INFO MainThread:136020 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 16:45:22,333 INFO MainThread:136020 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:45:33,012 WARNING MsgRouterThr:136020 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/run-67q89u0m.wandb b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/run-67q89u0m.wandb new file mode 100644 index 0000000000000000000000000000000000000000..cb83f86ee74fa9e342341570f5a1ee4ea617c991 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_164521-67q89u0m/run-67q89u0m.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/config.yaml b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..526165565633b2a26772f1772a11cb54bb286f29 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.0 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716403431 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.0 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/output.log b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..dcc013716e3f635836bb2592f0e5075e9ec10a5f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/output.log @@ -0,0 +1,34 @@ + +2024-05-22:18:43:52,495 INFO [__main__.py:251] Verbosity set to INFO +2024-05-22:18:44:01,054 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-22:18:44:01,054 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-22:18:44:01,055 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000'} +2024-05-22:18:44:03,348 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, 
in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3573b3bbfd5b190fed4ccaed4ac2846002aec22 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 
+packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.0 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..3ad1386712831ff9cda4ad24df71f57f1b3b87b3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-22T18:43:52.293616", + "startedAt": "2024-05-22T18:43:51.780710", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2361.05825625, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.6395492553711 + } + }, + "memory": { + "total": 1007.4379997253418 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..d7205de55640e09f41f2170f7cdb4b1d3fb82bec --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-22 18:43:51,804 INFO StreamThr :1316 [internal.py:wandb_internal():85] W&B internal server running at pid: 1316, started at: 2024-05-22 18:43:51.800707 +2024-05-22 18:43:51,807 DEBUG HandlerThread:1316 
[handler.py:handle_request():158] handle_request: status +2024-05-22 18:43:51,807 INFO WriterThread:1316 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/run-lgo5xka7.wandb +2024-05-22 18:43:51,809 DEBUG SenderThread:1316 [sender.py:send():378] send: header +2024-05-22 18:43:51,813 DEBUG SenderThread:1316 [sender.py:send():378] send: run +2024-05-22 18:43:52,102 INFO SenderThread:1316 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files +2024-05-22 18:43:52,102 INFO SenderThread:1316 [sender.py:_start_run_threads():1123] run started: lgo5xka7 with start time 1716403431.800571 +2024-05-22 18:43:52,104 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: check_version +2024-05-22 18:43:52,104 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: check_version +2024-05-22 18:43:52,219 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: run_start +2024-05-22 18:43:52,221 DEBUG HandlerThread:1316 [system_info.py:__init__():26] System info init +2024-05-22 18:43:52,221 DEBUG HandlerThread:1316 [system_info.py:__init__():41] System info init done +2024-05-22 18:43:52,221 INFO HandlerThread:1316 [system_monitor.py:start():194] Starting system monitor +2024-05-22 18:43:52,221 INFO SystemMonitor:1316 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-22 18:43:52,221 INFO HandlerThread:1316 [system_monitor.py:probe():214] Collecting system info +2024-05-22 18:43:52,228 INFO SystemMonitor:1316 [interfaces.py:start():188] Started cpu monitoring +2024-05-22 18:43:52,234 INFO SystemMonitor:1316 [interfaces.py:start():188] Started disk monitoring +2024-05-22 18:43:52,234 INFO SystemMonitor:1316 [interfaces.py:start():188] Started memory monitoring +2024-05-22 18:43:52,234 INFO SystemMonitor:1316 [interfaces.py:start():188] Started network monitoring +2024-05-22 18:43:52,293 DEBUG HandlerThread:1316 [system_info.py:probe():150] Probing system +2024-05-22 18:43:52,297 DEBUG HandlerThread:1316 [system_info.py:_probe_git():135] Probing git +2024-05-22 18:43:52,307 ERROR HandlerThread:1316 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-22 18:43:52,307 DEBUG HandlerThread:1316 [system_info.py:_probe_git():143] Probing git done +2024-05-22 18:43:52,307 DEBUG HandlerThread:1316 [system_info.py:probe():198] Probing system done +2024-05-22 18:43:52,307 DEBUG HandlerThread:1316 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T18:43:52.293616', 'startedAt': '2024-05-22T18:43:51.780710', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 
'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2361.05825625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, [... remaining ~158 per-core entries omitted: each reports 'min': 800.0 and 'max': 3400.0, with 'current' at 2300.0 on all but a few cores at 3400.0 ...]], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.6395492553711}}, 'memory': {'total': 1007.4379997253418}}
+2024-05-22 18:43:52,307 INFO HandlerThread:1316 [system_monitor.py:probe():224] Finished collecting system info
+2024-05-22 18:43:52,307 INFO HandlerThread:1316 [system_monitor.py:probe():227] Publishing system info
+2024-05-22 18:43:52,310 INFO HandlerThread:1316 [system_monitor.py:probe():229] Finished publishing system info
+2024-05-22 18:43:52,315 DEBUG SenderThread:1316 [sender.py:send():378] send: files
+2024-05-22 18:43:52,316 INFO SenderThread:1316 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+2024-05-22 18:43:52,489 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: python_packages
+2024-05-22 18:43:52,489 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: python_packages
+2024-05-22 18:43:52,491 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: stop_status
+2024-05-22 18:43:52,491 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: stop_status
+2024-05-22 18:43:52,634 DEBUG SenderThread:1316 [sender.py:send():378] send: telemetry
+2024-05-22 18:43:52,874 INFO wandb-upload_0:1316 [upload_job.py:push():130] Uploaded file /tmp/tmpwwegx46wwandb/bje497tc-wandb-metadata.json
+2024-05-22 18:43:53,104 INFO Thread-12 :1316 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/output.log
+2024-05-22 18:43:53,104 INFO Thread-12 :1316 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-metadata.json
+2024-05-22 18:43:53,105 INFO Thread-12 :1316 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/requirements.txt
+2024-05-22 18:43:55,104 INFO Thread-12 :1316 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/output.log
+2024-05-22 18:43:57,637 DEBUG HandlerThread:1316 [handler.py:handle_request():158]
handle_request: status_report +2024-05-22 18:44:03,056 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:44:03,111 INFO Thread-12 :1316 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/output.log +2024-05-22 18:44:03,355 DEBUG SenderThread:1316 [sender.py:send():378] send: exit +2024-05-22 18:44:03,356 INFO SenderThread:1316 [sender.py:send_exit():585] handling exit code: 1 +2024-05-22 18:44:03,356 INFO SenderThread:1316 [sender.py:send_exit():587] handling runtime: 11 +2024-05-22 18:44:03,357 INFO SenderThread:1316 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:44:03,357 INFO SenderThread:1316 [sender.py:send_exit():593] send defer +2024-05-22 18:44:03,358 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:03,358 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-22 18:44:03,358 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:03,358 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-22 18:44:03,358 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 1 +2024-05-22 18:44:03,358 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:03,358 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-22 18:44:03,358 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:03,358 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-22 18:44:03,358 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 2 +2024-05-22 18:44:03,358 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:03,358 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-22 18:44:03,358 INFO HandlerThread:1316 [system_monitor.py:finish():203] Stopping system monitor +2024-05-22 18:44:03,359 INFO HandlerThread:1316 [interfaces.py:finish():200] Joined cpu monitor +2024-05-22 18:44:03,359 INFO HandlerThread:1316 [interfaces.py:finish():200] Joined disk monitor +2024-05-22 18:44:03,359 DEBUG SystemMonitor:1316 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-22 18:44:03,359 INFO HandlerThread:1316 [interfaces.py:finish():200] Joined memory monitor +2024-05-22 18:44:03,359 DEBUG SystemMonitor:1316 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-22 18:44:03,359 INFO HandlerThread:1316 [interfaces.py:finish():200] Joined network monitor +2024-05-22 18:44:03,359 DEBUG SystemMonitor:1316 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-22 18:44:03,361 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:03,361 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-22 18:44:03,361 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 3 +2024-05-22 18:44:03,361 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:03,361 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-22 18:44:03,361 DEBUG SenderThread:1316 [sender.py:send():378] send: 
stats +2024-05-22 18:44:03,362 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:03,362 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-22 18:44:03,362 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 4 +2024-05-22 18:44:03,363 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:03,363 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-22 18:44:03,363 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:03,363 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-22 18:44:03,363 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 5 +2024-05-22 18:44:03,363 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:03,363 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-22 18:44:03,363 DEBUG SenderThread:1316 [sender.py:send():378] send: summary +2024-05-22 18:44:03,364 INFO SenderThread:1316 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:44:03,364 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:03,364 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-22 18:44:03,364 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 6 +2024-05-22 18:44:03,364 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:03,364 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-22 18:44:03,364 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:03,364 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-22 18:44:03,369 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:44:03,599 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 7 +2024-05-22 18:44:03,599 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:03,599 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-22 18:44:03,599 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:03,599 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-22 18:44:04,113 INFO Thread-12 :1316 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/config.yaml +2024-05-22 18:44:04,113 INFO Thread-12 :1316 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-summary.json +2024-05-22 18:44:04,355 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:44:04,658 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 8 +2024-05-22 18:44:04,658 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:44:04,658 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:04,658 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle 
defer: 8 +2024-05-22 18:44:04,659 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:04,659 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-22 18:44:04,659 INFO SenderThread:1316 [job_builder.py:build():432] Attempting to build job artifact +2024-05-22 18:44:04,659 INFO SenderThread:1316 [job_builder.py:_get_source_type():576] no source found +2024-05-22 18:44:04,659 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 9 +2024-05-22 18:44:04,659 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:04,659 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-22 18:44:04,660 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:04,660 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-22 18:44:04,660 INFO SenderThread:1316 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-22 18:44:05,114 INFO SenderThread:1316 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/output.log +2024-05-22 18:44:05,114 INFO SenderThread:1316 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files +2024-05-22 18:44:05,115 INFO SenderThread:1316 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/requirements.txt requirements.txt +2024-05-22 18:44:05,115 INFO SenderThread:1316 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-metadata.json wandb-metadata.json +2024-05-22 18:44:05,115 INFO SenderThread:1316 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/output.log output.log +2024-05-22 18:44:05,117 INFO SenderThread:1316 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/config.yaml config.yaml +2024-05-22 18:44:05,119 INFO SenderThread:1316 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-summary.json wandb-summary.json +2024-05-22 18:44:05,121 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 10 +2024-05-22 18:44:05,122 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:05,122 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-22 18:44:05,122 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:05,122 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-22 18:44:05,122 INFO SenderThread:1316 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:44:05,366 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:44:05,366 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:44:05,445 INFO wandb-upload_0:1316 [upload_job.py:push():130] Uploaded file 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/requirements.txt +2024-05-22 18:44:05,692 INFO wandb-upload_3:1316 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/wandb-summary.json +2024-05-22 18:44:05,722 INFO wandb-upload_1:1316 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/output.log +2024-05-22 18:44:05,733 INFO wandb-upload_2:1316 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/files/config.yaml +2024-05-22 18:44:05,933 INFO Thread-11 (_thread_body):1316 [sender.py:transition_state():613] send defer: 11 +2024-05-22 18:44:05,934 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:05,934 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-22 18:44:05,934 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:05,934 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-22 18:44:05,934 INFO SenderThread:1316 [file_pusher.py:join():175] waiting for file pusher +2024-05-22 18:44:05,934 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 12 +2024-05-22 18:44:05,934 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:05,934 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-22 18:44:05,934 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:05,934 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-22 18:44:05,934 INFO SenderThread:1316 [file_stream.py:finish():601] file stream finish called +2024-05-22 18:44:05,998 INFO SenderThread:1316 [file_stream.py:finish():605] file stream finish is done +2024-05-22 18:44:05,999 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 13 +2024-05-22 18:44:05,999 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:05,999 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-22 18:44:05,999 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:05,999 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-22 18:44:05,999 INFO SenderThread:1316 [sender.py:transition_state():613] send defer: 14 +2024-05-22 18:44:05,999 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:05,999 INFO HandlerThread:1316 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-22 18:44:05,999 DEBUG SenderThread:1316 [sender.py:send():378] send: final +2024-05-22 18:44:05,999 DEBUG SenderThread:1316 [sender.py:send():378] send: footer +2024-05-22 18:44:05,999 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:05,999 INFO SenderThread:1316 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-22 18:44:06,000 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:44:06,000 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:44:06,000 DEBUG HandlerThread:1316 
[handler.py:handle_request():158] handle_request: server_info +2024-05-22 18:44:06,000 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: get_summary +2024-05-22 18:44:06,000 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-22 18:44:06,000 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-22 18:44:06,001 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:44:06,001 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:44:06,001 DEBUG SenderThread:1316 [sender.py:send_request():405] send_request: server_info +2024-05-22 18:44:06,055 INFO MainThread:1316 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-22 18:44:06,055 INFO MainThread:1316 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-22 18:44:06,055 INFO MainThread:1316 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-22 18:44:06,055 DEBUG HandlerThread:1316 [handler.py:handle_request():158] handle_request: shutdown +2024-05-22 18:44:06,055 INFO HandlerThread:1316 [handler.py:finish():882] shutting down handler +2024-05-22 18:44:07,001 INFO WriterThread:1316 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/run-lgo5xka7.wandb +2024-05-22 18:44:07,055 INFO SenderThread:1316 [sender.py:finish():1545] shutting down sender +2024-05-22 18:44:07,055 INFO SenderThread:1316 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:44:07,055 INFO SenderThread:1316 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/logs/debug.log b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..782c39e24633e4c86a4ce0aa99e02a8b21002b06 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-22 18:43:51,794 INFO MainThread:1161 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-22 18:43:51,794 INFO MainThread:1161 [wandb_setup.py:_flush():76] Configure stats pid to 1161 +2024-05-22 18:43:51,794 INFO MainThread:1161 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-22 18:43:51,794 INFO MainThread:1161 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-22 18:43:51,795 INFO MainThread:1161 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-22 18:43:51,795 INFO MainThread:1161 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-22 18:43:51,795 WARNING MainThread:1161 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-22 18:43:51,795 INFO MainThread:1161 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-22 18:43:51,795 INFO MainThread:1161 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-22 18:43:51,795 INFO MainThread:1161 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/logs/debug.log +2024-05-22 18:43:51,795 INFO MainThread:1161 
[wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/logs/debug-internal.log +2024-05-22 18:43:51,795 INFO MainThread:1161 [wandb_init.py:init():560] calling init triggers +2024-05-22 18:43:51,795 INFO MainThread:1161 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-22 18:43:51,795 INFO MainThread:1161 [wandb_init.py:init():610] starting backend +2024-05-22 18:43:51,795 INFO MainThread:1161 [wandb_init.py:init():614] setting up manager +2024-05-22 18:43:51,799 INFO MainThread:1161 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-22 18:43:51,800 INFO MainThread:1161 [wandb_init.py:init():622] backend started and connected +2024-05-22 18:43:51,803 INFO MainThread:1161 [wandb_init.py:init():711] updated telemetry +2024-05-22 18:43:51,812 INFO MainThread:1161 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-22 18:43:52,104 INFO MainThread:1161 [wandb_run.py:_on_init():2396] communicating current version +2024-05-22 18:43:52,212 INFO MainThread:1161 [wandb_run.py:_on_init():2405] got version response +2024-05-22 18:43:52,213 INFO MainThread:1161 [wandb_init.py:init():795] starting run threads in backend +2024-05-22 18:43:52,490 INFO MainThread:1161 [wandb_run.py:_console_start():2374] atexit reg +2024-05-22 18:43:52,490 INFO MainThread:1161 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-22 18:43:52,490 INFO MainThread:1161 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-22 18:43:52,490 INFO MainThread:1161 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-22 18:43:52,492 INFO MainThread:1161 [wandb_init.py:init():838] run started, returning control to user process +2024-05-22 18:44:07,056 WARNING MsgRouterThr:1161 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/run-lgo5xka7.wandb b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/run-lgo5xka7.wandb new file mode 100644 index 0000000000000000000000000000000000000000..bfd0d2ccb2739524dca298a6e49472ee6ec75084 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240522_184351-lgo5xka7/run-lgo5xka7.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a36859c5814b83ea935081772f44392f7579a1c2 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716447371 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/output.log b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..613b7cfa20c88a432ca169a4cf887bd63029c5ac --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/output.log
@@ -0,0 +1,34 @@
+
+2024-05-23:06:56:11,822 INFO [__main__.py:251] Verbosity set to INFO
+2024-05-23:06:56:20,245 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+2024-05-23:06:56:20,246 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+2024-05-23:06:56:20,246 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000'}
+2024-05-23:06:56:22,576 INFO [huggingface.py:164] Using device 'cuda'
+Traceback (most recent call last):
+ File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+ return _run_code(code, main_globals, None,
+ File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+ exec(code, run_globals)
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+ cli_evaluate()
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+ results = evaluator.simple_evaluate(
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+ return fn(*args, **kwargs)
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+ return cls(**args, **args2)
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+ self._get_config(
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+ self._config = transformers.AutoConfig.from_pretrained(
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+ resolved_config_file = cached_file(
+ File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
+ raise EnvironmentError(
+OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000/tree/main' for available files.
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..094efea889e07cd1194f661d9bac8791e99088d6 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/requirements.txt @@ -0,0 +1,154 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.35 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..a95699ae2aee0a82457296fdbd53f10051d6b6c4
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-metadata.json
@@ -0,0 +1,850 @@
+{
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+ "python": "3.10.12",
+ "heartbeatAt": "2024-05-23T06:56:11.619532",
+ "startedAt": "2024-05-23T06:56:11.039733",
+ "docker": null,
+ "cuda": null,
+ "args": [
+ "--model",
+ "hf",
+ "--model_args",
+ "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000",
+ "--tasks",
+ "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
+ "--batch_size",
+ "auto",
+ "--wandb_args",
+ "project=bharatgpt,group=trial_expt_2"
+ ],
+ "state": "running",
+ "program": "-m lm_eval.__main__",
+ "codePathLocal": null,
+ "git": {
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
+ "commit": null
+ },
+ "email": null,
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
+ "host": "peacock-evaluation-worker-0",
+ "username": "root",
+ "executable": "/usr/bin/python3",
+ "cpu_count": 80,
+ "cpu_count_logical": 160,
+ "cpu_freq": {
+ "current": 2331.796875,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ "cpu_freq_per_core": [
+ [... 160 per-core entries omitted: every core reports "min": 800.0 and "max": 3400.0, with "current" at 2300.0 on all but a few cores running high (3400.0, 3373.442, 3373.456) ...]
+ ],
+ "disk": {
+ "/": {
+ "total": 877.6341285705566,
+ "used": 203.89686965942383
+ }
+ },
+ "memory": {
+ "total": 1007.4379196166992
+ }
+}
diff --git a/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-summary.json
new file mode 100644
index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-summary.json
@@ -0,0 +1 @@
+{"_wandb": {"runtime": 11}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/logs/debug-internal.log
new file mode 100644
index 0000000000000000000000000000000000000000..bc37904ed7430c8e2a63b68b4f5a4d91cb802a79
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/logs/debug-internal.log
@@ -0,0 +1,183 @@
+2024-05-23 06:56:11,061 INFO StreamThr :1626 [internal.py:wandb_internal():85] W&B internal server running at pid: 1626, started at: 2024-05-23 06:56:11.059033
+2024-05-23 06:56:11,065 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: status
+2024-05-23 06:56:11,066 INFO WriterThread:1626 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/run-0p73sxw1.wandb
+2024-05-23 06:56:11,072 DEBUG SenderThread:1626 [sender.py:send():378] send: header
+2024-05-23 06:56:11,073 DEBUG SenderThread:1626 [sender.py:send():378] send: run
+2024-05-23 06:56:11,337 INFO SenderThread:1626 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files
+2024-05-23 06:56:11,337 INFO SenderThread:1626 [sender.py:_start_run_threads():1123] run started: 0p73sxw1 with start time 1716447371.058886
+2024-05-23 06:56:11,342 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: check_version
+2024-05-23 06:56:11,342 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: check_version
+2024-05-23 06:56:11,562 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: run_start
+2024-05-23 06:56:11,564 DEBUG HandlerThread:1626 [system_info.py:__init__():26] System info init
+2024-05-23 06:56:11,564 DEBUG HandlerThread:1626 [system_info.py:__init__():41] System info init done
+2024-05-23 06:56:11,564 INFO HandlerThread:1626 [system_monitor.py:start():194] Starting system monitor
+2024-05-23 06:56:11,564 INFO SystemMonitor:1626 [system_monitor.py:_start():158] Starting system asset monitoring threads
+2024-05-23 06:56:11,564 INFO HandlerThread:1626 [system_monitor.py:probe():214] Collecting system info
+2024-05-23 06:56:11,578 INFO SystemMonitor:1626 [interfaces.py:start():188] Started cpu
+2024-05-23 06:56:11,579 INFO SystemMonitor:1626 [interfaces.py:start():188] Started disk monitoring +2024-05-23 06:56:11,581 INFO SystemMonitor:1626 [interfaces.py:start():188] Started memory monitoring +2024-05-23 06:56:11,602 INFO SystemMonitor:1626 [interfaces.py:start():188] Started network monitoring +2024-05-23 06:56:11,619 DEBUG HandlerThread:1626 [system_info.py:probe():150] Probing system +2024-05-23 06:56:11,622 DEBUG HandlerThread:1626 [system_info.py:_probe_git():135] Probing git +2024-05-23 06:56:11,632 ERROR HandlerThread:1626 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 06:56:11,632 DEBUG HandlerThread:1626 [system_info.py:_probe_git():143] Probing git done +2024-05-23 06:56:11,632 DEBUG HandlerThread:1626 [system_info.py:probe():198] Probing system done +2024-05-23 06:56:11,632 DEBUG HandlerThread:1626 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T06:56:11.619532', 'startedAt': '2024-05-23T06:56:11.039733', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2331.796875, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, [... the remaining entries repeat {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, apart from two cores at 3373.442 and 3373.456 ...]], 'disk': {'/': {'total': 877.6341285705566, 'used': 203.89686965942383}}, 'memory': {'total': 1007.4379196166992}} +2024-05-23 06:56:11,633 INFO HandlerThread:1626 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 06:56:11,633 INFO HandlerThread:1626 [system_monitor.py:probe():227] Publishing system info +2024-05-23 06:56:11,635 INFO HandlerThread:1626 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 06:56:11,640
DEBUG SenderThread:1626 [sender.py:send():378] send: files +2024-05-23 06:56:11,641 INFO SenderThread:1626 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 06:56:11,814 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 06:56:11,815 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: python_packages +2024-05-23 06:56:11,817 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 06:56:11,817 DEBUG SenderThread:1626 [sender.py:send():378] send: telemetry +2024-05-23 06:56:11,818 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: stop_status +2024-05-23 06:56:12,314 INFO wandb-upload_0:1626 [upload_job.py:push():130] Uploaded file /tmp/tmplxfdhhzhwandb/9ifls83e-wandb-metadata.json +2024-05-23 06:56:12,339 INFO Thread-12 :1626 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-metadata.json +2024-05-23 06:56:12,340 INFO Thread-12 :1626 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/requirements.txt +2024-05-23 06:56:12,340 INFO Thread-12 :1626 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/output.log +2024-05-23 06:56:14,339 INFO Thread-12 :1626 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/output.log +2024-05-23 06:56:16,971 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 06:56:22,247 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 06:56:22,346 INFO Thread-12 :1626 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/output.log +2024-05-23 06:56:22,584 DEBUG SenderThread:1626 [sender.py:send():378] send: exit +2024-05-23 06:56:22,584 INFO SenderThread:1626 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 06:56:22,584 INFO SenderThread:1626 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 06:56:22,585 INFO SenderThread:1626 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 06:56:22,585 INFO SenderThread:1626 [sender.py:send_exit():593] send defer +2024-05-23 06:56:22,586 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:22,586 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 06:56:22,586 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:22,586 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 06:56:22,586 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 1 +2024-05-23 06:56:22,586 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:22,586 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 06:56:22,586 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:22,586 INFO SenderThread:1626 
[sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 06:56:22,586 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 2 +2024-05-23 06:56:22,586 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:22,586 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 06:56:22,586 INFO HandlerThread:1626 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 06:56:22,587 DEBUG SystemMonitor:1626 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 06:56:22,587 DEBUG SystemMonitor:1626 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 06:56:22,587 DEBUG SystemMonitor:1626 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 06:56:22,589 INFO HandlerThread:1626 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 06:56:22,590 INFO HandlerThread:1626 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 06:56:22,590 INFO HandlerThread:1626 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 06:56:22,590 INFO HandlerThread:1626 [interfaces.py:finish():200] Joined network monitor +2024-05-23 06:56:22,590 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:22,590 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 06:56:22,590 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 3 +2024-05-23 06:56:22,590 DEBUG SenderThread:1626 [sender.py:send():378] send: stats +2024-05-23 06:56:22,591 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:22,591 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 06:56:22,592 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:22,592 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 06:56:22,592 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 4 +2024-05-23 06:56:22,592 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:22,592 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 06:56:22,592 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:22,592 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 06:56:22,592 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 5 +2024-05-23 06:56:22,592 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:22,592 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 06:56:22,592 DEBUG SenderThread:1626 [sender.py:send():378] send: summary +2024-05-23 06:56:22,593 INFO SenderThread:1626 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 06:56:22,593 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:22,593 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 06:56:22,593 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 6 +2024-05-23 06:56:22,593 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:22,593 INFO HandlerThread:1626 
[handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 06:56:22,593 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:22,594 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 06:56:22,598 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 06:56:22,665 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 7 +2024-05-23 06:56:22,665 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:22,665 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 06:56:22,665 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:22,665 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 06:56:23,347 INFO Thread-12 :1626 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/config.yaml +2024-05-23 06:56:23,347 INFO Thread-12 :1626 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-summary.json +2024-05-23 06:56:23,584 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 06:56:23,990 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 8 +2024-05-23 06:56:23,991 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 06:56:23,991 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:23,991 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 06:56:23,991 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:23,991 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 06:56:23,991 INFO SenderThread:1626 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 06:56:23,991 INFO SenderThread:1626 [job_builder.py:_get_source_type():576] no source found +2024-05-23 06:56:23,992 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 9 +2024-05-23 06:56:23,992 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:23,992 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 06:56:23,992 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:23,992 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 06:56:23,992 INFO SenderThread:1626 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 06:56:24,348 INFO SenderThread:1626 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/output.log +2024-05-23 06:56:24,348 INFO SenderThread:1626 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files +2024-05-23 06:56:24,348 INFO SenderThread:1626 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/output.log output.log +2024-05-23 06:56:24,348 INFO SenderThread:1626 
[dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-metadata.json wandb-metadata.json +2024-05-23 06:56:24,349 INFO SenderThread:1626 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/config.yaml config.yaml +2024-05-23 06:56:24,351 INFO SenderThread:1626 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-summary.json wandb-summary.json +2024-05-23 06:56:24,353 INFO SenderThread:1626 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/requirements.txt requirements.txt +2024-05-23 06:56:24,353 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 10 +2024-05-23 06:56:24,353 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:24,353 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 06:56:24,355 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:24,356 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 06:56:24,356 INFO SenderThread:1626 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 06:56:24,584 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 06:56:24,584 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 06:56:24,597 INFO wandb-upload_0:1626 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/output.log +2024-05-23 06:56:24,906 INFO wandb-upload_3:1626 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/requirements.txt +2024-05-23 06:56:24,963 INFO wandb-upload_2:1626 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/wandb-summary.json +2024-05-23 06:56:24,972 INFO wandb-upload_1:1626 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/files/config.yaml +2024-05-23 06:56:25,172 INFO Thread-11 (_thread_body):1626 [sender.py:transition_state():613] send defer: 11 +2024-05-23 06:56:25,172 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:25,172 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 06:56:25,173 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:25,173 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 06:56:25,173 INFO SenderThread:1626 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 06:56:25,173 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 12 +2024-05-23 06:56:25,173 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:25,173 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 06:56:25,173 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:25,173 INFO 
SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 06:56:25,173 INFO SenderThread:1626 [file_stream.py:finish():601] file stream finish called +2024-05-23 06:56:25,233 INFO SenderThread:1626 [file_stream.py:finish():605] file stream finish is done +2024-05-23 06:56:25,233 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 13 +2024-05-23 06:56:25,233 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:25,233 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 06:56:25,233 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:25,233 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 06:56:25,233 INFO SenderThread:1626 [sender.py:transition_state():613] send defer: 14 +2024-05-23 06:56:25,234 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: defer +2024-05-23 06:56:25,234 INFO HandlerThread:1626 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 06:56:25,234 DEBUG SenderThread:1626 [sender.py:send():378] send: final +2024-05-23 06:56:25,234 DEBUG SenderThread:1626 [sender.py:send():378] send: footer +2024-05-23 06:56:25,234 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: defer +2024-05-23 06:56:25,234 INFO SenderThread:1626 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 06:56:25,235 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 06:56:25,235 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 06:56:25,235 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 06:56:25,235 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 06:56:25,235 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 06:56:25,235 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 06:56:25,235 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 06:56:25,235 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 06:56:25,236 DEBUG SenderThread:1626 [sender.py:send_request():405] send_request: server_info +2024-05-23 06:56:25,296 INFO MainThread:1626 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 06:56:25,297 INFO MainThread:1626 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 06:56:25,297 INFO MainThread:1626 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 06:56:25,297 DEBUG HandlerThread:1626 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 06:56:25,297 INFO HandlerThread:1626 [handler.py:finish():882] shutting down handler +2024-05-23 06:56:26,235 INFO WriterThread:1626 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/run-0p73sxw1.wandb +2024-05-23 06:56:26,296 INFO SenderThread:1626 [sender.py:finish():1545] shutting down sender +2024-05-23 06:56:26,297 INFO SenderThread:1626 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 06:56:26,297 INFO SenderThread:1626 [file_pusher.py:join():175] waiting for file pusher diff --git 
a/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..7989ad0fcf0be11fc7b94f6367245a865e1f830d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_setup.py:_flush():76] Configure stats pid to 1471 +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 06:56:11,053 WARNING MainThread:1471 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/logs/debug.log +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/logs/debug-internal.log +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_init.py:init():560] calling init triggers +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_init.py:init():610] starting backend +2024-05-23 06:56:11,053 INFO MainThread:1471 [wandb_init.py:init():614] setting up manager +2024-05-23 06:56:11,057 INFO MainThread:1471 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 06:56:11,058 INFO MainThread:1471 [wandb_init.py:init():622] backend started and connected +2024-05-23 06:56:11,062 INFO MainThread:1471 [wandb_init.py:init():711] updated telemetry +2024-05-23 06:56:11,070 INFO MainThread:1471 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 06:56:11,342 INFO MainThread:1471 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 06:56:11,555 INFO MainThread:1471 [wandb_run.py:_on_init():2405] got version response +2024-05-23 06:56:11,555 INFO MainThread:1471 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 06:56:11,815 INFO MainThread:1471 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 06:56:11,816 INFO MainThread:1471 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 06:56:11,816 INFO MainThread:1471 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 06:56:11,816 INFO MainThread:1471 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-23 06:56:11,819 INFO MainThread:1471 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 06:56:26,298 WARNING MsgRouterThr:1471 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/run-0p73sxw1.wandb b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/run-0p73sxw1.wandb new file mode 100644 index 0000000000000000000000000000000000000000..4154094ef8e57b4c9737951a14576265461a29d1 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_065611-0p73sxw1/run-0p73sxw1.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8af32e357fe3cf6f645f23b731629342cd8f30f7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716467340 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/output.log b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..6998acf479dab1d34d2b9b42c8d60d3fd475ed59 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:12:29:00,862 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:12:29:10,017 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:12:29:10,018 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:12:29:10,018 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'} +2024-05-23:12:29:12,454 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + 
self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 
+python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..aedb26673fa5e29576324a3390a20b692b2d5058 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T12:29:00.653777", + "startedAt": "2024-05-23T12:28:59.979054", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.5000124999997, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 209.5792465209961 + } + }, + "memory": { + "total": 1007.4379425048828 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..e5ae4b1964e6b9120aa68db8781876725e3e763f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-05-23 12:29:00,006 INFO StreamThr :813 [internal.py:wandb_internal():85] W&B internal server running at pid: 813, started at: 2024-05-23 12:29:00.003677 +2024-05-23 12:29:00,010 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: status +2024-05-23 12:29:00,011 INFO WriterThread:813 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/run-opxox6sx.wandb +2024-05-23 12:29:00,014 DEBUG SenderThread:813 [sender.py:send():378] send: header +2024-05-23 12:29:00,015 DEBUG SenderThread:813 [sender.py:send():378] send: run +2024-05-23 12:29:00,420 INFO SenderThread:813 
[dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files +2024-05-23 12:29:00,421 INFO SenderThread:813 [sender.py:_start_run_threads():1123] run started: opxox6sx with start time 1716467340.003751 +2024-05-23 12:29:00,426 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 12:29:00,427 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: check_version +2024-05-23 12:29:00,543 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 12:29:00,545 DEBUG HandlerThread:813 [system_info.py:__init__():26] System info init +2024-05-23 12:29:00,545 DEBUG HandlerThread:813 [system_info.py:__init__():41] System info init done +2024-05-23 12:29:00,545 INFO HandlerThread:813 [system_monitor.py:start():194] Starting system monitor +2024-05-23 12:29:00,546 INFO SystemMonitor:813 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 12:29:00,546 INFO HandlerThread:813 [system_monitor.py:probe():214] Collecting system info +2024-05-23 12:29:00,553 INFO SystemMonitor:813 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 12:29:00,559 INFO SystemMonitor:813 [interfaces.py:start():188] Started disk monitoring +2024-05-23 12:29:00,559 INFO SystemMonitor:813 [interfaces.py:start():188] Started memory monitoring +2024-05-23 12:29:00,560 INFO SystemMonitor:813 [interfaces.py:start():188] Started network monitoring +2024-05-23 12:29:00,653 DEBUG HandlerThread:813 [system_info.py:probe():150] Probing system +2024-05-23 12:29:00,656 DEBUG HandlerThread:813 [system_info.py:_probe_git():135] Probing git +2024-05-23 12:29:00,666 ERROR HandlerThread:813 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 12:29:00,666 DEBUG HandlerThread:813 [system_info.py:_probe_git():143] Probing git done +2024-05-23 12:29:00,666 DEBUG HandlerThread:813 [system_info.py:probe():198] Probing system done +2024-05-23 12:29:00,666 DEBUG HandlerThread:813 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T12:29:00.653777', 'startedAt': '2024-05-23T12:28:59.979054', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.5000124999997, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, [... the remaining entries repeat {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, apart from two further cores at 3400.0; the log line is truncated at this point in the source ...]
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 209.5792465209961}}, 'memory': {'total': 1007.4379425048828}} +2024-05-23 12:29:00,666 INFO HandlerThread:813 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 12:29:00,666 INFO HandlerThread:813 [system_monitor.py:probe():227] Publishing system info +2024-05-23 12:29:00,669 INFO HandlerThread:813 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 12:29:00,675 DEBUG SenderThread:813 [sender.py:send():378] send: files +2024-05-23 12:29:00,675 INFO SenderThread:813 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 12:29:00,857 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 12:29:00,857 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: python_packages +2024-05-23 12:29:00,857 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 12:29:00,859 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: stop_status +2024-05-23 12:29:00,963 DEBUG SenderThread:813 [sender.py:send():378] send: telemetry +2024-05-23 12:29:01,284 INFO wandb-upload_0:813 [upload_job.py:push():130] Uploaded file /tmp/tmp1_2ziduzwandb/ql2ge0kg-wandb-metadata.json +2024-05-23 12:29:01,423 INFO Thread-12 :813 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/output.log +2024-05-23 12:29:01,423 INFO Thread-12 :813 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/requirements.txt +2024-05-23 12:29:01,423 INFO Thread-12 :813 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-metadata.json +2024-05-23 12:29:03,423 INFO Thread-12 :813 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/output.log +2024-05-23 12:29:05,974 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:29:11,018 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:29:11,430 INFO Thread-12 :813 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/output.log +2024-05-23 12:29:12,462 DEBUG SenderThread:813 [sender.py:send():378] send: exit +2024-05-23 12:29:12,462 INFO SenderThread:813 [sender.py:send_exit():585] handling exit code: 1 
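The git root error earlier in this blob explains why the probe records 'commit': None: git aborts rev-parse --show-toplevel because the checkout at /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness is owned by a different user than the probing process. A minimal sketch of the workaround the error message itself prints, run from Python for convenience; the path is taken verbatim from the log:

import subprocess

# Mark the checkout as safe so git (and GitPython) will resolve its root again.
REPO = "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness"
subprocess.run(
    ["git", "config", "--global", "--add", "safe.directory", REPO],
    check=True,
)
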
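The teardown that follows this exit is a fixed chain of numbered defer states; reading ahead in the trace, defer 2 stops the system monitor, 3 sends stats, 5 writes the summary, 9 shuts down the directory watcher, 10 and 12 drain the file pusher and file stream, and 14 sends the final footer. A toy sketch of that state machine as a Python illustration, not wandb's actual internals:

from typing import Callable, Dict

# Step names taken from the log entries each defer state precedes;
# the dispatch loop itself is illustrative only.
STEPS: Dict[int, Callable[[], None]] = {
    2: lambda: print("stop system monitor"),
    3: lambda: print("send stats"),
    5: lambda: print("save wandb-summary.json"),
    9: lambda: print("shut down directory watcher"),
    10: lambda: print("shut down file pusher"),
    12: lambda: print("finish file stream"),
    14: lambda: print("send final footer"),
}

def transition_state() -> None:
    for state, step in sorted(STEPS.items()):
        print(f"send defer: {state}")   # sender announces the state
        step()                          # handler performs the flush
        print(f"handle sender defer: {state}")

if __name__ == "__main__":
    transition_state()
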
+2024-05-23 12:29:12,462 INFO SenderThread:813 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 12:29:12,464 INFO SenderThread:813 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:29:12,464 INFO SenderThread:813 [sender.py:send_exit():593] send defer +2024-05-23 12:29:12,464 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:12,464 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 12:29:12,464 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:12,464 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 12:29:12,464 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 1 +2024-05-23 12:29:12,464 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:12,464 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 12:29:12,464 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:12,464 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 12:29:12,464 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 2 +2024-05-23 12:29:12,465 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:12,465 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 12:29:12,465 INFO HandlerThread:813 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 12:29:12,465 DEBUG SystemMonitor:813 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 12:29:12,465 DEBUG SystemMonitor:813 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 12:29:12,465 DEBUG SystemMonitor:813 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 12:29:12,466 INFO HandlerThread:813 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 12:29:12,466 INFO HandlerThread:813 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 12:29:12,466 INFO HandlerThread:813 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 12:29:12,466 INFO HandlerThread:813 [interfaces.py:finish():200] Joined network monitor +2024-05-23 12:29:12,466 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:12,467 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 12:29:12,467 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 3 +2024-05-23 12:29:12,467 DEBUG SenderThread:813 [sender.py:send():378] send: stats +2024-05-23 12:29:12,467 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:12,467 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 12:29:12,467 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:12,467 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 12:29:12,467 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 4 +2024-05-23 12:29:12,467 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:12,467 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 12:29:12,467 DEBUG SenderThread:813 
[sender.py:send_request():405] send_request: defer +2024-05-23 12:29:12,467 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 12:29:12,467 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 5 +2024-05-23 12:29:12,467 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:12,467 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 12:29:12,468 DEBUG SenderThread:813 [sender.py:send():378] send: summary +2024-05-23 12:29:12,468 INFO SenderThread:813 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:29:12,468 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:12,469 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 12:29:12,469 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 6 +2024-05-23 12:29:12,469 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:12,469 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 12:29:12,469 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:12,469 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 12:29:12,473 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:29:12,571 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 7 +2024-05-23 12:29:12,572 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:12,572 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 12:29:12,572 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:12,572 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 12:29:13,008 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 8 +2024-05-23 12:29:13,008 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:13,008 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 12:29:13,008 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:13,008 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 12:29:13,008 INFO SenderThread:813 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 12:29:13,009 INFO SenderThread:813 [job_builder.py:_get_source_type():576] no source found +2024-05-23 12:29:13,009 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 9 +2024-05-23 12:29:13,009 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:13,009 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 12:29:13,009 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:13,009 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 12:29:13,009 INFO SenderThread:813 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 12:29:13,431 INFO SenderThread:813 [dir_watcher.py:_on_file_modified():288] file/dir modified: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/output.log +2024-05-23 12:29:13,432 INFO SenderThread:813 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/config.yaml +2024-05-23 12:29:13,432 INFO SenderThread:813 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-summary.json +2024-05-23 12:29:13,432 INFO SenderThread:813 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files +2024-05-23 12:29:13,432 INFO SenderThread:813 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/output.log output.log +2024-05-23 12:29:13,432 INFO SenderThread:813 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/requirements.txt requirements.txt +2024-05-23 12:29:13,435 INFO SenderThread:813 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/config.yaml config.yaml +2024-05-23 12:29:13,435 INFO SenderThread:813 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-metadata.json wandb-metadata.json +2024-05-23 12:29:13,435 INFO SenderThread:813 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-summary.json wandb-summary.json +2024-05-23 12:29:13,435 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 10 +2024-05-23 12:29:13,435 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:13,435 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 12:29:13,437 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:13,437 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 12:29:13,437 INFO SenderThread:813 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:29:13,462 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:29:13,462 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:29:13,668 INFO wandb-upload_0:813 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/output.log +2024-05-23 12:29:14,017 INFO wandb-upload_1:813 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/requirements.txt +2024-05-23 12:29:14,046 INFO wandb-upload_2:813 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/config.yaml +2024-05-23 12:29:14,051 INFO wandb-upload_3:813 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/files/wandb-summary.json +2024-05-23 12:29:14,251 INFO Thread-11 (_thread_body):813 [sender.py:transition_state():613] send defer: 11 +2024-05-23 
12:29:14,251 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:14,251 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 12:29:14,251 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:14,252 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 12:29:14,252 INFO SenderThread:813 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 12:29:14,252 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 12 +2024-05-23 12:29:14,252 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:14,252 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 12:29:14,252 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:14,252 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 12:29:14,252 INFO SenderThread:813 [file_stream.py:finish():601] file stream finish called +2024-05-23 12:29:14,315 INFO SenderThread:813 [file_stream.py:finish():605] file stream finish is done +2024-05-23 12:29:14,315 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 13 +2024-05-23 12:29:14,315 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:14,315 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 12:29:14,315 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:14,315 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 12:29:14,315 INFO SenderThread:813 [sender.py:transition_state():613] send defer: 14 +2024-05-23 12:29:14,315 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:29:14,316 INFO HandlerThread:813 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 12:29:14,316 DEBUG SenderThread:813 [sender.py:send():378] send: final +2024-05-23 12:29:14,316 DEBUG SenderThread:813 [sender.py:send():378] send: footer +2024-05-23 12:29:14,316 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: defer +2024-05-23 12:29:14,316 INFO SenderThread:813 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 12:29:14,317 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:29:14,317 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:29:14,317 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 12:29:14,317 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 12:29:14,317 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 12:29:14,317 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 12:29:14,317 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:29:14,317 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:29:14,317 DEBUG SenderThread:813 [sender.py:send_request():405] send_request: server_info +2024-05-23 12:29:14,635 INFO MainThread:813 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 12:29:14,635 INFO 
MainThread:813 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 12:29:14,635 INFO MainThread:813 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 12:29:14,635 DEBUG HandlerThread:813 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 12:29:14,635 INFO HandlerThread:813 [handler.py:finish():882] shutting down handler +2024-05-23 12:29:15,317 INFO WriterThread:813 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/run-opxox6sx.wandb +2024-05-23 12:29:15,635 INFO SenderThread:813 [sender.py:finish():1545] shutting down sender +2024-05-23 12:29:15,635 INFO SenderThread:813 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:29:15,635 INFO SenderThread:813 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..e8d139d845ebd9eb43a3b54b8beea8b799e3d17b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_setup.py:_flush():76] Configure stats pid to 657 +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 12:28:59,998 WARNING MainThread:657 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/logs/debug.log +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/logs/debug-internal.log +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_init.py:init():560] calling init triggers +2024-05-23 12:28:59,998 INFO MainThread:657 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 12:28:59,999 INFO MainThread:657 [wandb_init.py:init():610] starting backend +2024-05-23 12:28:59,999 INFO MainThread:657 [wandb_init.py:init():614] setting up manager +2024-05-23 12:29:00,002 INFO MainThread:657 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 12:29:00,003 INFO MainThread:657 [wandb_init.py:init():622] backend started and connected +2024-05-23 12:29:00,007 INFO MainThread:657 
[wandb_init.py:init():711] updated telemetry +2024-05-23 12:29:00,015 INFO MainThread:657 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 12:29:00,426 INFO MainThread:657 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 12:29:00,537 INFO MainThread:657 [wandb_run.py:_on_init():2405] got version response +2024-05-23 12:29:00,537 INFO MainThread:657 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 12:29:00,858 INFO MainThread:657 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 12:29:00,858 INFO MainThread:657 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 12:29:00,858 INFO MainThread:657 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 12:29:00,858 INFO MainThread:657 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 12:29:00,859 INFO MainThread:657 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 12:29:15,637 WARNING MsgRouterThr:657 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/run-opxox6sx.wandb b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/run-opxox6sx.wandb new file mode 100644 index 0000000000000000000000000000000000000000..67f3d8d997c3f51e48502e3205352ec8ad648b51 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_122859-opxox6sx/run-opxox6sx.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bd910011651f008170528bffdf46653fa091cfbb --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716469836 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/output.log b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..9b39b0001a9a43306cfae1b7bb26da6c8035d46b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:13:10:36,986 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:13:10:45,452 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:13:10:45,453 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:13:10:45,454 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step30000'} +2024-05-23:13:10:47,878 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, 
run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step30000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step30000/tree/main' for available files. 
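The run aborts before any task executes: transformers' cached_file cannot resolve a config.json inside the checkpoint directory, which suggests the global_step30000 folder was never exported to Hugging Face format. A minimal pre-flight sketch under that assumption; the path is copied from the traceback and nothing below is lm-eval's own code:

import os
from transformers import AutoConfig

# Path copied verbatim from the traceback above (the doubled slashes are harmless).
CKPT = "/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step30000"

if not os.path.isfile(os.path.join(CKPT, "config.json")):
    raise FileNotFoundError(
        f"{CKPT} has no config.json; export the checkpoint to HF format "
        "before pointing lm-eval at it"
    )
config = AutoConfig.from_pretrained(CKPT)  # succeeds once config.json exists
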
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..06ca9276bf46251842f8a794e5452e9608b52c58 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T13:10:36.776737", + "startedAt": "2024-05-23T13:10:36.252781", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step30000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.124975, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3352.334, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.61958694458008 + } + }, + "memory": { + "total": 1007.4379539489746 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..9d13977505c57b4680c7b219d270cd280ebb6b9b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-23 13:10:36,273 INFO StreamThr :3881 [internal.py:wandb_internal():85] W&B internal server running at pid: 3881, started at: 2024-05-23 13:10:36.272086 +2024-05-23 13:10:36,279 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: status +2024-05-23 13:10:36,280 INFO WriterThread:3881 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/run-esww7elp.wandb +2024-05-23 13:10:36,281 DEBUG SenderThread:3881 [sender.py:send():378] send: header +2024-05-23 13:10:36,285 DEBUG SenderThread:3881 [sender.py:send():378] send: run +2024-05-23 13:10:36,556 INFO SenderThread:3881 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files +2024-05-23 13:10:36,556 INFO SenderThread:3881 [sender.py:_start_run_threads():1123] run started: esww7elp with start time 1716469836.272648 +2024-05-23 13:10:36,557 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 13:10:36,557 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: check_version +2024-05-23 13:10:36,680 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 13:10:36,683 DEBUG HandlerThread:3881 [system_info.py:__init__():26] System info init +2024-05-23 13:10:36,683 DEBUG HandlerThread:3881 [system_info.py:__init__():41] System info init done +2024-05-23 13:10:36,683 INFO HandlerThread:3881 [system_monitor.py:start():194] Starting system monitor +2024-05-23 13:10:36,683 INFO SystemMonitor:3881 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 13:10:36,683 INFO HandlerThread:3881 [system_monitor.py:probe():214] Collecting system info +2024-05-23 13:10:36,689 INFO SystemMonitor:3881 [interfaces.py:start():188] Started cpu 
monitoring +2024-05-23 13:10:36,690 INFO SystemMonitor:3881 [interfaces.py:start():188] Started disk monitoring +2024-05-23 13:10:36,695 INFO SystemMonitor:3881 [interfaces.py:start():188] Started memory monitoring +2024-05-23 13:10:36,696 INFO SystemMonitor:3881 [interfaces.py:start():188] Started network monitoring +2024-05-23 13:10:36,776 DEBUG HandlerThread:3881 [system_info.py:probe():150] Probing system +2024-05-23 13:10:36,780 DEBUG HandlerThread:3881 [system_info.py:_probe_git():135] Probing git +2024-05-23 13:10:36,790 ERROR HandlerThread:3881 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 13:10:36,790 DEBUG HandlerThread:3881 [system_info.py:_probe_git():143] Probing git done +2024-05-23 13:10:36,790 DEBUG HandlerThread:3881 [system_info.py:probe():198] Probing system done +2024-05-23 13:10:36,790 DEBUG HandlerThread:3881 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T13:10:36.776737', 'startedAt': '2024-05-23T13:10:36.252781', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step30000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.124975, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3352.334, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.61958694458008}}, 'memory': {'total': 1007.4379539489746}} +2024-05-23 13:10:36,791 INFO HandlerThread:3881 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 13:10:36,791 INFO HandlerThread:3881 [system_monitor.py:probe():227] Publishing system info +2024-05-23 13:10:36,793 INFO HandlerThread:3881 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 13:10:36,798 
DEBUG SenderThread:3881 [sender.py:send():378] send: files +2024-05-23 13:10:36,798 INFO SenderThread:3881 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 13:10:36,980 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 13:10:36,981 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: python_packages +2024-05-23 13:10:36,981 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 13:10:36,983 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: stop_status +2024-05-23 13:10:37,078 DEBUG SenderThread:3881 [sender.py:send():378] send: telemetry +2024-05-23 13:10:37,398 INFO wandb-upload_0:3881 [upload_job.py:push():130] Uploaded file /tmp/tmpy1n5pmb5wandb/0c9rlp5q-wandb-metadata.json +2024-05-23 13:10:37,558 INFO Thread-12 :3881 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/output.log +2024-05-23 13:10:37,558 INFO Thread-12 :3881 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-metadata.json +2024-05-23 13:10:37,558 INFO Thread-12 :3881 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/requirements.txt +2024-05-23 13:10:39,558 INFO Thread-12 :3881 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/output.log +2024-05-23 13:10:42,081 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:10:47,455 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:10:47,565 INFO Thread-12 :3881 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/output.log +2024-05-23 13:10:47,894 DEBUG SenderThread:3881 [sender.py:send():378] send: exit +2024-05-23 13:10:47,894 INFO SenderThread:3881 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 13:10:47,894 INFO SenderThread:3881 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 13:10:47,895 INFO SenderThread:3881 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:10:47,896 INFO SenderThread:3881 [sender.py:send_exit():593] send defer +2024-05-23 13:10:47,896 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:47,896 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 13:10:47,896 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:47,896 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 13:10:47,896 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 1 +2024-05-23 13:10:47,896 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:47,896 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 13:10:47,896 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:47,896 INFO SenderThread:3881 
[sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 13:10:47,896 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 2 +2024-05-23 13:10:47,896 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:47,896 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 13:10:47,896 INFO HandlerThread:3881 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 13:10:47,896 DEBUG SystemMonitor:3881 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 13:10:47,897 DEBUG SystemMonitor:3881 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 13:10:47,897 DEBUG SystemMonitor:3881 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 13:10:47,897 INFO HandlerThread:3881 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 13:10:47,899 INFO HandlerThread:3881 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 13:10:47,899 INFO HandlerThread:3881 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 13:10:47,899 INFO HandlerThread:3881 [interfaces.py:finish():200] Joined network monitor +2024-05-23 13:10:47,899 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:47,899 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 13:10:47,899 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 3 +2024-05-23 13:10:47,899 DEBUG SenderThread:3881 [sender.py:send():378] send: stats +2024-05-23 13:10:47,899 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:47,900 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 13:10:47,901 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:47,901 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 13:10:47,901 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 4 +2024-05-23 13:10:47,901 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:47,901 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 13:10:47,901 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:47,901 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 13:10:47,901 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 5 +2024-05-23 13:10:47,901 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:47,901 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 13:10:47,901 DEBUG SenderThread:3881 [sender.py:send():378] send: summary +2024-05-23 13:10:47,902 INFO SenderThread:3881 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:10:47,902 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:47,902 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 13:10:47,902 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 6 +2024-05-23 13:10:47,902 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:47,902 INFO HandlerThread:3881 
[handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 13:10:47,903 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:47,903 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 13:10:47,907 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:10:47,974 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 7 +2024-05-23 13:10:47,974 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:47,974 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 13:10:47,974 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:47,974 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 13:10:48,567 INFO Thread-12 :3881 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/config.yaml +2024-05-23 13:10:48,567 INFO Thread-12 :3881 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-summary.json +2024-05-23 13:10:48,894 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:10:49,102 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 8 +2024-05-23 13:10:49,102 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:10:49,102 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:49,102 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 13:10:49,102 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:49,102 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 13:10:49,102 INFO SenderThread:3881 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 13:10:49,103 INFO SenderThread:3881 [job_builder.py:_get_source_type():576] no source found +2024-05-23 13:10:49,103 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 9 +2024-05-23 13:10:49,103 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:49,103 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 13:10:49,103 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:49,103 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 13:10:49,103 INFO SenderThread:3881 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 13:10:49,568 INFO SenderThread:3881 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/output.log +2024-05-23 13:10:49,568 INFO SenderThread:3881 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files +2024-05-23 13:10:49,569 INFO SenderThread:3881 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-metadata.json wandb-metadata.json +2024-05-23 13:10:49,569 INFO 
SenderThread:3881 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/requirements.txt requirements.txt +2024-05-23 13:10:49,569 INFO SenderThread:3881 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-summary.json wandb-summary.json +2024-05-23 13:10:49,571 INFO SenderThread:3881 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/output.log output.log +2024-05-23 13:10:49,572 INFO SenderThread:3881 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/config.yaml config.yaml +2024-05-23 13:10:49,572 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 10 +2024-05-23 13:10:49,572 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:49,572 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 13:10:49,572 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:49,572 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 13:10:49,572 INFO SenderThread:3881 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:10:49,872 INFO wandb-upload_0:3881 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/requirements.txt +2024-05-23 13:10:49,894 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:10:49,894 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:10:50,155 INFO wandb-upload_1:3881 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/wandb-summary.json +2024-05-23 13:10:50,183 INFO wandb-upload_2:3881 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/output.log +2024-05-23 13:10:50,184 INFO wandb-upload_3:3881 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/files/config.yaml +2024-05-23 13:10:50,384 INFO Thread-11 (_thread_body):3881 [sender.py:transition_state():613] send defer: 11 +2024-05-23 13:10:50,384 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:50,384 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 13:10:50,384 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:50,384 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 13:10:50,385 INFO SenderThread:3881 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 13:10:50,385 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 12 +2024-05-23 13:10:50,385 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:50,385 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 13:10:50,385 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:50,385 INFO 
SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 13:10:50,385 INFO SenderThread:3881 [file_stream.py:finish():601] file stream finish called +2024-05-23 13:10:50,731 INFO SenderThread:3881 [file_stream.py:finish():605] file stream finish is done +2024-05-23 13:10:50,732 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 13 +2024-05-23 13:10:50,732 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:50,732 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 13:10:50,732 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:50,732 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 13:10:50,732 INFO SenderThread:3881 [sender.py:transition_state():613] send defer: 14 +2024-05-23 13:10:50,732 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:10:50,732 INFO HandlerThread:3881 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 13:10:50,732 DEBUG SenderThread:3881 [sender.py:send():378] send: final +2024-05-23 13:10:50,732 DEBUG SenderThread:3881 [sender.py:send():378] send: footer +2024-05-23 13:10:50,732 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: defer +2024-05-23 13:10:50,732 INFO SenderThread:3881 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 13:10:50,733 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:10:50,733 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:10:50,733 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:10:50,734 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 13:10:50,734 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 13:10:50,734 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 13:10:50,734 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 13:10:50,734 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:10:50,734 DEBUG SenderThread:3881 [sender.py:send_request():405] send_request: server_info +2024-05-23 13:10:50,797 INFO MainThread:3881 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 13:10:50,797 INFO MainThread:3881 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 13:10:50,797 INFO MainThread:3881 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 13:10:50,798 DEBUG HandlerThread:3881 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 13:10:50,798 INFO HandlerThread:3881 [handler.py:finish():882] shutting down handler +2024-05-23 13:10:51,734 INFO WriterThread:3881 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/run-esww7elp.wandb +2024-05-23 13:10:51,797 INFO SenderThread:3881 [sender.py:finish():1545] shutting down sender +2024-05-23 13:10:51,797 INFO SenderThread:3881 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:10:51,797 INFO SenderThread:3881 [file_pusher.py:join():175] waiting for file pusher diff --git 
a/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..90aba7e0467cd5a622733b67819ad08dada73e27 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 13:10:36,267 INFO MainThread:3726 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 13:10:36,267 INFO MainThread:3726 [wandb_setup.py:_flush():76] Configure stats pid to 3726 +2024-05-23 13:10:36,267 INFO MainThread:3726 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 13:10:36,267 INFO MainThread:3726 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 13:10:36,267 INFO MainThread:3726 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 13:10:36,267 INFO MainThread:3726 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 13:10:36,267 WARNING MainThread:3726 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 13:10:36,267 INFO MainThread:3726 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 13:10:36,267 INFO MainThread:3726 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 13:10:36,267 INFO MainThread:3726 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/logs/debug.log +2024-05-23 13:10:36,268 INFO MainThread:3726 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/logs/debug-internal.log +2024-05-23 13:10:36,268 INFO MainThread:3726 [wandb_init.py:init():560] calling init triggers +2024-05-23 13:10:36,268 INFO MainThread:3726 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 13:10:36,268 INFO MainThread:3726 [wandb_init.py:init():610] starting backend +2024-05-23 13:10:36,268 INFO MainThread:3726 [wandb_init.py:init():614] setting up manager +2024-05-23 13:10:36,271 INFO MainThread:3726 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 13:10:36,272 INFO MainThread:3726 [wandb_init.py:init():622] backend started and connected +2024-05-23 13:10:36,275 INFO MainThread:3726 [wandb_init.py:init():711] updated telemetry +2024-05-23 13:10:36,284 INFO MainThread:3726 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 13:10:36,556 INFO MainThread:3726 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 13:10:36,674 INFO MainThread:3726 [wandb_run.py:_on_init():2405] got version response +2024-05-23 13:10:36,674 INFO MainThread:3726 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 13:10:36,981 INFO MainThread:3726 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 13:10:36,982 INFO MainThread:3726 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 13:10:36,982 INFO MainThread:3726 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 13:10:36,982 INFO MainThread:3726 [wandb_run.py:_redirect():2319] Redirects installed. 
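(The run recorded in this debug.log was launched with `--wandb_args project=bharatgpt,group=trial_expt_2`, as the system metadata above shows. A minimal sketch, assuming a simple comma-separated key=value convention, of how such a string maps onto the `wandb.init(...)` call this log records; `parse_wandb_args` is an illustrative helper, not lm-eval's actual implementation:)

    import wandb

    def parse_wandb_args(arg_string: str) -> dict:
        # "project=bharatgpt,group=trial_expt_2" -> {"project": "bharatgpt", "group": "trial_expt_2"}
        return dict(item.split("=", 1) for item in arg_string.split(",") if item)

    run = wandb.init(**parse_wandb_args("project=bharatgpt,group=trial_expt_2"))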
+2024-05-23 13:10:36,984 INFO MainThread:3726 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 13:10:51,798 WARNING MsgRouterThr:3726 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/run-esww7elp.wandb b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/run-esww7elp.wandb new file mode 100644 index 0000000000000000000000000000000000000000..d62eb3f69ab95db320a834d2ac46d7d641aae6fe Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_131036-esww7elp/run-esww7elp.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240530_125850-zm975g09/files/config.yaml b/lm-evaluation-harness/wandb/run-20240530_125850-zm975g09/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..02aa740230b0a6ffac7f4ee9f1aa2fa57a18b76a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_125850-zm975g09/files/config.yaml @@ -0,0 +1,284 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1717073930 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 2 + - 13 + - 23 + - 62 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 +task_configs: + desc: null + value: + arc_easy: + task: arc_easy + group: + - ai2_arc + dataset_path: allenai/ai2_arc + dataset_name: ARC-Easy + training_split: train + validation_split: validation + test_split: test + doc_to_text: 'Question: {{question}} + + Answer:' + doc_to_target: '{{choices.label.index(answerKey)}}' + doc_to_choice: '{{choices.text}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: 'Question: {{question}} + + Answer:' + metadata: + version: 1.0 + boolq: + task: boolq + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: boolq + training_split: train + validation_split: validation + doc_to_text: '{{passage}} + + Question: {{question}}? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: passage + metadata: + version: 2.0 + copa: + task: copa + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: copa + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n # Drop the period\n connector =\ + \ {\n \"cause\": \"because\",\n \"effect\": \"therefore\",\n\ + \ }[doc[\"question\"]]\n return doc[\"premise\"].strip()[:-1] + f\"\ + \ {connector}\"\n" + doc_to_target: "def doc_to_target(doc):\n correct_choice = doc[\"choice1\"\ + ] if doc[\"label\"] == 0 else doc[\"choice2\"]\n # Connect the sentences\n\ + \ return \" \" + convert_choice(correct_choice)\n" + doc_to_choice: "def doc_to_choice(doc):\n return [\" \" + convert_choice(doc[\"\ + choice1\"]), \" \" + convert_choice(doc[\"choice2\"])]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + mrpc: + task: mrpc + group: glue + dataset_path: glue + dataset_name: mrpc + training_split: train + validation_split: validation + doc_to_text: 'Sentence 1: {{sentence1}} + + Sentence 2: {{sentence2}} + + Question: Do both sentences mean the same thing? + + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + - metric: f1 + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + piqa: + task: piqa + dataset_path: piqa + training_split: train + validation_split: validation + doc_to_text: 'Question: {{goal}} + + Answer:' + doc_to_target: label + doc_to_choice: '{{[sol1, sol2]}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: goal + metadata: + version: 1.0 + sst2: + task: sst2 + group: glue + dataset_path: glue + dataset_name: sst2 + training_split: train + validation_split: validation + doc_to_text: '{{sentence}} + + Question: Is this sentence positive or negative? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - negative + - positive + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + winogrande: + task: winogrande + dataset_path: winogrande + dataset_name: winogrande_xl + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n\ + \ return answer_to_num[doc[\"answer\"]]\n" + doc_to_target: "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"\ + _\") + 1\n return doc[\"sentence\"][idx:].strip()\n" + doc_to_choice: "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"\ + _\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"\ + sentence\"][:idx] + opt for opt in options]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: sentence + metadata: + version: 1.0 +cli_configs: + desc: null + value: + model: hf + model_args: pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step40000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer + batch_size: auto + batch_sizes: + - 64 + device: null + use_cache: null + limit: null + bootstrap_iters: 100000 + gen_kwargs: null diff --git a/lm-evaluation-harness/wandb/run-20240530_125850-zm975g09/files/media/table/evaluation/eval_results_1_9a88afbbc424c8e0720b.table.json b/lm-evaluation-harness/wandb/run-20240530_125850-zm975g09/files/media/table/evaluation/eval_results_1_9a88afbbc424c8e0720b.table.json new file mode 100644 index 0000000000000000000000000000000000000000..a9094188b006c13c19c3ad60df2287ad9ae27ee5 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_125850-zm975g09/files/media/table/evaluation/eval_results_1_9a88afbbc424c8e0720b.table.json @@ -0,0 +1 @@ +{"columns": ["Tasks", "Version", "Filter", "num_fewshot", "Metric", "Value", "Stderr"], "data": [["winogrande", 1.0, "none", 0, "acc", "0.4980268350434096", "0.0141"], ["sst2", 1.0, "none", 0, "acc", "0.5206422018348624", "0.0169"], ["piqa", 1.0, "none", 0, "acc", "0.5217627856365615", "0.0117"], ["piqa", 1.0, "none", 0, "acc_norm", "0.49455930359085964", "0.0117"], ["mrpc", 1.0, "none", 0, "acc", "0.3161764705882353", "0.0230"], ["mrpc", 1.0, "none", 0, "f1", "0.0", "0.0000"], ["copa", 1.0, "none", 0, "acc", "0.54", "0.0501"], ["boolq", 2.0, "none", 0, "acc", "0.3801223241590214", "0.0085"], ["arc_easy", 1.0, "none", 0, "acc", "0.26725589225589225", "0.0091"], ["arc_easy", 1.0, "none", 0, "acc_norm", "0.2668350168350168", "0.0091"]]} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_125850-zm975g09/files/output.log b/lm-evaluation-harness/wandb/run-20240530_125850-zm975g09/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..aeb88c0fb99d0fbea672c0d641e4ac23eae7d7b0 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_125850-zm975g09/files/output.log @@ -0,0 +1,597 @@ + +2024-05-30:12:58:51,049 INFO [__main__.py:251] Verbosity set to INFO +2024-05-30:12:59:00,203 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-05-30:12:59:00,204 INFO 
[evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-30:12:59:00,205 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step40000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer'} +2024-05-30:12:59:02,495 INFO [huggingface.py:164] Using device 'cuda' +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Downloading readme: 100%|██████████| 9.00k/9.00k [00:00<00:00, 16.2MB/s] +Downloading data: 100%|██████████| 331k/331k [00:00<00:00, 2.00MB/s] +Downloading data: 100%|██████████| 346k/346k [00:00<00:00, 4.37MB/s] +Downloading data: 100%|██████████| 86.1k/86.1k [00:00<00:00, 1.08MB/s] +Generating train split: 100%|██████████| 2251/2251 [00:00<00:00, 44754.78 examples/s] +Generating test split: 100%|██████████| 2376/2376 [00:00<00:00, 330986.29 examples/s] +Generating validation split: 100%|██████████| 570/570 [00:00<00:00, 153057.19 examples/s] +2024-05-30:12:59:32,375 WARNING [task.py:763] [Task: boolq] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-30:12:59:32,375 WARNING [task.py:775] [Task: boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for super_glue contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/super_glue +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 30.7k/30.7k [00:00<00:00, 39.3MB/s] +Downloading readme: 100%|██████████| 18.2k/18.2k [00:00<00:00, 28.0MB/s] +Downloading data: 100%|██████████| 4.12M/4.12M [00:00<00:00, 17.4MB/s] +Generating train split: 100%|██████████| 9427/9427 [00:00<00:00, 21940.31 examples/s] +Generating validation split: 100%|██████████| 3270/3270 [00:00<00:00, 22219.37 examples/s] +Generating test split: 100%|██████████| 3245/3245 [00:00<00:00, 22464.83 examples/s] +2024-05-30:12:59:36,174 WARNING [task.py:763] [Task: copa] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-30:12:59:36,174 WARNING [task.py:775] [Task: copa] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 44.0k/44.0k [00:00<00:00, 51.2MB/s] +Generating train split: 100%|██████████| 400/400 [00:00<00:00, 16365.14 examples/s] +Generating validation split: 100%|██████████| 100/100 [00:00<00:00, 13122.37 examples/s] +Generating test split: 100%|██████████| 500/500 [00:00<00:00, 16449.03 examples/s] +2024-05-30:12:59:38,283 WARNING [task.py:763] [Task: mrpc] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-30:12:59:38,284 WARNING [task.py:775] [Task: mrpc] metric acc is defined, but higher_is_better is not. 
using default higher_is_better=True +2024-05-30:12:59:38,284 WARNING [task.py:763] [Task: mrpc] metric f1 is defined, but aggregation is not. using default aggregation=f1 +2024-05-30:12:59:38,284 WARNING [task.py:775] [Task: mrpc] metric f1 is defined, but higher_is_better is not. using default higher_is_better=True +Downloading readme: 100%|██████████| 35.3k/35.3k [00:00<00:00, 43.0MB/s] +Downloading data: 100%|██████████| 649k/649k [00:00<00:00, 4.53MB/s] +Downloading data: 100%|██████████| 75.7k/75.7k [00:00<00:00, 516kB/s] +Downloading data: 100%|██████████| 308k/308k [00:00<00:00, 2.08MB/s] +Generating train split: 100%|██████████| 3668/3668 [00:00<00:00, 409037.20 examples/s] +Generating validation split: 100%|██████████| 408/408 [00:00<00:00, 173539.81 examples/s] +Generating test split: 100%|██████████| 1725/1725 [00:00<00:00, 393066.46 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for piqa contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/piqa +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 5.36k/5.36k [00:00<00:00, 11.1MB/s] +Downloading readme: 100%|██████████| 8.41k/8.41k [00:00<00:00, 17.9MB/s] +Downloading data: 100%|██████████| 1.82M/1.82M [00:00<00:00, 4.15MB/s] +Downloading data: 100%|██████████| 815k/815k [00:00<00:00, 21.9MB/s] +Generating train split: 100%|██████████| 16113/16113 [00:00<00:00, 23732.33 examples/s] +Generating test split: 100%|██████████| 3084/3084 [00:00<00:00, 24181.89 examples/s] +Generating validation split: 100%|██████████| 1838/1838 [00:00<00:00, 22264.83 examples/s] +2024-05-30:12:59:49,989 WARNING [task.py:763] [Task: sst2] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-30:12:59:49,990 WARNING [task.py:775] [Task: sst2] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 3.11M/3.11M [00:00<00:00, 20.1MB/s] +Downloading data: 100%|██████████| 72.8k/72.8k [00:00<00:00, 469kB/s] +Downloading data: 100%|██████████| 148k/148k [00:00<00:00, 1.04MB/s] +Generating train split: 100%|██████████| 67349/67349 [00:00<00:00, 1400944.17 examples/s] +Generating validation split: 100%|██████████| 872/872 [00:00<00:00, 400946.40 examples/s] +Generating test split: 100%|██████████| 1821/1821 [00:00<00:00, 544043.56 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for winogrande contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/winogrande +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. 
+ warnings.warn( +Downloading builder script: 100%|██████████| 5.65k/5.65k [00:00<00:00, 9.79MB/s] +Downloading readme: 100%|██████████| 9.97k/9.97k [00:00<00:00, 15.6MB/s] +Downloading data: 100%|██████████| 3.40M/3.40M [00:00<00:00, 6.89MB/s] +Generating train split: 100%|██████████| 40398/40398 [00:01<00:00, 23848.54 examples/s] +Generating test split: 100%|██████████| 1767/1767 [00:00<00:00, 22950.21 examples/s] +Generating validation split: 100%|██████████| 1267/1267 [00:00<00:00, 23651.91 examples/s] +2024-05-30:13:00:03,151 INFO [task.py:395] Building contexts for winogrande on rank 0... +100%|██████████| 1267/1267 [00:00<00:00, 68998.34it/s] +2024-05-30:13:00:03,248 INFO [task.py:395] Building contexts for sst2 on rank 0... +100%|██████████| 872/872 [00:00<00:00, 2586.27it/s] +2024-05-30:13:00:03,615 INFO [task.py:395] Building contexts for piqa on rank 0... +100%|██████████| 1838/1838 [00:01<00:00, 1072.59it/s] +2024-05-30:13:00:05,412 INFO [task.py:395] Building contexts for mrpc on rank 0... +100%|██████████| 408/408 [00:00<00:00, 1875.30it/s] +2024-05-30:13:00:05,661 INFO [task.py:395] Building contexts for copa on rank 0... +100%|██████████| 100/100 [00:00<00:00, 61644.68it/s] +2024-05-30:13:00:05,670 INFO [task.py:395] Building contexts for boolq on rank 0... +100%|██████████| 3270/3270 [00:01<00:00, 1999.60it/s] +2024-05-30:13:00:07,436 INFO [task.py:395] Building contexts for arc_easy on rank 0... +100%|██████████| 2376/2376 [00:02<00:00, 1071.02it/s] +2024-05-30:13:00:09,797 INFO [evaluator.py:379] Running loglikelihood requests +Token indices sequence length is longer than the specified maximum sequence length for this model (1333 > 1024). Running this sequence through the model will result in indexing errors +Running loglikelihood requests: 0%| | 0/25011 [00:00 torch.Tensor: + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + im = Image.open(requests.get(url, stream=True).raw) + return im + + +@torch.no_grad() +def convert_maskformer_checkpoint( + model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False +): + """ + Copy/paste/tweak model's weights to our MaskFormer structure. 
+ """ + config = get_maskformer_config(model_name) + + # load original state_dict + with open(checkpoint_path, "rb") as f: + data = pickle.load(f) + state_dict = data["model"] + + # for name, param in state_dict.items(): + # print(name, param.shape) + + # rename keys + rename_keys = create_rename_keys(config) + for src, dest in rename_keys: + rename_key(state_dict, src, dest) + read_in_swin_q_k_v(state_dict, config.backbone_config) + read_in_decoder_q_k_v(state_dict, config) + + # update to torch tensors + for key, value in state_dict.items(): + state_dict[key] = torch.from_numpy(value) + + # load 🤗 model + model = MaskFormerForInstanceSegmentation(config) + model.eval() + + for name, param in model.named_parameters(): + print(name, param.shape) + + missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) + assert missing_keys == [ + "model.pixel_level_module.encoder.model.layernorm.weight", + "model.pixel_level_module.encoder.model.layernorm.bias", + ] + assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}" + + # verify results + image = prepare_img() + if "vistas" in model_name: + ignore_index = 65 + elif "cityscapes" in model_name: + ignore_index = 65535 + else: + ignore_index = 255 + reduce_labels = True if "ade" in model_name else False + image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels) + + inputs = image_processor(image, return_tensors="pt") + + outputs = model(**inputs) + + print("Logits:", outputs.class_queries_logits[0, :3, :3]) + + if model_name == "maskformer-swin-tiny-ade": + expected_logits = torch.tensor( + [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] + ) + assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4) + print("Looks ok!") + + if pytorch_dump_folder_path is not None: + print(f"Saving model and image processor to {pytorch_dump_folder_path}") + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + model.save_pretrained(pytorch_dump_folder_path) + image_processor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + print("Pushing model and image processor to the hub...") + model.push_to_hub(f"nielsr/{model_name}") + image_processor.push_to_hub(f"nielsr/{model_name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="maskformer-swin-tiny-ade", + type=str, + help=("Name of the MaskFormer model you'd like to convert",), + ) + parser.add_argument( + "--checkpoint_path", + default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl", + type=str, + help="Path to the original state dict (.pth file).", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." 
diff --git a/venv/lib/python3.10/site-packages/transformers/models/maskformer/image_processing_maskformer.py b/venv/lib/python3.10/site-packages/transformers/models/maskformer/image_processing_maskformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c854b35c76edbf0074d7f51aed0d4eab77665db
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/maskformer/image_processing_maskformer.py
@@ -0,0 +1,1305 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for MaskFormer."""
+
+import math
+import warnings
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+    PaddingMode,
+    get_resize_output_image_size,
+    pad,
+    rescale,
+    resize,
+    to_channel_dimension_format,
+)
+from ...image_utils import (
+    ChannelDimension,
+    ImageInput,
+    PILImageResampling,
+    get_image_size,
+    infer_channel_dimension_format,
+    is_scaled_image,
+    make_list_of_images,
+    to_numpy_array,
+    valid_images,
+    validate_kwargs,
+    validate_preprocess_arguments,
+)
+from ...utils import (
+    IMAGENET_DEFAULT_MEAN,
+    IMAGENET_DEFAULT_STD,
+    TensorType,
+    is_torch_available,
+    is_torch_tensor,
+    logging,
+)
+
+
+logger = logging.get_logger(__name__)
+
+
+if TYPE_CHECKING:
+    from transformers import MaskFormerForInstanceSegmentationOutput
+
+
+if is_torch_available():
+    import torch
+    from torch import nn
+
+
+# Copied from transformers.models.detr.image_processing_detr.max_across_indices
+def max_across_indices(values: Iterable[Any]) -> List[Any]:
+    """
+    Return the maximum value across all indices of an iterable of values.
+    """
+    return [max(values_i) for values_i in zip(*values)]
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_max_height_width
+def get_max_height_width(
+    images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
+) -> List[int]:
+    """
+    Get the maximum height and width across all images in a batch.
+ """ + if input_data_format is None: + input_data_format = infer_channel_dimension_format(images[0]) + + if input_data_format == ChannelDimension.FIRST: + _, max_height, max_width = max_across_indices([img.shape for img in images]) + elif input_data_format == ChannelDimension.LAST: + max_height, max_width, _ = max_across_indices([img.shape for img in images]) + else: + raise ValueError(f"Invalid channel dimension format: {input_data_format}") + return (max_height, max_width) + + +# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask +def make_pixel_mask( + image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None +) -> np.ndarray: + """ + Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. + + Args: + image (`np.ndarray`): + Image to make the pixel mask for. + output_size (`Tuple[int, int]`): + Output size of the mask. + """ + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + mask = np.zeros(output_size, dtype=np.int64) + mask[:input_height, :input_width] = 1 + return mask + + +# Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle +def binary_mask_to_rle(mask): + """ + Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. + + Args: + mask (`torch.Tensor` or `numpy.array`): + A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target + segment_id or class_id. + Returns: + `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE + format. + """ + if is_torch_tensor(mask): + mask = mask.numpy() + + pixels = mask.flatten() + pixels = np.concatenate([[0], pixels, [0]]) + runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 + runs[1::2] -= runs[::2] + return list(runs) + + +# Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle +def convert_segmentation_to_rle(segmentation): + """ + Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. + + Args: + segmentation (`torch.Tensor` or `numpy.array`): + A segmentation map of shape `(height, width)` where each value denotes a segment or class id. + Returns: + `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. + """ + segment_ids = torch.unique(segmentation) + + run_length_encodings = [] + for idx in segment_ids: + mask = torch.where(segmentation == idx, 1, 0) + rle = binary_mask_to_rle(mask) + run_length_encodings.append(rle) + + return run_length_encodings + + +# Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects +def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): + """ + Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and + `labels`. + + Args: + masks (`torch.Tensor`): + A tensor of shape `(num_queries, height, width)`. + scores (`torch.Tensor`): + A tensor of shape `(num_queries)`. + labels (`torch.Tensor`): + A tensor of shape `(num_queries)`. + object_mask_threshold (`float`): + A number between 0 and 1 used to binarize the masks. + Raises: + `ValueError`: Raised when the first dimension doesn't match in all input tensors. + Returns: + `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region + < `object_mask_threshold`. 
+ """ + if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): + raise ValueError("mask, scores and labels must have the same shape!") + + to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) + + return masks[to_keep], scores[to_keep], labels[to_keep] + + +# Copied from transformers.models.detr.image_processing_detr.check_segment_validity +def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): + # Get the mask associated with the k class + mask_k = mask_labels == k + mask_k_area = mask_k.sum() + + # Compute the area of all the stuff in query k + original_area = (mask_probs[k] >= mask_threshold).sum() + mask_exists = mask_k_area > 0 and original_area > 0 + + # Eliminate disconnected tiny segments + if mask_exists: + area_ratio = mask_k_area / original_area + if not area_ratio.item() > overlap_mask_area_threshold: + mask_exists = False + + return mask_exists, mask_k + + +# Copied from transformers.models.detr.image_processing_detr.compute_segments +def compute_segments( + mask_probs, + pred_scores, + pred_labels, + mask_threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + label_ids_to_fuse: Optional[Set[int]] = None, + target_size: Tuple[int, int] = None, +): + height = mask_probs.shape[1] if target_size is None else target_size[0] + width = mask_probs.shape[2] if target_size is None else target_size[1] + + segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) + segments: List[Dict] = [] + + if target_size is not None: + mask_probs = nn.functional.interpolate( + mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False + )[0] + + current_segment_id = 0 + + # Weigh each mask by its prediction score + mask_probs *= pred_scores.view(-1, 1, 1) + mask_labels = mask_probs.argmax(0) # [height, width] + + # Keep track of instances of each class + stuff_memory_list: Dict[str, int] = {} + for k in range(pred_labels.shape[0]): + pred_class = pred_labels[k].item() + should_fuse = pred_class in label_ids_to_fuse + + # Check if mask exists and large enough to be a segment + mask_exists, mask_k = check_segment_validity( + mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold + ) + + if mask_exists: + if pred_class in stuff_memory_list: + current_segment_id = stuff_memory_list[pred_class] + else: + current_segment_id += 1 + + # Add current object segment to final segmentation map + segmentation[mask_k] = current_segment_id + segment_score = round(pred_scores[k].item(), 6) + segments.append( + { + "id": current_segment_id, + "label_id": pred_class, + "was_fused": should_fuse, + "score": segment_score, + } + ) + if should_fuse: + stuff_memory_list[pred_class] = current_segment_id + + return segmentation, segments + + +# TODO: (Amy) Move to image_transforms +def convert_segmentation_map_to_binary_masks( + segmentation_map: "np.ndarray", + instance_id_to_semantic_id: Optional[Dict[int, int]] = None, + ignore_index: Optional[int] = None, + reduce_labels: bool = False, +): + if reduce_labels and ignore_index is None: + raise ValueError("If `reduce_labels` is True, `ignore_index` must be provided.") + + if reduce_labels: + segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1) + + # Get unique ids (class or instance ids based on input) + all_labels = np.unique(segmentation_map) + + # Drop background label if applicable + if ignore_index is not None: + all_labels = all_labels[all_labels != ignore_index] + + # Generate 
a binary mask for each object instance + binary_masks = [(segmentation_map == i) for i in all_labels] + binary_masks = np.stack(binary_masks, axis=0) # (num_labels, height, width) + + # Convert instance ids to class ids + if instance_id_to_semantic_id is not None: + labels = np.zeros(all_labels.shape[0]) + + for label in all_labels: + class_id = instance_id_to_semantic_id[label + 1 if reduce_labels else label] + labels[all_labels == label] = class_id - 1 if reduce_labels else class_id + else: + labels = all_labels + + return binary_masks.astype(np.float32), labels.astype(np.int64) + + +def get_maskformer_resize_output_image_size( + image: np.ndarray, + size: Union[int, Tuple[int, int], List[int], Tuple[int]], + max_size: Optional[int] = None, + size_divisor: int = 0, + default_to_square: bool = True, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> Tuple[int, int]: + """ + Computes the output size given the desired size. + + Args: + image (`np.ndarray`): + The input image. + size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`): + The size of the output image. + max_size (`int`, *optional*): + The maximum size of the output image. + size_divisor (`int`, *optional*, defaults to 0): + If `size_divisor` is given, the output image size will be divisible by the number. + default_to_square (`bool`, *optional*, defaults to `True`): + Whether to default to square if no size is provided. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input. + + Returns: + `Tuple[int, int]`: The output size. + """ + output_size = get_resize_output_image_size( + input_image=image, + size=size, + default_to_square=default_to_square, + max_size=max_size, + input_data_format=input_data_format, + ) + + if size_divisor > 0: + height, width = output_size + height = int(math.ceil(height / size_divisor) * size_divisor) + width = int(math.ceil(width / size_divisor) * size_divisor) + output_size = (height, width) + + return output_size + + +class MaskFormerImageProcessor(BaseImageProcessor): + r""" + Constructs a MaskFormer image processor. The image processor can be used to prepare image(s) and optional targets + for the model. + + This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the input to a certain `size`. + size (`int`, *optional*, defaults to 800): + Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a + sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of + the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * + height / width, size)`. + size_divisor (`int`, *optional*, defaults to 32): + Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in + Swin Transformer. + resample (`int`, *optional*, defaults to `Resampling.BILINEAR`): + An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, + `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, + `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set + to `True`. 
+ do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the input to a certain `scale`. + rescale_factor (`float`, *optional*, defaults to `1/ 255`): + Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether or not to normalize the input with mean and standard deviation. + image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): + The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. + image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): + The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the + ImageNet std. + ignore_index (`int`, *optional*): + Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels + denoted with 0 (background) will be replaced with `ignore_index`. + do_reduce_labels (`bool`, *optional*, defaults to `False`): + Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 + is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). + The background label will be replaced by `ignore_index`. + + """ + + model_input_names = ["pixel_values", "pixel_mask"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + size_divisor: int = 32, + resample: PILImageResampling = PILImageResampling.BILINEAR, + do_rescale: bool = True, + rescale_factor: float = 1 / 255, + do_normalize: bool = True, + image_mean: Union[float, List[float]] = None, + image_std: Union[float, List[float]] = None, + ignore_index: Optional[int] = None, + do_reduce_labels: bool = False, + **kwargs, + ): + if "size_divisibility" in kwargs: + warnings.warn( + "The `size_divisibility` argument is deprecated and will be removed in v4.27. Please use " + "`size_divisor` instead.", + FutureWarning, + ) + size_divisor = kwargs.pop("size_divisibility") + if "max_size" in kwargs: + warnings.warn( + "The `max_size` argument is deprecated and will be removed in v4.27. Please use size['longest_edge']" + " instead.", + FutureWarning, + ) + # We make max_size a private attribute so we can pass it as a default value in the preprocess method whilst + # `size` can still be pass in as an int + self._max_size = kwargs.pop("max_size") + else: + self._max_size = 1333 + if "reduce_labels" in kwargs: + warnings.warn( + "The `reduce_labels` argument is deprecated and will be removed in v4.27. 
Please use " + "`do_reduce_labels` instead.", + FutureWarning, + ) + do_reduce_labels = kwargs.pop("reduce_labels") + + size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size} + size = get_size_dict(size, max_size=self._max_size, default_to_square=False) + + super().__init__(**kwargs) + self.do_resize = do_resize + self.size = size + self.resample = resample + self.size_divisor = size_divisor + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD + self.ignore_index = ignore_index + self.do_reduce_labels = do_reduce_labels + self._valid_processor_keys = [ + "images", + "segmentation_maps", + "instance_id_to_semantic_id", + "do_resize", + "size", + "size_divisor", + "resample", + "do_rescale", + "rescale_factor", + "do_normalize", + "image_mean", + "image_std", + "ignore_index", + "do_reduce_labels", + "return_tensors", + "data_format", + "input_data_format", + ] + + @classmethod + def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): + """ + Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is + created using from_dict and kwargs e.g. `MaskFormerImageProcessor.from_pretrained(checkpoint, max_size=800)` + """ + image_processor_dict = image_processor_dict.copy() + if "max_size" in kwargs: + image_processor_dict["max_size"] = kwargs.pop("max_size") + if "size_divisibility" in kwargs: + image_processor_dict["size_divisibility"] = kwargs.pop("size_divisibility") + return super().from_dict(image_processor_dict, **kwargs) + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + size_divisor: int = 0, + resample: PILImageResampling = PILImageResampling.BILINEAR, + data_format=None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an + int, smaller edge of the image will be matched to this number. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + The size of the output image. + size_divisor (`int`, *optional*, defaults to 0): + If `size_divisor` is given, the output image size will be divisible by the number. + resample (`PILImageResampling` resampling filter, *optional*, defaults to `PILImageResampling.BILINEAR`): + Resampling filter to use when resizing the image. + data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + if "max_size" in kwargs: + warnings.warn( + "The `max_size` parameter is deprecated and will be removed in v4.27. 
" + "Please specify in `size['longest_edge'] instead`.", + FutureWarning, + ) + max_size = kwargs.pop("max_size") + else: + max_size = None + size = get_size_dict(size, max_size=max_size, default_to_square=False) + if "shortest_edge" in size and "longest_edge" in size: + size, max_size = size["shortest_edge"], size["longest_edge"] + elif "height" in size and "width" in size: + size = (size["height"], size["width"]) + max_size = None + else: + raise ValueError( + "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" + f" {size.keys()}." + ) + size = get_maskformer_resize_output_image_size( + image=image, + size=size, + max_size=max_size, + size_divisor=size_divisor, + default_to_square=False, + input_data_format=input_data_format, + ) + image = resize( + image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs + ) + return image + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale + def rescale( + self, + image: np.ndarray, + rescale_factor: float, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """ + Rescale the image by the given factor. image = image * rescale_factor. + + Args: + image (`np.ndarray`): + Image to rescale. + rescale_factor (`float`): + The value to use for rescaling. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the input image. If unset, is inferred from the input image. Can be + one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
+ """ + return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) + + def convert_segmentation_map_to_binary_masks( + self, + segmentation_map: "np.ndarray", + instance_id_to_semantic_id: Optional[Dict[int, int]] = None, + ignore_index: Optional[int] = None, + reduce_labels: bool = False, + ): + reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels + ignore_index = ignore_index if ignore_index is not None else self.ignore_index + return convert_segmentation_map_to_binary_masks( + segmentation_map=segmentation_map, + instance_id_to_semantic_id=instance_id_to_semantic_id, + ignore_index=ignore_index, + reduce_labels=reduce_labels, + ) + + def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature: + return self.preprocess(images, segmentation_maps=segmentation_maps, **kwargs) + + def _preprocess( + self, + image: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + size_divisor: int = None, + resample: PILImageResampling = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_normalize: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ): + if do_resize: + image = self.resize( + image, size=size, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format + ) + if do_rescale: + image = self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format) + if do_normalize: + image = self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) + return image + + def _preprocess_image( + self, + image: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + size_divisor: int = None, + resample: PILImageResampling = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_normalize: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """Preprocesses a single image.""" + # All transformations expect numpy arrays. + image = to_numpy_array(image) + if is_scaled_image(image) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
+ ) + if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + image = self._preprocess( + image=image, + do_resize=do_resize, + size=size, + size_divisor=size_divisor, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + input_data_format=input_data_format, + ) + if data_format is not None: + image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + return image + + def _preprocess_mask( + self, + segmentation_map: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + size_divisor: int = 0, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """Preprocesses a single mask.""" + segmentation_map = to_numpy_array(segmentation_map) + # Add channel dimension if missing - needed for certain transformations + if segmentation_map.ndim == 2: + added_channel_dim = True + segmentation_map = segmentation_map[None, ...] + input_data_format = ChannelDimension.FIRST + else: + added_channel_dim = False + if input_data_format is None: + input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1) + # TODO: (Amy) + # Remork segmentation map processing to include reducing labels and resizing which doesn't + # drop segment IDs > 255. + segmentation_map = self._preprocess( + image=segmentation_map, + do_resize=do_resize, + resample=PILImageResampling.NEAREST, + size=size, + size_divisor=size_divisor, + do_rescale=False, + do_normalize=False, + input_data_format=input_data_format, + ) + # Remove extra channel dimension if added for processing + if added_channel_dim: + segmentation_map = segmentation_map.squeeze(0) + return segmentation_map + + def preprocess( + self, + images: ImageInput, + segmentation_maps: Optional[ImageInput] = None, + instance_id_to_semantic_id: Optional[Dict[int, int]] = None, + do_resize: Optional[bool] = None, + size: Optional[Dict[str, int]] = None, + size_divisor: Optional[int] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + ignore_index: Optional[int] = None, + do_reduce_labels: Optional[bool] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> BatchFeature: + if "pad_and_return_pixel_mask" in kwargs: + warnings.warn( + "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in v4.27", + FutureWarning, + ) + if "reduce_labels" in kwargs: + warnings.warn( + "The `reduce_labels` argument is deprecated and will be removed in v4.27. Please use" + " `do_reduce_labels` instead.", + FutureWarning, + ) + if do_reduce_labels is not None: + raise ValueError( + "Cannot use both `reduce_labels` and `do_reduce_labels`. Please use `do_reduce_labels` instead." 
+                )
+            do_reduce_labels = kwargs.pop("reduce_labels")
+
+        do_resize = do_resize if do_resize is not None else self.do_resize
+        size = size if size is not None else self.size
+        size = get_size_dict(size, default_to_square=False, max_size=self._max_size)
+        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
+        resample = resample if resample is not None else self.resample
+        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+        image_mean = image_mean if image_mean is not None else self.image_mean
+        image_std = image_std if image_std is not None else self.image_std
+        ignore_index = ignore_index if ignore_index is not None else self.ignore_index
+        do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
+
+        if not valid_images(images):
+            raise ValueError(
+                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                "torch.Tensor, tf.Tensor or jax.ndarray."
+            )
+        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+        validate_preprocess_arguments(
+            do_rescale=do_rescale,
+            rescale_factor=rescale_factor,
+            do_normalize=do_normalize,
+            image_mean=image_mean,
+            image_std=image_std,
+            do_resize=do_resize,
+            size=size,
+            resample=resample,
+        )
+
+        if segmentation_maps is not None and not valid_images(segmentation_maps):
+            raise ValueError(
+                "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                "torch.Tensor, tf.Tensor or jax.ndarray."
+            )
+
+        images = make_list_of_images(images)
+        if segmentation_maps is not None:
+            segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)
+
+        if segmentation_maps is not None and len(images) != len(segmentation_maps):
+            raise ValueError("Images and segmentation maps must have the same length.")
+
+        images = [
+            self._preprocess_image(
+                image,
+                do_resize=do_resize,
+                size=size,
+                size_divisor=size_divisor,
+                resample=resample,
+                do_rescale=do_rescale,
+                rescale_factor=rescale_factor,
+                do_normalize=do_normalize,
+                image_mean=image_mean,
+                image_std=image_std,
+                data_format=data_format,
+                input_data_format=input_data_format,
+            )
+            for image in images
+        ]
+
+        if segmentation_maps is not None:
+            segmentation_maps = [
+                self._preprocess_mask(
+                    segmentation_map, do_resize, size, size_divisor, input_data_format=input_data_format
+                )
+                for segmentation_map in segmentation_maps
+            ]
+        encoded_inputs = self.encode_inputs(
+            images,
+            segmentation_maps,
+            instance_id_to_semantic_id,
+            ignore_index,
+            do_reduce_labels,
+            return_tensors,
+            input_data_format=input_data_format,
+        )
+        return encoded_inputs
+
+    # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image
+    def _pad_image(
+        self,
+        image: np.ndarray,
+        output_size: Tuple[int, int],
+        constant_values: Union[float, Iterable[float]] = 0,
+        data_format: Optional[ChannelDimension] = None,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+    ) -> np.ndarray:
+        """
+        Pad an image with zeros to the given size.
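+
+        Padding is applied to the bottom and right edges only, so the original content stays anchored at the
+        top-left corner. As an illustration (the numbers here are hypothetical, not from the original docs): a
+        `(3, 480, 600)` channels-first image padded to `output_size=(512, 640)` receives 32 extra rows at the
+        bottom and 40 extra columns on the right.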
+ """ + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + output_height, output_width = output_size + + pad_bottom = output_height - input_height + pad_right = output_width - input_width + padding = ((0, pad_bottom), (0, pad_right)) + padded_image = pad( + image, + padding, + mode=PaddingMode.CONSTANT, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + ) + return padded_image + + # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor.pad + def pad( + self, + images: List[np.ndarray], + constant_values: Union[float, Iterable[float]] = 0, + return_pixel_mask: bool = True, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> BatchFeature: + """ + Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width + in the batch and optionally returns their corresponding pixel mask. + + Args: + image (`np.ndarray`): + Image to pad. + constant_values (`float` or `Iterable[float]`, *optional*): + The value to use for the padding if `mode` is `"constant"`. + return_pixel_mask (`bool`, *optional*, defaults to `True`): + Whether to return a pixel mask. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + pad_size = get_max_height_width(images, input_data_format=input_data_format) + + padded_images = [ + self._pad_image( + image, + pad_size, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + ) + for image in images + ] + data = {"pixel_values": padded_images} + + if return_pixel_mask: + masks = [ + make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) + for image in images + ] + data["pixel_mask"] = masks + + return BatchFeature(data=data, tensor_type=return_tensors) + + def encode_inputs( + self, + pixel_values_list: List[ImageInput], + segmentation_maps: ImageInput = None, + instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None, + ignore_index: Optional[int] = None, + reduce_labels: bool = False, + return_tensors: Optional[Union[str, TensorType]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ): + """ + Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. + + MaskFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps + will be converted to lists of binary masks and their respective labels. 
Let's see an example, assuming + `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels = + [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for + each mask. + + Args: + pixel_values_list (`List[ImageInput]`): + List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height, + width)`. + + segmentation_maps (`ImageInput`, *optional*): + The corresponding semantic segmentation maps with the pixel-wise annotations. + + (`bool`, *optional*, defaults to `True`): + Whether or not to pad images up to the largest image in a batch and create a pixel mask. + + If left to the default, will return a pixel mask that is: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*): + A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an + instance segmentation map where each pixel represents an instance id. Can be provided as a single + dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map + instance ids in each image separately. + + return_tensors (`str` or [`~file_utils.TensorType`], *optional*): + If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` + objects. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **pixel_values** -- Pixel values to be fed to a model. + - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in + `self.model_input_names`). + - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model + (when `annotations` are provided). + - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when + `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of + `mask_labels[i][j]` if `class_labels[i][j]`. + """ + ignore_index = self.ignore_index if ignore_index is None else ignore_index + reduce_labels = self.do_reduce_labels if reduce_labels is None else reduce_labels + + pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list] + + if input_data_format is None: + input_data_format = infer_channel_dimension_format(pixel_values_list[0]) + + encoded_inputs = self.pad( + pixel_values_list, return_tensors=return_tensors, input_data_format=input_data_format + ) + + if segmentation_maps is not None: + mask_labels = [] + class_labels = [] + pad_size = get_max_height_width(pixel_values_list, input_data_format=input_data_format) + # Convert to list of binary masks and labels + for idx, segmentation_map in enumerate(segmentation_maps): + segmentation_map = to_numpy_array(segmentation_map) + if isinstance(instance_id_to_semantic_id, list): + instance_id = instance_id_to_semantic_id[idx] + else: + instance_id = instance_id_to_semantic_id + # Use instance2class_id mapping per image + masks, classes = self.convert_segmentation_map_to_binary_masks( + segmentation_map, instance_id, ignore_index=ignore_index, reduce_labels=reduce_labels + ) + # We add an axis to make them compatible with the transformations library + # this will be removed in the future + masks = [mask[None, ...] 
+                masks = [
+                    self._pad_image(
+                        image=mask,
+                        output_size=pad_size,
+                        constant_values=ignore_index,
+                        input_data_format=ChannelDimension.FIRST,
+                    )
+                    for mask in masks
+                ]
+                masks = np.concatenate(masks, axis=0)
+                mask_labels.append(torch.from_numpy(masks))
+                class_labels.append(torch.from_numpy(classes))
+
+            # we cannot batch them since they don't share a common class size
+            encoded_inputs["mask_labels"] = mask_labels
+            encoded_inputs["class_labels"] = class_labels
+
+        return encoded_inputs
+
+    def post_process_segmentation(
+        self, outputs: "MaskFormerForInstanceSegmentationOutput", target_size: Tuple[int, int] = None
+    ) -> "torch.Tensor":
+        """
+        Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image segmentation predictions. Only
+        supports PyTorch.
+
+        Args:
+            outputs ([`MaskFormerForInstanceSegmentationOutput`]):
+                The outputs from [`MaskFormerForInstanceSegmentation`].
+
+            target_size (`Tuple[int, int]`, *optional*):
+                If set, the `masks_queries_logits` will be resized to `target_size`.
+
+        Returns:
+            `torch.Tensor`:
+                A tensor of shape (`batch_size, num_class_labels, height, width`).
+        """
+        warnings.warn(
+            "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use"
+            " `post_process_instance_segmentation` instead.",
+            FutureWarning,
+        )
+
+        # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1]
+        class_queries_logits = outputs.class_queries_logits
+        # masks_queries_logits has shape [BATCH, QUERIES, HEIGHT, WIDTH]
+        masks_queries_logits = outputs.masks_queries_logits
+        if target_size is not None:
+            masks_queries_logits = torch.nn.functional.interpolate(
+                masks_queries_logits,
+                size=target_size,
+                mode="bilinear",
+                align_corners=False,
+            )
+        # remove the null class `[..., :-1]`
+        masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
+        # mask probs has shape [BATCH, QUERIES, HEIGHT, WIDTH]
+        masks_probs = masks_queries_logits.sigmoid()
+        # now we want to sum over the queries,
+        # $ out_{c,h,w} = \sum_q p_{q,c} * m_{q,h,w} $
+        # where $ softmax(p) \in R^{q, c} $ is the mask classes
+        # and $ sigmoid(m) \in R^{q, h, w}$ is the mask probabilities
+        # b(atch)q(uery)c(lasses), b(atch)q(uery)h(eight)w(idth)
+        segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
+
+        return segmentation
+
+    def post_process_semantic_segmentation(
+        self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None
+    ) -> "torch.Tensor":
+        """
+        Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports
+        PyTorch.
+
+        Args:
+            outputs ([`MaskFormerForInstanceSegmentation`]):
+                Raw outputs of the model.
+            target_sizes (`List[Tuple[int, int]]`, *optional*):
+                List of length `batch_size`, where each list item (`Tuple[int, int]`) corresponds to the requested
+                final size (height, width) of each prediction. If left to None, predictions will not be resized.
+
+        Returns:
+            `List[torch.Tensor]`:
+                A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
+                corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
+                `torch.Tensor` corresponds to a semantic class id.
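+
+        Example (a minimal usage sketch, not part of the original docstring; the checkpoint matches the one cited
+        elsewhere in this file, and the test image URL is an illustrative assumption):
+
+        ```python
+        >>> import torch
+        >>> import requests
+        >>> from PIL import Image
+        >>> from transformers import MaskFormerImageProcessor, MaskFormerForInstanceSegmentation
+
+        >>> image_processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade")
+        >>> model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-ade")
+
+        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+        >>> image = Image.open(requests.get(url, stream=True).raw)
+        >>> inputs = image_processor(images=image, return_tensors="pt")
+        >>> with torch.no_grad():
+        ...     outputs = model(**inputs)
+
+        >>> # `image.size` is (width, height); `target_sizes` expects (height, width)
+        >>> semantic_map = image_processor.post_process_semantic_segmentation(
+        ...     outputs, target_sizes=[image.size[::-1]]
+        ... )[0]
+        >>> semantic_map.shape  # each entry is a semantic class id
+        torch.Size([480, 640])
+        ```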
+ """ + class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] + + # Remove the null class `[..., :-1]` + masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] + masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] + + # Semantic segmentation logits of shape (batch_size, num_classes, height, width) + segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) + batch_size = class_queries_logits.shape[0] + + # Resize logits and compute semantic segmentation maps + if target_sizes is not None: + if batch_size != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + + semantic_segmentation = [] + for idx in range(batch_size): + resized_logits = torch.nn.functional.interpolate( + segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False + ) + semantic_map = resized_logits[0].argmax(dim=0) + semantic_segmentation.append(semantic_map) + else: + semantic_segmentation = segmentation.argmax(dim=1) + semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] + + return semantic_segmentation + + def post_process_instance_segmentation( + self, + outputs, + threshold: float = 0.5, + mask_threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + target_sizes: Optional[List[Tuple[int, int]]] = None, + return_coco_annotation: Optional[bool] = False, + return_binary_maps: Optional[bool] = False, + ) -> List[Dict]: + """ + Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into instance segmentation predictions. Only + supports PyTorch. + + Args: + outputs ([`MaskFormerForInstanceSegmentation`]): + Raw outputs of the model. + threshold (`float`, *optional*, defaults to 0.5): + The probability score threshold to keep predicted instance masks. + mask_threshold (`float`, *optional*, defaults to 0.5): + Threshold to use when turning the predicted masks into binary values. + overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): + The overlap mask area threshold to merge or discard small disconnected parts within each binary + instance mask. + target_sizes (`List[Tuple]`, *optional*): + List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested + final size (height, width) of each prediction. If left to None, predictions will not be resized. + return_coco_annotation (`bool`, *optional*, defaults to `False`): + If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. + return_binary_maps (`bool`, *optional*, defaults to `False`): + If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps + (one per detected instance). + Returns: + `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: + - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or + `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to + `True`. Set to `None` if no mask if found above `threshold`. + - **segments_info** -- A dictionary that contains additional information on each segment. + - **id** -- An integer representing the `segment_id`. 
+ - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. + - **score** -- Prediction score of segment with `segment_id`. + """ + if return_coco_annotation and return_binary_maps: + raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.") + + # [batch_size, num_queries, num_classes+1] + class_queries_logits = outputs.class_queries_logits + # [batch_size, num_queries, height, width] + masks_queries_logits = outputs.masks_queries_logits + + device = masks_queries_logits.device + num_classes = class_queries_logits.shape[-1] - 1 + num_queries = class_queries_logits.shape[-2] + + # Loop over items in batch size + results: List[Dict[str, TensorType]] = [] + + for i in range(class_queries_logits.shape[0]): + mask_pred = masks_queries_logits[i] + mask_cls = class_queries_logits[i] + + scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1] + labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) + + scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) + labels_per_image = labels[topk_indices] + + topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor") + mask_pred = mask_pred[topk_indices] + pred_masks = (mask_pred > 0).float() + + # Calculate average mask prob + mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / ( + pred_masks.flatten(1).sum(1) + 1e-6 + ) + pred_scores = scores_per_image * mask_scores_per_image + pred_classes = labels_per_image + + segmentation = torch.zeros(masks_queries_logits.shape[2:]) - 1 + if target_sizes is not None: + segmentation = torch.zeros(target_sizes[i]) - 1 + pred_masks = torch.nn.functional.interpolate( + pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest" + )[0] + + instance_maps, segments = [], [] + current_segment_id = 0 + for j in range(num_queries): + score = pred_scores[j].item() + + if not torch.all(pred_masks[j] == 0) and score >= threshold: + segmentation[pred_masks[j] == 1] = current_segment_id + segments.append( + { + "id": current_segment_id, + "label_id": pred_classes[j].item(), + "was_fused": False, + "score": round(score, 6), + } + ) + current_segment_id += 1 + instance_maps.append(pred_masks[j]) + + # Return segmentation map in run-length encoding (RLE) format + if return_coco_annotation: + segmentation = convert_segmentation_to_rle(segmentation) + + # Return a concatenated tensor of binary instance maps + if return_binary_maps and len(instance_maps) != 0: + segmentation = torch.stack(instance_maps, dim=0) + + results.append({"segmentation": segmentation, "segments_info": segments}) + return results + + def post_process_panoptic_segmentation( + self, + outputs, + threshold: float = 0.5, + mask_threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + label_ids_to_fuse: Optional[Set[int]] = None, + target_sizes: Optional[List[Tuple[int, int]]] = None, + ) -> List[Dict]: + """ + Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation + predictions. Only supports PyTorch. + + Args: + outputs ([`MaskFormerForInstanceSegmentationOutput`]): + The outputs from [`MaskFormerForInstanceSegmentation`]. + threshold (`float`, *optional*, defaults to 0.5): + The probability score threshold to keep predicted instance masks. + mask_threshold (`float`, *optional*, defaults to 0.5): + Threshold to use when turning the predicted masks into binary values. 
+ overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): + The overlap mask area threshold to merge or discard small disconnected parts within each binary + instance mask. + label_ids_to_fuse (`Set[int]`, *optional*): + The labels in this state will have all their instances be fused together. For instance we could say + there can only be one sky in an image, but several persons, so the label ID for sky would be in that + set, but not the one for person. + target_sizes (`List[Tuple]`, *optional*): + List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested + final size (height, width) of each prediction in batch. If left to None, predictions will not be + resized. + + Returns: + `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: + - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set + to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized + to the corresponding `target_sizes` entry. + - **segments_info** -- A dictionary that contains additional information on each segment. + - **id** -- an integer representing the `segment_id`. + - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. + - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. + Multiple instances of the same class / label were fused and assigned a single `segment_id`. + - **score** -- Prediction score of segment with `segment_id`. + """ + + if label_ids_to_fuse is None: + logger.warning("`label_ids_to_fuse` unset. No instance will be fused.") + label_ids_to_fuse = set() + + class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] + + batch_size = class_queries_logits.shape[0] + num_labels = class_queries_logits.shape[-1] - 1 + + mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] + + # Predicted label and score of each query (batch_size, num_queries) + pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) + + # Loop over items in batch size + results: List[Dict[str, TensorType]] = [] + + for i in range(batch_size): + mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( + mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels + ) + + # No mask found + if mask_probs_item.shape[0] <= 0: + height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] + segmentation = torch.zeros((height, width)) - 1 + results.append({"segmentation": segmentation, "segments_info": []}) + continue + + # Get segmentation map and segment information of batch item + target_size = target_sizes[i] if target_sizes is not None else None + segmentation, segments = compute_segments( + mask_probs=mask_probs_item, + pred_scores=pred_scores_item, + pred_labels=pred_labels_item, + mask_threshold=mask_threshold, + overlap_mask_area_threshold=overlap_mask_area_threshold, + label_ids_to_fuse=label_ids_to_fuse, + target_size=target_size, + ) + + results.append({"segmentation": segmentation, "segments_info": segments}) + return results diff --git a/venv/lib/python3.10/site-packages/transformers/models/maskformer/modeling_maskformer.py 
b/venv/lib/python3.10/site-packages/transformers/models/maskformer/modeling_maskformer.py new file mode 100644 index 0000000000000000000000000000000000000000..4419a36e9f840a6e685ce415523884a00298c8bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/maskformer/modeling_maskformer.py @@ -0,0 +1,1963 @@ +# coding=utf-8 +# Copyright 2022 Meta Platforms, Inc.s and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch MaskFormer model.""" + +import math +from dataclasses import dataclass +from numbers import Number +from typing import Dict, List, Optional, Tuple + +import numpy as np +import torch +from torch import Tensor, nn + +from ...activations import ACT2FN +from ...modeling_attn_mask_utils import _prepare_4d_attention_mask +from ...modeling_outputs import BaseModelOutputWithCrossAttentions +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import is_torch_greater_or_equal_than_2_1 +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_accelerate_available, + is_scipy_available, + logging, + replace_return_docstrings, + requires_backends, +) +from ...utils.backbone_utils import load_backbone +from ..detr import DetrConfig +from .configuration_maskformer import MaskFormerConfig +from .configuration_maskformer_swin import MaskFormerSwinConfig + + +if is_accelerate_available(): + from accelerate import PartialState + from accelerate.utils import reduce + +if is_scipy_available(): + from scipy.optimize import linear_sum_assignment + +logger = logging.get_logger(__name__) + + +_CONFIG_FOR_DOC = "MaskFormerConfig" +_CHECKPOINT_FOR_DOC = "facebook/maskformer-swin-base-ade" + + +from ..deprecated._archive_maps import MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +@dataclass +# Copied from transformers.models.detr.modeling_detr.DetrDecoderOutput +class DetrDecoderOutput(BaseModelOutputWithCrossAttentions): + """ + Base class for outputs of the DETR decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions, + namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them + gone through a layernorm. This is useful when training the model with auxiliary decoding losses. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer + plus the initial embedding outputs. 
+        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+            the self-attention heads.
+        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
+            used to compute the weighted average in the cross-attention heads.
+        intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
+            Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
+            layernorm.
+    """
+
+    intermediate_hidden_states: Optional[torch.FloatTensor] = None
+
+
+@dataclass
+class MaskFormerPixelLevelModuleOutput(ModelOutput):
+    """
+    MaskFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the
+    `encoder` and `decoder`. By default, the `encoder` is a MaskFormerSwin Transformer and the `decoder` is a Feature
+    Pyramid Network (FPN).
+
+    The `encoder_last_hidden_state` is referred to in the paper as **image features**, while the
+    `decoder_last_hidden_state` is referred to as **pixel embeddings**.
+
+    Args:
+        encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+            Last hidden states (final feature map) of the last stage of the encoder.
+        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+            shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at
+            the output of each stage.
+        decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+            Last hidden states (final feature map) of the last stage of the decoder.
+        decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+            shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at
+            the output of each stage.
+    """
+
+    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    decoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class MaskFormerPixelDecoderOutput(ModelOutput):
+    """
+    MaskFormer's pixel decoder module output, in practice a Feature Pyramid Network. It returns the last hidden state
+    and (optionally) the hidden states.
+ + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Last hidden states (final feature map) of the last stage of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer + plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights from Detr's decoder after the attention softmax, used to compute the + weighted average in the self-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MaskFormerModelOutput(ModelOutput): + """ + Class for outputs of [`MaskFormerModel`]. This class returns all the needed hidden states to compute the logits. + + Args: + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Last hidden states (final feature map) of the last stage of the encoder model (backbone). + pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN). + transformer_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Last hidden states (final feature map) of the last stage of the transformer decoder model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder + model at the output of each stage. + pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel + decoder model at the output of each stage. + transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the + transformer decoder at the output of each stage. 
+        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` containing `encoder_hidden_states`, `pixel_decoder_hidden_states` and
+            `decoder_hidden_states`.
+        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`. Attentions weights from Detr's decoder after the attention softmax, used to compute the
+            weighted average in the self-attention heads.
+    """
+
+    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    pixel_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    transformer_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    pixel_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    transformer_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class MaskFormerForInstanceSegmentationOutput(ModelOutput):
+    """
+    Class for outputs of [`MaskFormerForInstanceSegmentation`].
+
+    This output can be directly passed to [`~MaskFormerImageProcessor.post_process_semantic_segmentation`] or
+    [`~MaskFormerImageProcessor.post_process_instance_segmentation`] or
+    [`~MaskFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please see
+    [`~MaskFormerImageProcessor`] for details regarding usage.
+
+    Args:
+        loss (`torch.Tensor`, *optional*):
+            The computed loss, returned when labels are present.
+        class_queries_logits (`torch.FloatTensor`):
+            A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each
+            query. Note the `+ 1` is needed because we incorporate the null class.
+        masks_queries_logits (`torch.FloatTensor`):
+            A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each
+            query.
+        encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+            Last hidden states (final feature map) of the last stage of the encoder model (backbone).
+        pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+            Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN).
+        transformer_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+            Last hidden states (final feature map) of the last stage of the transformer decoder model.
+        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+            shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
+            model at the output of each stage.
+        pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+            shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
+            decoder model at the output of each stage.
+        transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the transformer decoder at the output
+            of each stage.
+        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `torch.FloatTensor` containing `encoder_hidden_states`, `pixel_decoder_hidden_states` and
+            `decoder_hidden_states`.
+        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`. Attentions weights from Detr's decoder after the attention softmax, used to compute the
+            weighted average in the self-attention heads.
+    """
+
+    loss: Optional[torch.FloatTensor] = None
+    class_queries_logits: torch.FloatTensor = None
+    masks_queries_logits: torch.FloatTensor = None
+    auxiliary_logits: torch.FloatTensor = None
+    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    pixel_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    transformer_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    pixel_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    transformer_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+    attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+def upsample_like(pixel_values: Tensor, like: Tensor, mode: str = "bilinear") -> Tensor:
+    """
+    A utility function that upsamples `pixel_values` to match the dimensions of `like`.
+
+    Args:
+        pixel_values (`torch.Tensor`):
+            The tensor we wish to upsample.
+        like (`torch.Tensor`):
+            The tensor we wish to use as size target.
+        mode (str, *optional*, defaults to `"bilinear"`):
+            The interpolation mode.
+
+    Returns:
+        `torch.Tensor`: The upsampled tensor
+    """
+    _, _, height, width = like.shape
+    upsampled = nn.functional.interpolate(pixel_values, size=(height, width), mode=mode, align_corners=False)
+    return upsampled
+
+
+# refactored from original implementation
+def dice_loss(inputs: Tensor, labels: Tensor, num_masks: int) -> Tensor:
+    r"""
+    Compute the DICE loss, similar to generalized IOU for masks, as follows:
+
+    $$ \mathcal{L}_{\text{dice}}(x, y) = 1 - \frac{2 |x \cap y| + 1}{|x| + |y| + 1} $$
+
+    In practice, since `labels` is a binary mask (only 0s and 1s), dice can be computed as follows:
+
+    $$ \mathcal{L}_{\text{dice}}(x, y) = 1 - \frac{2 \sum_i x_i y_i + 1}{\sum_i x_i + \sum_i y_i + 1} $$
+
+    Args:
+        inputs (`torch.Tensor`):
+            A tensor representing a mask.
+        labels (`torch.Tensor`):
+            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
+            (0 for the negative class and 1 for the positive class).
+        num_masks (`int`):
+            The number of masks present in the current batch, used for normalization.
+
+    Returns:
+        `torch.Tensor`: The computed loss.
+    """
+    probs = inputs.sigmoid().flatten(1)
+    numerator = 2 * (probs * labels).sum(-1)
+    denominator = probs.sum(-1) + labels.sum(-1)
+    loss = 1 - (numerator + 1) / (denominator + 1)
+    loss = loss.sum() / num_masks
+    return loss
+
+
+# refactored from original implementation
+def sigmoid_focal_loss(
+    inputs: Tensor, labels: Tensor, num_masks: int, alpha: float = 0.25, gamma: float = 2
+) -> Tensor:
+    r"""
+    Focal loss proposed in [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002), originally used
+    in RetinaNet. The loss is computed as follows:
+
+    $$ \mathcal{L}_{\text{focal}}(p_t) = -(1 - p_t)^{\gamma} \log(p_t) $$
+
+    where \\(\text{CE}(p_t) = -\log(p_t)\\) is the standard cross-entropy loss.
+
+    Please refer to equations (1), (2) and (3) of the paper for a better understanding.
+
+    Args:
+        inputs (`torch.Tensor`):
+            A float tensor of arbitrary shape.
+        labels (`torch.Tensor`):
+            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
+            (0 for the negative class and 1 for the positive class).
+        num_masks (`int`):
+            The number of masks present in the current batch, used for normalization.
+        alpha (float, *optional*, defaults to 0.25):
+            Weighting factor in range (0, 1) to balance positive vs negative examples.
+        gamma (float, *optional*, defaults to 2.0):
+            Exponent of the modulating factor \\(1 - p_t\\) to balance easy vs hard examples.
+
+    Returns:
+        `torch.Tensor`: The computed loss.
+    """
+    criterion = nn.BCEWithLogitsLoss(reduction="none")
+    probs = inputs.sigmoid()
+    cross_entropy_loss = criterion(inputs, labels)
+    p_t = probs * labels + (1 - probs) * (1 - labels)
+    loss = cross_entropy_loss * ((1 - p_t) ** gamma)
+
+    if alpha >= 0:
+        alpha_t = alpha * labels + (1 - alpha) * (1 - labels)
+        loss = alpha_t * loss
+
+    loss = loss.mean(1).sum() / num_masks
+    return loss
+
+
+# refactored from original implementation
+def pair_wise_dice_loss(inputs: Tensor, labels: Tensor) -> Tensor:
+    """
+    A pairwise version of the dice loss; see `dice_loss` for usage.
+
+    Args:
+        inputs (`torch.Tensor`):
+            A tensor representing a mask
+        labels (`torch.Tensor`):
+            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
+            (0 for the negative class and 1 for the positive class).
+
+    Returns:
+        `torch.Tensor`: The computed loss between each pair.
+    """
+    inputs = inputs.sigmoid().flatten(1)
+    numerator = 2 * torch.matmul(inputs, labels.T)
+    # using broadcasting to get a [num_queries, num_labels] matrix
+    denominator = inputs.sum(-1)[:, None] + labels.sum(-1)[None, :]
+    loss = 1 - (numerator + 1) / (denominator + 1)
+    return loss
+
+
+# refactored from original implementation
+def pair_wise_sigmoid_focal_loss(inputs: Tensor, labels: Tensor, alpha: float = 0.25, gamma: float = 2.0) -> Tensor:
+    r"""
+    A pairwise version of the focal loss; see `sigmoid_focal_loss` for usage.
+
+    Args:
+        inputs (`torch.Tensor`):
+            A tensor representing a mask.
+        labels (`torch.Tensor`):
+            A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs
+            (0 for the negative class and 1 for the positive class).
+ """ + if alpha < 0: + raise ValueError("alpha must be non-negative") + + height_and_width = inputs.shape[1] + + criterion = nn.BCEWithLogitsLoss(reduction="none") + prob = inputs.sigmoid() + cross_entropy_loss_pos = criterion(inputs, torch.ones_like(inputs)) + focal_pos = ((1 - prob) ** gamma) * cross_entropy_loss_pos + focal_pos *= alpha + + cross_entropy_loss_neg = criterion(inputs, torch.zeros_like(inputs)) + + focal_neg = (prob**gamma) * cross_entropy_loss_neg + focal_neg *= 1 - alpha + + loss = torch.matmul(focal_pos, labels.T) + torch.matmul(focal_neg, (1 - labels).T) + + return loss / height_and_width + + +# Copied from transformers.models.detr.modeling_detr.DetrAttention +class DetrAttention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. + + Here, we add position embeddings to the queries and keys (as explained in the DETR paper). + """ + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + if self.head_dim * num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): + return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor], **kwargs): + position_embeddings = kwargs.pop("position_embeddings", None) + + if kwargs: + raise ValueError(f"Unexpected arguments {kwargs.keys()}") + + if position_embeddings is not None and object_queries is not None: + raise ValueError( + "Cannot specify both position_embeddings and object_queries. Please use just object_queries" + ) + + if position_embeddings is not None: + logger.warning_once( + "position_embeddings has been deprecated and will be removed in v4.34.
Please use object_queries instead" + ) + object_queries = position_embeddings + + return tensor if object_queries is None else tensor + object_queries + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + object_queries: Optional[torch.Tensor] = None, + key_value_states: Optional[torch.Tensor] = None, + spatial_position_embeddings: Optional[torch.Tensor] = None, + output_attentions: bool = False, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + position_embeddings = kwargs.pop("position_embeddings", None) + key_value_position_embeddings = kwargs.pop("key_value_position_embeddings", None) + + if kwargs: + raise ValueError(f"Unexpected arguments {kwargs.keys()}") + + if position_embeddings is not None and object_queries is not None: + raise ValueError( + "Cannot specify both position_embeddings and object_queries. Please use just object_queries" + ) + + if key_value_position_embeddings is not None and spatial_position_embeddings is not None: + raise ValueError( + "Cannot specify both key_value_position_embeddings and spatial_position_embeddings. Please use just spatial_position_embeddings" + ) + + if position_embeddings is not None: + logger.warning_once( + "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead" + ) + object_queries = position_embeddings + + if key_value_position_embeddings is not None: + logger.warning_once( + "key_value_position_embeddings has been deprecated and will be removed in v4.34. Please use spatial_position_embeddings instead" + ) + spatial_position_embeddings = key_value_position_embeddings + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + batch_size, target_len, embed_dim = hidden_states.size() + + # add position embeddings to the hidden states before projecting to queries and keys + if object_queries is not None: + hidden_states_original = hidden_states + hidden_states = self.with_pos_embed(hidden_states, object_queries) + + # add key-value position embeddings to the key value states + if spatial_position_embeddings is not None: + key_value_states_original = key_value_states + key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings) + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) + value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) + value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) + + proj_shape = (batch_size * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + source_len = key_states.size(1) + + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): + raise ValueError( + f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if
attention_mask.size() != (batch_size, 1, target_len, source_len): + raise ValueError( + f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" + f" {attention_mask.size()}" + ) + attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask + attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(batch_size * self.num_heads, target_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(batch_size, target_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +# Copied from transformers.models.detr.modeling_detr.DetrDecoderLayer +class DetrDecoderLayer(nn.Module): + def __init__(self, config: DetrConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = DetrAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.encoder_attn = DetrAttention( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + object_queries: Optional[torch.Tensor] = None, + query_position_embeddings: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + **kwargs, + ): + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative + values. + object_queries (`torch.FloatTensor`, *optional*): + Object queries that are added to the hidden states + in the cross-attention layer. + query_position_embeddings (`torch.FloatTensor`, *optional*): + Position embeddings that are added to the queries and keys + in the self-attention layer.
+ encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(batch, seq_len, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative + values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + position_embeddings = kwargs.pop("position_embeddings", None) + + if kwargs: + raise ValueError(f"Unexpected arguments {kwargs.keys()}") + + if position_embeddings is not None and object_queries is not None: + raise ValueError( + "Cannot specify both position_embeddings and object_queries. Please use just object_queries" + ) + + if position_embeddings is not None: + logger.warning_once( + "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead" + ) + object_queries = position_embeddings + + residual = hidden_states + + # Self Attention + hidden_states, self_attn_weights = self.self_attn( + hidden_states=hidden_states, + object_queries=query_position_embeddings, + attention_mask=attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Cross-Attention Block + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + + hidden_states, cross_attn_weights = self.encoder_attn( + hidden_states=hidden_states, + object_queries=query_position_embeddings, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + spatial_position_embeddings=object_queries, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + return outputs + + +class DetrDecoder(nn.Module): + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DetrDecoderLayer`]. + + The decoder updates the query embeddings through multiple self-attention and cross-attention layers. + + Some small tweaks for DETR: + + - object_queries and query_position_embeddings are added to the forward pass. + - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. 
+ + Args: + config: DetrConfig + """ + + def __init__(self, config: DetrConfig): + super().__init__() + self.config = config + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + + self.layers = nn.ModuleList([DetrDecoderLayer(config) for _ in range(config.decoder_layers)]) + # in DETR, the decoder uses layernorm after the last decoder layer output + self.layernorm = nn.LayerNorm(config.d_model) + + self.gradient_checkpointing = False + + def forward( + self, + inputs_embeds=None, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + object_queries=None, + query_position_embeddings=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **kwargs, + ): + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + The query embeddings that are passed into the decoder. + + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`: + + - 1 for queries that are **not masked**, + - 0 for queries that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected + in `[0, 1]`: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Position embeddings that are added to the queries and keys in each cross-attention layer. + query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): + Position embeddings that are added to the queries and keys in each self-attention layer. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + position_embeddings = kwargs.pop("position_embeddings", None) + if kwargs: + raise ValueError(f"Unexpected arguments {kwargs.keys()}") + + if position_embeddings is not None and object_queries is not None: + raise ValueError( + "Cannot specify both position_embeddings and object_queries. Please use just object_queries" + ) + + if position_embeddings is not None: + logger.warning_once( + "position_embeddings has been deprecated and will be removed in v4.34.
Please use object_queries instead" + ) + object_queries = position_embeddings + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if inputs_embeds is not None: + hidden_states = inputs_embeds + input_shape = inputs_embeds.size()[:-1] + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _prepare_4d_attention_mask( + encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + ) + + # optional intermediate hidden states + intermediate = () if self.config.auxiliary_loss else None + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: + continue + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + None, + encoder_hidden_states, + encoder_attention_mask, + None, + output_attentions, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=None, + object_queries=object_queries, + query_position_embeddings=query_position_embeddings, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if self.config.auxiliary_loss: + hidden_states = self.layernorm(hidden_states) + intermediate += (hidden_states,) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # finally, apply layernorm + hidden_states = self.layernorm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + # stack intermediate decoder activations + if self.config.auxiliary_loss: + intermediate = torch.stack(intermediate) + + if not return_dict: + return tuple( + v + for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate] + if v is not None + ) + return DetrDecoderOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + intermediate_hidden_states=intermediate, + ) + + +# refactored from original implementation +class MaskFormerHungarianMatcher(nn.Module): + """This class computes an assignment between the labels and the predictions of the network. + + For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more + predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are + un-matched (and thus treated as non-objects). 
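+ + Example (an illustrative sketch; the shapes and random inputs below are made up, and `scipy` is required): + + ```python + >>> import torch + >>> matcher = MaskFormerHungarianMatcher(cost_class=1.0, cost_mask=1.0, cost_dice=1.0) + >>> masks_queries_logits = torch.randn(1, 10, 32, 32) # (batch_size, num_queries, height, width) + >>> class_queries_logits = torch.randn(1, 10, 5) # (batch_size, num_queries, num_labels + 1) + >>> mask_labels = [torch.randint(0, 2, (3, 32, 32)).float()] # 3 ground-truth binary masks + >>> class_labels = [torch.tensor([0, 2, 4])] # their class labels + >>> indices = matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels) + >>> [tuple(len(i) for i in pair) for pair in indices] # 3 predictions matched to the 3 labels + [(3, 3)] + ```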
+ """ + + def __init__(self, cost_class: float = 1.0, cost_mask: float = 1.0, cost_dice: float = 1.0): + """Creates the matcher + + Params: + cost_class (float, *optional*, defaults to 1.0): + This is the relative weight of the classification error in the matching cost. + cost_mask (float, *optional*, defaults to 1.0): + This is the relative weight of the focal loss of the binary mask in the matching cost. + cost_dice (float, *optional*, defaults to 1.0): + This is the relative weight of the dice loss of the binary mask in the matching cost + """ + super().__init__() + if cost_class == 0 and cost_mask == 0 and cost_dice == 0: + raise ValueError("All costs cant be 0") + self.cost_class = cost_class + self.cost_mask = cost_mask + self.cost_dice = cost_dice + + @torch.no_grad() + def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]: + """Performs the matching + + Params: + masks_queries_logits (`torch.Tensor`): + A tensor` of dim `batch_size, num_queries, num_labels` with the + classification logits. + class_queries_logits (`torch.Tensor`): + A tensor` of dim `batch_size, num_queries, height, width` with the + predicted masks. + + class_labels (`torch.Tensor`): + A tensor` of dim `num_target_boxes` (where num_target_boxes is the number + of ground-truth objects in the target) containing the class labels. + mask_labels (`torch.Tensor`): + A tensor` of dim `num_target_boxes, height, width` containing the target + masks. + + Returns: + `List[Tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected labels (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_boxes). + """ + indices: List[Tuple[np.array]] = [] + + preds_masks = masks_queries_logits + preds_probs = class_queries_logits + # iterate through batch size + for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels): + # downsample the target mask, save memory + target_mask = nn.functional.interpolate(target_mask[:, None], size=pred_mask.shape[-2:], mode="nearest") + pred_probs = pred_probs.softmax(-1) + # Compute the classification cost. Contrary to the loss, we don't use the NLL, + # but approximate it in 1 - proba[target class]. + # The 1 is a constant that doesn't change the matching, it can be ommitted. 
+ cost_class = -pred_probs[:, labels] + # flatten spatial dimension "q h w -> q (h w)" + pred_mask_flat = pred_mask.flatten(1) # [num_queries, height*width] + # same for target_mask "c h w -> c (h w)" + target_mask_flat = target_mask[:, 0].flatten(1) # [num_total_labels, height*width] + # compute the focal loss between each mask pair -> shape (num_queries, num_labels) + cost_mask = pair_wise_sigmoid_focal_loss(pred_mask_flat, target_mask_flat) + # compute the dice loss between each mask pair -> shape (num_queries, num_labels) + cost_dice = pair_wise_dice_loss(pred_mask_flat, target_mask_flat) + # final cost matrix + cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice + # do the assignment using the Hungarian algorithm from scipy + assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu()) + indices.append(assigned_indices) + + # the indices could also be stacked into a single tensor + matched_indices = [ + (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices + ] + return matched_indices + + def __repr__(self): + head = "Matcher " + self.__class__.__name__ + body = [ + f"cost_class: {self.cost_class}", + f"cost_mask: {self.cost_mask}", + f"cost_dice: {self.cost_dice}", + ] + _repr_indent = 4 + lines = [head] + [" " * _repr_indent + line for line in body] + return "\n".join(lines) + + +# copied and adapted from original implementation +class MaskFormerLoss(nn.Module): + def __init__( + self, + num_labels: int, + matcher: MaskFormerHungarianMatcher, + weight_dict: Dict[str, float], + eos_coef: float, + ): + """ + The MaskFormer Loss. The loss is computed very similarly to DETR. The process happens in two steps: 1) we + compute the Hungarian assignment between the ground truth masks and the outputs of the model, and 2) we + supervise each pair of matched ground-truth / prediction (both the class and the mask). + + Args: + num_labels (`int`): + The number of classes. + matcher (`MaskFormerHungarianMatcher`): + A torch module that computes the assignments between the predictions and labels. + weight_dict (`Dict[str, float]`): + A dictionary of weights to be applied to the different losses. + eos_coef (`float`): + Weight to apply to the null class.
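+ + Example (an illustrative sketch of how `eos_coef` down-weights the null class in the `empty_weight` buffer built below): + + ```python + >>> import torch + >>> num_labels, eos_coef = 3, 0.1 # made-up values + >>> empty_weight = torch.ones(num_labels + 1) + >>> empty_weight[-1] = eos_coef + >>> empty_weight + tensor([1.0000, 1.0000, 1.0000, 0.1000]) + ```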
+ """ + + super().__init__() + requires_backends(self, ["scipy"]) + self.num_labels = num_labels + self.matcher = matcher + self.weight_dict = weight_dict + self.eos_coef = eos_coef + empty_weight = torch.ones(self.num_labels + 1) + empty_weight[-1] = self.eos_coef + self.register_buffer("empty_weight", empty_weight) + + def _max_by_axis(self, the_list: List[List[int]]) -> List[int]: + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + def _pad_images_to_max_in_batch(self, tensors: List[Tensor]) -> Tuple[Tensor, Tensor]: + # get the maximum size in the batch + max_size = self._max_by_axis([list(tensor.shape) for tensor in tensors]) + batch_size = len(tensors) + # compute finel size + batch_shape = [batch_size] + max_size + b, _, h, w = batch_shape + # get metadata + dtype = tensors[0].dtype + device = tensors[0].device + padded_tensors = torch.zeros(batch_shape, dtype=dtype, device=device) + padding_masks = torch.ones((b, h, w), dtype=torch.bool, device=device) + # pad the tensors to the size of the biggest one + for tensor, padded_tensor, padding_mask in zip(tensors, padded_tensors, padding_masks): + padded_tensor[: tensor.shape[0], : tensor.shape[1], : tensor.shape[2]].copy_(tensor) + padding_mask[: tensor.shape[1], : tensor.shape[2]] = False + + return padded_tensors, padding_masks + + def loss_labels( + self, class_queries_logits: Tensor, class_labels: List[Tensor], indices: Tuple[np.array] + ) -> Dict[str, Tensor]: + """Compute the losses related to the labels using cross entropy. + + Args: + class_queries_logits (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, num_labels` + class_labels (`List[torch.Tensor]`): + List of class labels of shape `(labels)`. + indices (`Tuple[np.array])`: + The indices computed by the Hungarian matcher. + + Returns: + `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key: + - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels. + """ + + pred_logits = class_queries_logits + batch_size, num_queries, _ = pred_logits.shape + criterion = nn.CrossEntropyLoss(weight=self.empty_weight) + idx = self._get_predictions_permutation_indices(indices) + # shape = (batch_size, num_queries) + target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)]) + # shape = (batch_size, num_queries) + target_classes = torch.full( + (batch_size, num_queries), fill_value=self.num_labels, dtype=torch.int64, device=pred_logits.device + ) + target_classes[idx] = target_classes_o + # target_classes is a (batch_size, num_labels, num_queries), we need to permute pred_logits "b q c -> b c q" + pred_logits_transposed = pred_logits.transpose(1, 2) + loss_ce = criterion(pred_logits_transposed, target_classes) + losses = {"loss_cross_entropy": loss_ce} + return losses + + def loss_masks( + self, masks_queries_logits: Tensor, mask_labels: List[Tensor], indices: Tuple[np.array], num_masks: int + ) -> Dict[str, Tensor]: + """Compute the losses related to the masks using focal and dice loss. + + Args: + masks_queries_logits (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, height, width` + mask_labels (`torch.Tensor`): + List of mask labels of shape `(labels, height, width)`. + indices (`Tuple[np.array])`: + The indices computed by the Hungarian matcher. + num_masks (`int)`: + The number of masks, used for normalization. 
+ + Returns: + `Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys: + - **loss_mask** -- The loss computed using sigmoid focal loss on the predicted and ground truth masks. + - **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks. + """ + src_idx = self._get_predictions_permutation_indices(indices) + tgt_idx = self._get_targets_permutation_indices(indices) + # shape (batch_size * num_queries, height, width) + pred_masks = masks_queries_logits[src_idx] + # shape (batch_size, num_queries, height, width) + # pad all and stack the targets to the num_labels dimension + target_masks, _ = self._pad_images_to_max_in_batch(mask_labels) + target_masks = target_masks[tgt_idx] + # upsample predictions to the target size, we have to add one dim to use interpolate + pred_masks = nn.functional.interpolate( + pred_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False + ) + pred_masks = pred_masks[:, 0].flatten(1) + + target_masks = target_masks.flatten(1) + losses = { + "loss_mask": sigmoid_focal_loss(pred_masks, target_masks, num_masks), + "loss_dice": dice_loss(pred_masks, target_masks, num_masks), + } + return losses + + def _get_predictions_permutation_indices(self, indices): + # permute predictions following indices + batch_indices = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) + predictions_indices = torch.cat([src for (src, _) in indices]) + return batch_indices, predictions_indices + + def _get_targets_permutation_indices(self, indices): + # permute labels following indices + batch_indices = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) + target_indices = torch.cat([tgt for (_, tgt) in indices]) + return batch_indices, target_indices + + def forward( + self, + masks_queries_logits: Tensor, + class_queries_logits: Tensor, + mask_labels: List[Tensor], + class_labels: List[Tensor], + auxiliary_predictions: Optional[Dict[str, Tensor]] = None, + ) -> Dict[str, Tensor]: + """ + This performs the loss computation. + + Args: + masks_queries_logits (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, height, width` + class_queries_logits (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, num_labels` + mask_labels (`List[torch.Tensor]`): + List of mask labels of shape `(labels, height, width)`. + class_labels (`List[torch.Tensor]`): + List of class labels of shape `(labels)`. + auxiliary_predictions (`Dict[str, torch.Tensor]`, *optional*): + if `use_auxiliary_loss` was set to `true` in [`MaskFormerConfig`], then it contains the logits from the + inner layers of the DETR decoder. + + Returns: + `Dict[str, Tensor]`: A dict of `torch.Tensor` containing three keys: + - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels. + - **loss_mask** -- The loss computed using sigmoid focal loss on the predicted and ground truth masks. + - **loss_dice** -- The loss computed using dice loss on the predicted and ground truth masks. + if `use_auxiliary_loss` was set to `true` in [`MaskFormerConfig`], the dictionary contains additional + losses for each auxiliary prediction.
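+ + Example (an illustrative sketch with random inputs; `weight_dict` is unused inside `forward` itself, since weighting happens later in the model): + + ```python + >>> import torch + >>> matcher = MaskFormerHungarianMatcher() + >>> criterion = MaskFormerLoss(num_labels=4, matcher=matcher, weight_dict={}, eos_coef=0.1) + >>> masks_queries_logits = torch.randn(1, 10, 32, 32) + >>> class_queries_logits = torch.randn(1, 10, 5) # num_labels + 1 classes + >>> mask_labels = [torch.randint(0, 2, (2, 32, 32)).float()] # 2 ground-truth masks + >>> class_labels = [torch.tensor([1, 3])] + >>> losses = criterion(masks_queries_logits, class_queries_logits, mask_labels, class_labels) + >>> sorted(losses.keys()) + ['loss_cross_entropy', 'loss_dice', 'loss_mask'] + ```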
+ """ + + # retrieve the matching between the outputs of the last layer and the labels + indices = self.matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels) + # compute the average number of target masks for normalization purposes + num_masks: Number = self.get_num_masks(class_labels, device=class_labels[0].device) + # get all the losses + losses: Dict[str, Tensor] = { + **self.loss_masks(masks_queries_logits, mask_labels, indices, num_masks), + **self.loss_labels(class_queries_logits, class_labels, indices), + } + # in case of auxiliary losses, we repeat this process with the output of each intermediate layer. + if auxiliary_predictions is not None: + for idx, aux_outputs in enumerate(auxiliary_predictions): + masks_queries_logits = aux_outputs["masks_queries_logits"] + class_queries_logits = aux_outputs["class_queries_logits"] + loss_dict = self.forward(masks_queries_logits, class_queries_logits, mask_labels, class_labels) + loss_dict = {f"{key}_{idx}": value for key, value in loss_dict.items()} + losses.update(loss_dict) + + return losses + + def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor: + """ + Computes the average number of target masks across the batch, for normalization purposes. + """ + num_masks = sum([len(classes) for classes in class_labels]) + num_masks = torch.as_tensor(num_masks, dtype=torch.float, device=device) + world_size = 1 + if is_accelerate_available(): + if PartialState._shared_state != {}: + num_masks = reduce(num_masks) + world_size = PartialState().num_processes + + num_masks = torch.clamp(num_masks / world_size, min=1) + return num_masks + + +class MaskFormerFPNConvLayer(nn.Module): + def __init__(self, in_features: int, out_features: int, kernel_size: int = 3, padding: int = 1): + """ + A basic module that executes conv - norm - in sequence used in MaskFormer. + + Args: + in_features (`int`): + The number of input features (channels). + out_features (`int`): + The number of outputs features (channels). + """ + super().__init__() + self.layers = [ + nn.Conv2d(in_features, out_features, kernel_size=kernel_size, padding=padding, bias=False), + nn.GroupNorm(32, out_features), + nn.ReLU(inplace=True), + ] + for i, layer in enumerate(self.layers): + # Provide backwards compatibility from when the class inherited from nn.Sequential + # In nn.Sequential subclasses, the name given to the layer is its index in the sequence. + # In nn.Module subclasses they derived from the instance attribute they are assigned to e.g. + # self.my_layer_name = Layer() + # We can't give instance attributes integer names i.e. self.0 is not permitted and so need to register + # explicitly + self.add_module(str(i), layer) + + def forward(self, input: Tensor) -> Tensor: + hidden_state = input + for layer in self.layers: + hidden_state = layer(hidden_state) + return hidden_state + + +class MaskFormerFPNLayer(nn.Module): + def __init__(self, in_features: int, lateral_features: int): + """ + A Feature Pyramid Network Layer (FPN) layer. It creates a feature map by aggregating features from the previous + and backbone layer. Due to the spatial mismatch, the tensor coming from the previous layer is upsampled. + + Args: + in_features (`int`): + The number of input features (channels). + lateral_features (`int`): + The number of lateral features (channels). 
+ """ + super().__init__() + self.proj = nn.Sequential( + nn.Conv2d(lateral_features, in_features, kernel_size=1, padding=0, bias=False), + nn.GroupNorm(32, in_features), + ) + + self.block = MaskFormerFPNConvLayer(in_features, in_features) + + def forward(self, down: Tensor, left: Tensor) -> Tensor: + left = self.proj(left) + down = nn.functional.interpolate(down, size=left.shape[-2:], mode="nearest") + down += left + down = self.block(down) + return down + + +class MaskFormerFPNModel(nn.Module): + def __init__(self, in_features: int, lateral_widths: List[int], feature_size: int = 256): + """ + Feature Pyramid Network, given an input tensor and a set of feature map of different feature/spatial size, it + creates a list of feature maps with the same feature size. + + Args: + in_features (`int`): + The number of input features (channels). + lateral_widths (`List[int]`): + A list with the features (channels) size of each lateral connection. + feature_size (int, *optional*, defaults to 256): + The features (channels) of the resulting feature maps. + """ + super().__init__() + self.stem = MaskFormerFPNConvLayer(in_features, feature_size) + self.layers = nn.Sequential( + *[MaskFormerFPNLayer(feature_size, lateral_width) for lateral_width in lateral_widths[::-1]] + ) + + def forward(self, features: List[Tensor]) -> List[Tensor]: + fpn_features = [] + last_feature = features[-1] + other_features = features[:-1] + output = self.stem(last_feature) + for layer, left in zip(self.layers, other_features[::-1]): + output = layer(output, left) + fpn_features.append(output) + return fpn_features + + +class MaskFormerPixelDecoder(nn.Module): + def __init__(self, *args, feature_size: int = 256, mask_feature_size: int = 256, **kwargs): + r""" + Pixel Decoder Module proposed in [Per-Pixel Classification is Not All You Need for Semantic + Segmentation](https://arxiv.org/abs/2107.06278). It first runs the backbone's features into a Feature Pyramid + Network creating a list of feature maps. Then, it projects the last one to the correct `mask_size`. + + Args: + feature_size (`int`, *optional*, defaults to 256): + The feature size (channel dimension) of the FPN feature maps. + mask_feature_size (`int`, *optional*, defaults to 256): + The features (channels) of the target masks size \\(C_{\epsilon}\\) in the paper. + """ + super().__init__() + + self.fpn = MaskFormerFPNModel(*args, feature_size=feature_size, **kwargs) + self.mask_projection = nn.Conv2d(feature_size, mask_feature_size, kernel_size=3, padding=1) + + def forward( + self, features: List[Tensor], output_hidden_states: bool = False, return_dict: bool = True + ) -> MaskFormerPixelDecoderOutput: + fpn_features = self.fpn(features) + # we use the last feature map + last_feature_projected = self.mask_projection(fpn_features[-1]) + + if not return_dict: + return (last_feature_projected, tuple(fpn_features)) if output_hidden_states else (last_feature_projected,) + + return MaskFormerPixelDecoderOutput( + last_hidden_state=last_feature_projected, hidden_states=tuple(fpn_features) if output_hidden_states else () + ) + + +# copied and adapted from original implementation, also practically equal to DetrSinePositionEmbedding +class MaskFormerSinePositionEmbedding(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one used by the Attention is all you + need paper, generalized to work on images. 
+ """ + + def __init__( + self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None + ): + super().__init__() + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + self.scale = 2 * math.pi if scale is None else scale + + def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor: + if mask is None: + mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) + not_mask = (~mask).to(x.dtype) + y_embed = not_mask.cumsum(1) + x_embed = not_mask.cumsum(2) + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=x.device).type_as(x) + dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +class PredictionBlock(nn.Module): + def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None: + super().__init__() + self.layers = [nn.Linear(in_dim, out_dim), activation] + # Maintain submodule indexing as if part of a Sequential block + for i, layer in enumerate(self.layers): + self.add_module(str(i), layer) + + def forward(self, input: Tensor) -> Tensor: + hidden_state = input + for layer in self.layers: + hidden_state = layer(hidden_state) + return hidden_state + + +class MaskformerMLPPredictionHead(nn.Module): + def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int = 3): + """ + A classic Multi Layer Perceptron (MLP). + + Args: + input_dim (`int`): + The input dimensions. + hidden_dim (`int`): + The hidden dimensions. + output_dim (`int`): + The output dimensions. + num_layers (int, *optional*, defaults to 3): + The number of layers. + """ + super().__init__() + in_dims = [input_dim] + [hidden_dim] * (num_layers - 1) + out_dims = [hidden_dim] * (num_layers - 1) + [output_dim] + + self.layers = [] + for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)): + activation = nn.ReLU() if i < num_layers - 1 else nn.Identity() + layer = PredictionBlock(in_dim, out_dim, activation=activation) + self.layers.append(layer) + # Provide backwards compatibility from when the class inherited from nn.Sequential + # In nn.Sequential subclasses, the name given to the layer is its index in the sequence. + # In nn.Module subclasses they derived from the instance attribute they are assigned to e.g. + # self.my_layer_name = Layer() + # We can't give instance attributes integer names i.e. 
+ self.add_module(str(i), layer) + + def forward(self, input: Tensor) -> Tensor: + hidden_state = input + for layer in self.layers: + hidden_state = layer(hidden_state) + return hidden_state + + +class MaskFormerPixelLevelModule(nn.Module): + def __init__(self, config: MaskFormerConfig): + """ + Pixel Level Module proposed in [Per-Pixel Classification is Not All You Need for Semantic + Segmentation](https://arxiv.org/abs/2107.06278). It runs the input image through a backbone and a pixel + decoder, generating an image feature map and pixel embeddings. + + Args: + config ([`MaskFormerConfig`]): + The configuration used to instantiate this model. + """ + super().__init__() + if getattr(config, "backbone_config") is not None and config.backbone_config.model_type == "swin": + # for backwards compatibility + backbone_config = config.backbone_config + backbone_config = MaskFormerSwinConfig.from_dict(backbone_config.to_dict()) + backbone_config.out_features = ["stage1", "stage2", "stage3", "stage4"] + config.backbone_config = backbone_config + self.encoder = load_backbone(config) + + feature_channels = self.encoder.channels + self.decoder = MaskFormerPixelDecoder( + in_features=feature_channels[-1], + feature_size=config.fpn_feature_size, + mask_feature_size=config.mask_feature_size, + lateral_widths=feature_channels[:-1], + ) + + def forward( + self, pixel_values: Tensor, output_hidden_states: bool = False, return_dict: bool = True + ) -> MaskFormerPixelLevelModuleOutput: + features = self.encoder(pixel_values).feature_maps + decoder_output = self.decoder(features, output_hidden_states, return_dict=return_dict) + + if not return_dict: + last_hidden_state = decoder_output[0] + outputs = (features[-1], last_hidden_state) + if output_hidden_states: + hidden_states = decoder_output[1] + outputs = outputs + (tuple(features),) + (hidden_states,) + return outputs + + return MaskFormerPixelLevelModuleOutput( + # the last feature is actually the output from the last layer + encoder_last_hidden_state=features[-1], + decoder_last_hidden_state=decoder_output.last_hidden_state, + encoder_hidden_states=tuple(features) if output_hidden_states else (), + decoder_hidden_states=decoder_output.hidden_states if output_hidden_states else (), + ) + + +class MaskFormerTransformerModule(nn.Module): + """ + The MaskFormer's transformer module.
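+ + It projects the image features to the decoder's hidden size when the channel counts differ, adds sinusoidal + position embeddings, and decodes a fixed set of learned object queries against the flattened image features + with a DETR decoder.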
+ """ + + def __init__(self, in_features: int, config: MaskFormerConfig): + super().__init__() + hidden_size = config.decoder_config.hidden_size + should_project = in_features != hidden_size + self.position_embedder = MaskFormerSinePositionEmbedding(num_pos_feats=hidden_size // 2, normalize=True) + self.queries_embedder = nn.Embedding(config.decoder_config.num_queries, hidden_size) + self.input_projection = nn.Conv2d(in_features, hidden_size, kernel_size=1) if should_project else None + self.decoder = DetrDecoder(config=config.decoder_config) + + def forward( + self, + image_features: Tensor, + output_hidden_states: bool = False, + output_attentions: bool = False, + return_dict: Optional[bool] = None, + ) -> DetrDecoderOutput: + if self.input_projection is not None: + image_features = self.input_projection(image_features) + object_queries = self.position_embedder(image_features) + # repeat the queries "q c -> b q c" + batch_size = image_features.shape[0] + queries_embeddings = self.queries_embedder.weight.unsqueeze(0).repeat(batch_size, 1, 1) + inputs_embeds = torch.zeros_like(queries_embeddings, requires_grad=True) + + batch_size, num_channels, height, width = image_features.shape + # rearrange both image_features and object_queries "b c h w -> b (h w) c" + image_features = image_features.view(batch_size, num_channels, height * width).permute(0, 2, 1) + object_queries = object_queries.view(batch_size, num_channels, height * width).permute(0, 2, 1) + + decoder_output: DetrDecoderOutput = self.decoder( + inputs_embeds=inputs_embeds, + attention_mask=None, + encoder_hidden_states=image_features, + encoder_attention_mask=None, + object_queries=object_queries, + query_position_embeddings=queries_embeddings, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + return decoder_output + + +MASKFORMER_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`MaskFormerConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +MASKFORMER_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`MaskFormerImageProcessor.__call__`] for details. + pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): + Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + [What are attention masks?](../glossary#attention-mask) + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of Detr's decoder attention layers. + return_dict (`bool`, *optional*): + Whether or not to return a [`~MaskFormerModelOutput`] instead of a plain tuple. 
+""" + + +class MaskFormerPreTrainedModel(PreTrainedModel): + config_class = MaskFormerConfig + base_model_prefix = "model" + main_input_name = "pixel_values" + + def _init_weights(self, module: nn.Module): + xavier_std = self.config.init_xavier_std + std = self.config.init_std + if isinstance(module, MaskFormerTransformerModule): + if module.input_projection is not None: + nn.init.xavier_uniform_(module.input_projection.weight, gain=xavier_std) + nn.init.constant_(module.input_projection.bias, 0) + # FPN + elif isinstance(module, MaskFormerFPNModel): + nn.init.xavier_uniform_(module.stem.get_submodule("0").weight, gain=xavier_std) + + elif isinstance(module, MaskFormerFPNLayer): + nn.init.xavier_uniform_(module.proj[0].weight, gain=xavier_std) + + elif isinstance(module, MaskFormerFPNConvLayer): + nn.init.xavier_uniform_(module.get_submodule("0").weight, gain=xavier_std) + # The MLP head + elif isinstance(module, MaskformerMLPPredictionHead): + # I was not able to find the correct initializer in the original implementation + # we'll use xavier + for submodule in module.modules(): + if isinstance(submodule, nn.Linear): + nn.init.xavier_uniform_(submodule.weight, gain=xavier_std) + nn.init.constant_(submodule.bias, 0) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + # copied from DETR + if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +@add_start_docstrings( + "The bare MaskFormer Model outputting raw hidden-states without any specific head on top.", + MASKFORMER_START_DOCSTRING, +) +class MaskFormerModel(MaskFormerPreTrainedModel): + def __init__(self, config: MaskFormerConfig): + super().__init__(config) + self.pixel_level_module = MaskFormerPixelLevelModule(config) + self.transformer_module = MaskFormerTransformerModule( + in_features=self.pixel_level_module.encoder.channels[-1], config=config + ) + + self.post_init() + + @add_start_docstrings_to_model_forward(MASKFORMER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=MaskFormerModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Tensor, + pixel_mask: Optional[Tensor] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> MaskFormerModelOutput: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoImageProcessor, MaskFormerModel + >>> from PIL import Image + >>> import requests + + >>> # load MaskFormer fine-tuned on ADE20k semantic segmentation + >>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade") + >>> model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-base-ade") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = image_processor(image, return_tensors="pt") + + >>> # forward pass + >>> outputs = model(**inputs) + + >>> # the decoder of MaskFormer outputs hidden states of shape (batch_size, num_queries, hidden_size) + >>> 
transformer_decoder_last_hidden_state = outputs.transformer_decoder_last_hidden_state + >>> list(transformer_decoder_last_hidden_state.shape) + [1, 100, 256] + ```""" + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, _, height, width = pixel_values.shape + + if pixel_mask is None: + pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device) + + pixel_level_module_output = self.pixel_level_module( + pixel_values, output_hidden_states, return_dict=return_dict + ) + image_features = pixel_level_module_output[0] + pixel_embeddings = pixel_level_module_output[1] + + transformer_module_output = self.transformer_module(image_features, output_hidden_states, output_attentions) + queries = transformer_module_output.last_hidden_state + + encoder_hidden_states = None + pixel_decoder_hidden_states = None + transformer_decoder_hidden_states = None + hidden_states = None + + if output_hidden_states: + encoder_hidden_states = pixel_level_module_output[2] + pixel_decoder_hidden_states = pixel_level_module_output[3] + transformer_decoder_hidden_states = transformer_module_output[1] + hidden_states = encoder_hidden_states + pixel_decoder_hidden_states + transformer_decoder_hidden_states + + output = MaskFormerModelOutput( + encoder_last_hidden_state=image_features, + pixel_decoder_last_hidden_state=pixel_embeddings, + transformer_decoder_last_hidden_state=queries, + encoder_hidden_states=encoder_hidden_states, + pixel_decoder_hidden_states=pixel_decoder_hidden_states, + transformer_decoder_hidden_states=transformer_decoder_hidden_states, + hidden_states=hidden_states, + attentions=transformer_module_output.attentions, + ) + + if not return_dict: + output = tuple(v for v in output.values()) + + return output + + +class MaskFormerForInstanceSegmentation(MaskFormerPreTrainedModel): + def __init__(self, config: MaskFormerConfig): + super().__init__(config) + self.model = MaskFormerModel(config) + hidden_size = config.decoder_config.hidden_size + # + 1 because we add the "null" class + self.class_predictor = nn.Linear(hidden_size, config.num_labels + 1) + self.mask_embedder = MaskformerMLPPredictionHead(hidden_size, hidden_size, config.mask_feature_size) + + self.matcher = MaskFormerHungarianMatcher( + cost_class=1.0, cost_dice=config.dice_weight, cost_mask=config.mask_weight + ) + + self.weight_dict: Dict[str, float] = { + "loss_cross_entropy": config.cross_entropy_weight, + "loss_mask": config.mask_weight, + "loss_dice": config.dice_weight, + } + + self.criterion = MaskFormerLoss( + config.num_labels, + matcher=self.matcher, + weight_dict=self.weight_dict, + eos_coef=config.no_object_weight, + ) + + self.post_init() + + def get_loss_dict( + self, + masks_queries_logits: Tensor, + class_queries_logits: Tensor, + mask_labels: Tensor, + class_labels: Tensor, + auxiliary_logits: Dict[str, Tensor], + ) -> Dict[str, Tensor]: + loss_dict: Dict[str, Tensor] = self.criterion( + masks_queries_logits, class_queries_logits, mask_labels, class_labels, auxiliary_logits + ) + # weight each loss by `self.weight_dict[]` including auxiliary losses + for key, weight in self.weight_dict.items(): + for loss_key, loss in loss_dict.items(): + 
if key in loss_key: + loss *= weight + + return loss_dict + + def get_loss(self, loss_dict: Dict[str, Tensor]) -> Tensor: + return sum(loss_dict.values()) + + def get_logits(self, outputs: MaskFormerModelOutput) -> Tuple[Tensor, Tensor, List[Dict[str, Tensor]]]: + pixel_embeddings = outputs.pixel_decoder_last_hidden_state + # get the auxiliary predictions (one for each decoder layer) + auxiliary_logits: List[Dict[str, Tensor]] = [] + + is_tracing = ( + torch.jit.is_tracing() + or isinstance(outputs, torch.fx.Proxy) + or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling()) + ) + # This code is a little bit cumbersome; an improvement could be to return a list of predictions. If we have + # auxiliary loss, then we are going to return more than one element in the list + if self.config.use_auxiliary_loss: + stacked_transformer_decoder_outputs = torch.stack(outputs.transformer_decoder_hidden_states) + classes = self.class_predictor(stacked_transformer_decoder_outputs) + class_queries_logits = classes[-1] + # get the masks + mask_embeddings = self.mask_embedder(stacked_transformer_decoder_outputs) + + if is_tracing and not is_torch_greater_or_equal_than_2_1: + # Equivalent to einsum('lbqc, bchw -> lbqhw') but jit friendly + num_embeddings, batch_size, num_queries, num_channels = mask_embeddings.shape + _, _, height, width = pixel_embeddings.shape + binaries_masks = torch.zeros( + (num_embeddings, batch_size, num_queries, height, width), device=mask_embeddings.device + ) + for c in range(num_channels): + binaries_masks += mask_embeddings[..., c][..., None, None] * pixel_embeddings[None, :, None, c] + else: + binaries_masks = torch.einsum("lbqc, bchw -> lbqhw", mask_embeddings, pixel_embeddings) + + masks_queries_logits = binaries_masks[-1] + # go up to [:-1] because the last one is the final prediction, which is used above + for aux_binary_masks, aux_classes in zip(binaries_masks[:-1], classes[:-1]): + auxiliary_logits.append( + {"masks_queries_logits": aux_binary_masks, "class_queries_logits": aux_classes} + ) + + else: + transformer_decoder_hidden_states = outputs.transformer_decoder_last_hidden_state + classes = self.class_predictor(transformer_decoder_hidden_states) + class_queries_logits = classes + # get the masks + mask_embeddings = self.mask_embedder(transformer_decoder_hidden_states) + # sum up over the channels + + if is_tracing and not is_torch_greater_or_equal_than_2_1: + # Equivalent to einsum('bqc, bchw -> bqhw') but jit friendly + batch_size, num_queries, num_channels = mask_embeddings.shape + _, _, height, width = pixel_embeddings.shape + masks_queries_logits = torch.zeros( + (batch_size, num_queries, height, width), device=mask_embeddings.device + ) + for c in range(num_channels): + masks_queries_logits += mask_embeddings[..., c][..., None, None] * pixel_embeddings[:, None, c] + else: + masks_queries_logits = torch.einsum("bqc, bchw -> bqhw", mask_embeddings, pixel_embeddings) + + return class_queries_logits, masks_queries_logits, auxiliary_logits + + @add_start_docstrings_to_model_forward(MASKFORMER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=MaskFormerForInstanceSegmentationOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Tensor, + mask_labels: Optional[List[Tensor]] = None, + class_labels: Optional[List[Tensor]] = None, + pixel_mask: Optional[Tensor] = None, + output_auxiliary_logits: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) ->
+
+    @add_start_docstrings_to_model_forward(MASKFORMER_INPUTS_DOCSTRING)
+    @replace_return_docstrings(output_type=MaskFormerForInstanceSegmentationOutput, config_class=_CONFIG_FOR_DOC)
+    def forward(
+        self,
+        pixel_values: Tensor,
+        mask_labels: Optional[List[Tensor]] = None,
+        class_labels: Optional[List[Tensor]] = None,
+        pixel_mask: Optional[Tensor] = None,
+        output_auxiliary_logits: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> MaskFormerForInstanceSegmentationOutput:
+        r"""
+        mask_labels (`List[torch.Tensor]`, *optional*):
+            List of mask labels of shape `(num_labels, height, width)` to be fed to a model
+        class_labels (`List[torch.LongTensor]`, *optional*):
+            List of target class labels of shape `(num_labels,)` to be fed to a model. They identify the
+            labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` is `class_labels[i][j]`.
+
+        Returns:
+
+        Examples:
+
+        Semantic segmentation example:
+
+        ```python
+        >>> from transformers import AutoImageProcessor, MaskFormerForInstanceSegmentation
+        >>> from PIL import Image
+        >>> import requests
+
+        >>> # load MaskFormer fine-tuned on ADE20k semantic segmentation
+        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade")
+        >>> model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-ade")
+
+        >>> url = (
+        ...     "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
+        ... )
+        >>> image = Image.open(requests.get(url, stream=True).raw)
+        >>> inputs = image_processor(images=image, return_tensors="pt")
+
+        >>> outputs = model(**inputs)
+        >>> # model predicts class_queries_logits of shape `(batch_size, num_queries, num_labels + 1)`
+        >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
+        >>> class_queries_logits = outputs.class_queries_logits
+        >>> masks_queries_logits = outputs.masks_queries_logits
+
+        >>> # you can pass them to image_processor for postprocessing
+        >>> predicted_semantic_map = image_processor.post_process_semantic_segmentation(
+        ...     outputs, target_sizes=[image.size[::-1]]
+        ... )[0]
+
+        >>> # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs)
+        >>> list(predicted_semantic_map.shape)
+        [512, 683]
+        ```
+
+        Panoptic segmentation example:
+
+        ```python
+        >>> from transformers import AutoImageProcessor, MaskFormerForInstanceSegmentation
+        >>> from PIL import Image
+        >>> import requests
+
+        >>> # load MaskFormer fine-tuned on COCO panoptic segmentation
+        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-coco")
+        >>> model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-coco")
+
+        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+        >>> image = Image.open(requests.get(url, stream=True).raw)
+        >>> inputs = image_processor(images=image, return_tensors="pt")
+
+        >>> outputs = model(**inputs)
+        >>> # model predicts class_queries_logits of shape `(batch_size, num_queries, num_labels + 1)`
+        >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
+        >>> class_queries_logits = outputs.class_queries_logits
+        >>> masks_queries_logits = outputs.masks_queries_logits
+
+        >>> # you can pass them to image_processor for postprocessing
+        >>> result = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
+
+        >>> # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs)
+        >>> predicted_panoptic_map = result["segmentation"]
+        >>> list(predicted_panoptic_map.shape)
+        [480, 640]
+        ```
+        """
+
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
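+        # run the base model; hidden states are forced on when auxiliary losses are enabled,
+        # since get_logits then needs the per-layer transformer decoder states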
+        raw_outputs = self.model(
+            pixel_values,
+            pixel_mask,
+            output_hidden_states=output_hidden_states or self.config.use_auxiliary_loss,
+            return_dict=return_dict,
+            output_attentions=output_attentions,
+        )
+        # We need to have raw_outputs optionally be returned as a dict to use torch.compile. For backwards
+        # compatibility we convert to a dataclass for the rest of the model logic.
+        outputs = MaskFormerModelOutput(
+            encoder_last_hidden_state=raw_outputs[0],
+            pixel_decoder_last_hidden_state=raw_outputs[1],
+            transformer_decoder_last_hidden_state=raw_outputs[2],
+            encoder_hidden_states=raw_outputs[3] if output_hidden_states else None,
+            pixel_decoder_hidden_states=raw_outputs[4] if output_hidden_states else None,
+            transformer_decoder_hidden_states=raw_outputs[5] if output_hidden_states else None,
+            hidden_states=raw_outputs[6] if output_hidden_states else None,
+            attentions=raw_outputs[-1] if output_attentions else None,
+        )
+
+        loss, loss_dict, auxiliary_logits = None, None, None
+
+        class_queries_logits, masks_queries_logits, auxiliary_logits = self.get_logits(outputs)
+
+        if mask_labels is not None and class_labels is not None:
+            loss_dict: Dict[str, Tensor] = self.get_loss_dict(
+                masks_queries_logits, class_queries_logits, mask_labels, class_labels, auxiliary_logits
+            )
+            loss = self.get_loss(loss_dict)
+
+        output_auxiliary_logits = (
+            self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits
+        )
+        if not output_auxiliary_logits:
+            auxiliary_logits = None
+
+        if not return_dict:
+            output = tuple(
+                v
+                for v in (loss, class_queries_logits, masks_queries_logits, auxiliary_logits, *outputs.values())
+                if v is not None
+            )
+            return output
+
+        return MaskFormerForInstanceSegmentationOutput(
+            loss=loss,
+            **outputs,
+            class_queries_logits=class_queries_logits,
+            masks_queries_logits=masks_queries_logits,
+            auxiliary_logits=auxiliary_logits,
+        )
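To close the loop on the training path, here is a hedged sketch of computing the loss with `mask_labels`/`class_labels`. The config values, input size, and the two toy instances are assumptions made for illustration, not taken from the diff; a randomly initialized model stands in for a real fine-tuning setup.

```python
import torch
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation

# small random model; num_labels=10 is an arbitrary choice for this sketch
config = MaskFormerConfig(num_labels=10)
model = MaskFormerForInstanceSegmentation(config)

pixel_values = torch.randn(1, 3, 384, 384)
# one image with two invented ground-truth instances: binary masks plus their class ids
mask_labels = [torch.randint(0, 2, (2, 384, 384)).float()]
class_labels = [torch.tensor([1, 4])]

outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
# `loss` is the weighted sum produced by get_loss_dict / get_loss above
outputs.loss.backward()
print(float(outputs.loss))
```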