diff --git a/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..50a8fa0bb8b5213aeda9f92f3ce1c3f6fd2b62dc --- /dev/null +++ b/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78360f4d1d772a77788debccedf58c109bba5d80fdac54fd112422a1dbdbd642 +size 33555612 diff --git a/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..c3cbfe170f2a6d29f4955e82fb07fb6141d74ec4 --- /dev/null +++ b/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a8bb42223e43c01e253aaca28b07dfbb8a56a6dae8a7f4acd6e4350cb6f13da +size 33555612 diff --git a/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..01c42bdade9054a541b6a0e919ec5b61687c81f5 --- /dev/null +++ b/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a13e60f37f4c984daf36f763810991ee3a752924bc08fdbe2733726afca78c2b +size 33555627 diff --git a/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..06e8afc3be8bdcfdbb5825c98b14707aa8b3b12e --- /dev/null +++ b/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f239c8632c49f505f40b311dbc4c31fae11fc587f4b321f712995de2b4fdfd07 +size 33555533 diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3fba2107e20f91e50460e6f6f06ce47b9e93aeb8 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715682654 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/output.log b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..a76f3b759372b818a2216d8470f1d15441c9d4e1 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/output.log @@ -0,0 +1,42 @@ + +2024-05-14:10:30:54,824 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:10:30:59,283 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:10:30:59,285 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:10:30:59,285 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step120'} +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File 
"/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/data/cronscript/ckpts//hf_ckpt//global_step120'. Use `repo_type` argument if needed. +The above exception was the direct cause of the following exception: +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File 
"/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 462, in cached_file + raise EnvironmentError( +OSError: Incorrect path_or_model_id: '/data/cronscript/ckpts//hf_ckpt//global_step120'. Please provide either the path to a local folder or the repo_id of a model on the Hub. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 
+habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 
+transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..c03cdf29605c48172f0d52910c00726c08d0aeb2 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T10:30:54.693726", + "startedAt": "2024-05-14T10:30:54.279381", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3391.302519736842, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3350.795, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3285.487, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 
+ }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 76.92251205444336 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..01b1b3e3e97a0b05ca000647393a059738a2fa22 --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-05-14 10:30:54,299 INFO StreamThr :8265 [internal.py:wandb_internal():85] W&B internal server running at pid: 8265, started at: 2024-05-14 10:30:54.298530 +2024-05-14 10:30:54,301 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: status +2024-05-14 10:30:54,302 INFO WriterThread:8265 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/run-cudpayxz.wandb +2024-05-14 10:30:54,303 DEBUG SenderThread:8265 [sender.py:send():378] send: header +2024-05-14 10:30:54,312 DEBUG SenderThread:8265 [sender.py:send():378] send: run +2024-05-14 10:30:54,553 INFO SenderThread:8265 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files +2024-05-14 10:30:54,553 INFO SenderThread:8265 [sender.py:_start_run_threads():1123] run started: cudpayxz with start time 1715682654.298488 +2024-05-14 10:30:54,561 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 10:30:54,561 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: check_version +2024-05-14 10:30:54,645 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 10:30:54,647 DEBUG HandlerThread:8265 [system_info.py:__init__():26] System info init +2024-05-14 10:30:54,647 DEBUG HandlerThread:8265 [system_info.py:__init__():41] System info init done +2024-05-14 10:30:54,647 INFO HandlerThread:8265 [system_monitor.py:start():194] Starting system monitor +2024-05-14 10:30:54,647 INFO SystemMonitor:8265 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 10:30:54,647 INFO HandlerThread:8265 [system_monitor.py:probe():214] Collecting system info +2024-05-14 10:30:54,648 INFO SystemMonitor:8265 [interfaces.py:start():188] Started cpu 
monitoring +2024-05-14 10:30:54,648 INFO SystemMonitor:8265 [interfaces.py:start():188] Started disk monitoring +2024-05-14 10:30:54,650 INFO SystemMonitor:8265 [interfaces.py:start():188] Started memory monitoring +2024-05-14 10:30:54,651 INFO SystemMonitor:8265 [interfaces.py:start():188] Started network monitoring +2024-05-14 10:30:54,693 DEBUG HandlerThread:8265 [system_info.py:probe():150] Probing system +2024-05-14 10:30:54,701 DEBUG HandlerThread:8265 [system_info.py:_probe_git():135] Probing git +2024-05-14 10:30:54,722 ERROR HandlerThread:8265 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 10:30:54,722 DEBUG HandlerThread:8265 [system_info.py:_probe_git():143] Probing git done +2024-05-14 10:30:54,722 DEBUG HandlerThread:8265 [system_info.py:probe():198] Probing system done +2024-05-14 10:30:54,722 DEBUG HandlerThread:8265 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T10:30:54.693726', 'startedAt': '2024-05-14T10:30:54.279381', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3391.302519736842, 'min': 800.0, 
'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3350.795, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3285.487, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 76.92251205444336}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 10:30:54,722 INFO HandlerThread:8265 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 10:30:54,722 INFO HandlerThread:8265 [system_monitor.py:probe():227] Publishing system info 
+2024-05-14 10:30:54,723 INFO HandlerThread:8265 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 10:30:54,727 DEBUG SenderThread:8265 [sender.py:send():378] send: files +2024-05-14 10:30:54,727 INFO SenderThread:8265 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 10:30:54,821 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 10:30:54,822 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:30:54,822 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: python_packages +2024-05-14 10:30:54,823 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:30:55,009 DEBUG SenderThread:8265 [sender.py:send():378] send: telemetry +2024-05-14 10:30:55,237 INFO wandb-upload_0:8265 [upload_job.py:push():130] Uploaded file /tmp/tmpp868mnxywandb/cpjled7s-wandb-metadata.json +2024-05-14 10:30:55,555 INFO Thread-12 :8265 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/requirements.txt +2024-05-14 10:30:55,555 INFO Thread-12 :8265 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-metadata.json +2024-05-14 10:30:55,555 INFO Thread-12 :8265 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/output.log +2024-05-14 10:30:57,555 INFO Thread-12 :8265 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/output.log +2024-05-14 10:31:00,287 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:31:00,376 DEBUG SenderThread:8265 [sender.py:send():378] send: exit 
+2024-05-14 10:31:00,376 INFO SenderThread:8265 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 10:31:00,376 INFO SenderThread:8265 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 10:31:00,377 INFO SenderThread:8265 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:31:00,377 INFO SenderThread:8265 [sender.py:send_exit():593] send defer +2024-05-14 10:31:00,377 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,377 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 10:31:00,378 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,378 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 10:31:00,378 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 1 +2024-05-14 10:31:00,378 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,378 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 10:31:00,378 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,378 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 10:31:00,378 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 2 +2024-05-14 10:31:00,378 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,378 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 10:31:00,378 INFO HandlerThread:8265 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 10:31:00,378 INFO HandlerThread:8265 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 10:31:00,379 INFO HandlerThread:8265 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 10:31:00,379 DEBUG SystemMonitor:8265 
[system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 10:31:00,379 INFO HandlerThread:8265 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 10:31:00,379 DEBUG SystemMonitor:8265 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 10:31:00,379 INFO HandlerThread:8265 [interfaces.py:finish():200] Joined network monitor +2024-05-14 10:31:00,379 DEBUG SystemMonitor:8265 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 10:31:00,381 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,381 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 10:31:00,381 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 3 +2024-05-14 10:31:00,381 DEBUG SenderThread:8265 [sender.py:send():378] send: stats +2024-05-14 10:31:00,381 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,381 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 10:31:00,381 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,381 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 10:31:00,381 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 4 +2024-05-14 10:31:00,382 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,382 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 10:31:00,382 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,382 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 10:31:00,382 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 5 +2024-05-14 10:31:00,382 DEBUG HandlerThread:8265 
[handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,382 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 10:31:00,382 DEBUG SenderThread:8265 [sender.py:send():378] send: summary +2024-05-14 10:31:00,383 INFO SenderThread:8265 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:31:00,383 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,383 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 10:31:00,383 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 6 +2024-05-14 10:31:00,383 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,383 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 10:31:00,383 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,383 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 10:31:00,385 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:31:00,470 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 7 +2024-05-14 10:31:00,470 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,470 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 10:31:00,470 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,470 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 10:31:00,557 INFO Thread-12 :8265 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/config.yaml +2024-05-14 10:31:00,557 INFO Thread-12 :8265 
[dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-summary.json +2024-05-14 10:31:01,019 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 8 +2024-05-14 10:31:01,019 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:01,019 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 10:31:01,020 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:01,020 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 10:31:01,020 INFO SenderThread:8265 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 10:31:01,020 INFO SenderThread:8265 [job_builder.py:_get_source_type():576] no source found +2024-05-14 10:31:01,020 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 9 +2024-05-14 10:31:01,020 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:01,020 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 10:31:01,020 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:01,020 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 10:31:01,020 INFO SenderThread:8265 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 10:31:01,376 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:01,558 INFO SenderThread:8265 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/output.log +2024-05-14 10:31:01,558 INFO SenderThread:8265 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files +2024-05-14 
10:31:01,558 INFO SenderThread:8265 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/output.log output.log +2024-05-14 10:31:01,558 INFO SenderThread:8265 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/config.yaml config.yaml +2024-05-14 10:31:01,558 INFO SenderThread:8265 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-summary.json wandb-summary.json +2024-05-14 10:31:01,560 INFO SenderThread:8265 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-metadata.json wandb-metadata.json +2024-05-14 10:31:01,560 INFO SenderThread:8265 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/requirements.txt requirements.txt +2024-05-14 10:31:01,563 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 10 +2024-05-14 10:31:01,563 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:01,564 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:01,564 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 10:31:01,565 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:01,565 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 10:31:01,565 INFO SenderThread:8265 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:31:01,786 INFO wandb-upload_0:8265 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/output.log +2024-05-14 10:31:01,965 INFO wandb-upload_1:8265 [upload_job.py:push():130] Uploaded file 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/config.yaml +2024-05-14 10:31:02,039 INFO wandb-upload_3:8265 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/requirements.txt +2024-05-14 10:31:02,048 INFO wandb-upload_2:8265 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/files/wandb-summary.json +2024-05-14 10:31:02,248 INFO Thread-11 (_thread_body):8265 [sender.py:transition_state():613] send defer: 11 +2024-05-14 10:31:02,249 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:02,249 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 10:31:02,249 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:02,250 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 10:31:02,250 INFO SenderThread:8265 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 10:31:02,250 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 12 +2024-05-14 10:31:02,250 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:02,250 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 10:31:02,250 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:02,250 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 10:31:02,250 INFO SenderThread:8265 [file_stream.py:finish():601] file stream finish called +2024-05-14 10:31:02,376 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:02,482 INFO SenderThread:8265 [file_stream.py:finish():605] file stream finish is done +2024-05-14 10:31:02,482 INFO SenderThread:8265 
[sender.py:transition_state():613] send defer: 13 +2024-05-14 10:31:02,482 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:02,482 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:02,482 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 10:31:02,483 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:02,483 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 10:31:02,483 INFO SenderThread:8265 [sender.py:transition_state():613] send defer: 14 +2024-05-14 10:31:02,483 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:02,483 DEBUG SenderThread:8265 [sender.py:send():378] send: final +2024-05-14 10:31:02,483 INFO HandlerThread:8265 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 10:31:02,483 DEBUG SenderThread:8265 [sender.py:send():378] send: footer +2024-05-14 10:31:02,483 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:02,483 INFO SenderThread:8265 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 10:31:02,484 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:02,484 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:02,484 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:02,484 DEBUG SenderThread:8265 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:02,484 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 10:31:02,484 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 10:31:02,484 DEBUG SenderThread:8265 [sender.py:send_request():405] 
send_request: server_info +2024-05-14 10:31:02,485 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 10:31:02,486 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 10:31:02,539 INFO MainThread:8265 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 10:31:02,539 INFO MainThread:8265 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 10:31:02,539 INFO MainThread:8265 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 10:31:02,539 DEBUG HandlerThread:8265 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 10:31:02,539 INFO HandlerThread:8265 [handler.py:finish():882] shutting down handler +2024-05-14 10:31:03,484 INFO WriterThread:8265 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/run-cudpayxz.wandb +2024-05-14 10:31:03,539 INFO SenderThread:8265 [sender.py:finish():1545] shutting down sender +2024-05-14 10:31:03,539 INFO SenderThread:8265 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:31:03,539 INFO SenderThread:8265 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..da5f45223a31fad67d92195a121e9b40dbd994af --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_setup.py:_flush():76] Configure stats pid to 6973 +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings 
+2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 10:30:54,295 WARNING MainThread:6973 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/logs/debug.log +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/logs/debug-internal.log +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_init.py:init():560] calling init triggers +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_init.py:init():610] starting backend +2024-05-14 10:30:54,295 INFO MainThread:6973 [wandb_init.py:init():614] setting up manager +2024-05-14 10:30:54,297 INFO MainThread:6973 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 10:30:54,298 INFO MainThread:6973 [wandb_init.py:init():622] backend started and connected +2024-05-14 10:30:54,301 INFO MainThread:6973 [wandb_init.py:init():711] updated telemetry +2024-05-14 10:30:54,311 INFO MainThread:6973 
[wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 10:30:54,561 INFO MainThread:6973 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 10:30:54,641 INFO MainThread:6973 [wandb_run.py:_on_init():2405] got version response +2024-05-14 10:30:54,641 INFO MainThread:6973 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 10:30:54,822 INFO MainThread:6973 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 10:30:54,822 INFO MainThread:6973 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 10:30:54,822 INFO MainThread:6973 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 10:30:54,822 INFO MainThread:6973 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 10:30:54,823 INFO MainThread:6973 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 10:31:03,540 WARNING MsgRouterThr:6973 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/run-cudpayxz.wandb b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/run-cudpayxz.wandb new file mode 100644 index 0000000000000000000000000000000000000000..15b24eba1a50309b5b21883cbcca48469452e811 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103054-cudpayxz/run-cudpayxz.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ed5c9927301cee17218593c3d72e530127e3ddc7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + 
start_time: 1715704468 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..6b47906eba2fa3380d566411fe048ee51b40391f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/logs/debug-internal.log @@ -0,0 +1,193 @@ +2024-05-14 16:34:28,720 INFO StreamThr :118224 [internal.py:wandb_internal():85] W&B internal server running at pid: 118224, started at: 2024-05-14 16:34:28.719311 +2024-05-14 16:34:28,721 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:34:28,723 INFO WriterThread:118224 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/run-dlwc10vq.wandb +2024-05-14 16:34:28,723 DEBUG SenderThread:118224 [sender.py:send():378] send: header +2024-05-14 16:34:28,734 DEBUG SenderThread:118224 [sender.py:send():378] send: run +2024-05-14 16:34:28,958 INFO SenderThread:118224 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files +2024-05-14 16:34:28,958 INFO SenderThread:118224 [sender.py:_start_run_threads():1123] run started: dlwc10vq with start time 1715704468.719215 +2024-05-14 16:34:28,965 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:34:28,965 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:34:29,049 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:34:29,050 DEBUG 
HandlerThread:118224 [system_info.py:__init__():26] System info init +2024-05-14 16:34:29,051 DEBUG HandlerThread:118224 [system_info.py:__init__():41] System info init done +2024-05-14 16:34:29,051 INFO HandlerThread:118224 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:34:29,051 INFO SystemMonitor:118224 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:34:29,051 INFO HandlerThread:118224 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:34:29,052 INFO SystemMonitor:118224 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:34:29,052 INFO SystemMonitor:118224 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:34:29,053 INFO SystemMonitor:118224 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:34:29,053 INFO SystemMonitor:118224 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:34:29,154 DEBUG HandlerThread:118224 [system_info.py:probe():150] Probing system +2024-05-14 16:34:29,162 DEBUG HandlerThread:118224 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:34:29,183 ERROR HandlerThread:118224 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:34:29,183 DEBUG HandlerThread:118224 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:34:29,183 DEBUG HandlerThread:118224 [system_info.py:probe():198] Probing system done +2024-05-14 16:34:29,183 DEBUG HandlerThread:118224 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:34:29.154200', 'startedAt': '2024-05-14T16:34:28.708355', 'docker': None, 
'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3396.061605263158, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3280.656, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3284.865, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3223.887, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3244.473, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 
'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3291.536, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3206.218, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 
3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3220.102, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.5445137023926}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:34:29,183 INFO HandlerThread:118224 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:34:29,183 INFO HandlerThread:118224 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:34:29,185 INFO HandlerThread:118224 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:34:29,188 DEBUG SenderThread:118224 [sender.py:send():378] send: files +2024-05-14 16:34:29,188 INFO SenderThread:118224 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:34:29,282 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:34:29,282 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:34:29,282 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:34:29,283 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:34:29,400 DEBUG SenderThread:118224 [sender.py:send():378] send: telemetry +2024-05-14 16:34:29,684 INFO wandb-upload_0:118224 [upload_job.py:push():130] Uploaded file /tmp/tmp7vueuu_owandb/84y1w5w8-wandb-metadata.json +2024-05-14 16:34:29,959 INFO Thread-12 :118224 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-metadata.json +2024-05-14 16:34:29,959 INFO Thread-12 :118224 [dir_watcher.py:_on_file_created():271] file/dir created: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/requirements.txt +2024-05-14 16:34:29,960 INFO Thread-12 :118224 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log +2024-05-14 16:34:31,960 INFO Thread-12 :118224 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log +2024-05-14 16:34:33,983 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:35,962 INFO Thread-12 :118224 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log +2024-05-14 16:34:39,222 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:39,974 INFO Thread-12 :118224 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log +2024-05-14 16:34:43,977 INFO Thread-12 :118224 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log +2024-05-14 16:34:44,282 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:34:44,283 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:34:44,445 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:45,978 INFO Thread-12 :118224 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log +2024-05-14 16:34:47,980 INFO Thread-12 :118224 [dir_watcher.py:_on_file_modified():288] file/dir modified: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log +2024-05-14 16:34:50,279 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:52,301 DEBUG SenderThread:118224 [sender.py:send():378] send: exit +2024-05-14 16:34:52,301 INFO SenderThread:118224 [sender.py:send_exit():585] handling exit code: 0 +2024-05-14 16:34:52,301 INFO SenderThread:118224 [sender.py:send_exit():587] handling runtime: 23 +2024-05-14 16:34:52,302 INFO SenderThread:118224 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:34:52,302 INFO SenderThread:118224 [sender.py:send_exit():593] send defer +2024-05-14 16:34:52,302 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,302 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:34:52,303 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,303 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:34:52,303 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:34:52,303 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,303 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:34:52,303 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,303 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:34:52,303 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:34:52,303 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,303 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:34:52,303 
INFO HandlerThread:118224 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:34:52,303 DEBUG SystemMonitor:118224 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:34:52,304 INFO HandlerThread:118224 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:34:52,304 DEBUG SystemMonitor:118224 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:34:52,304 INFO HandlerThread:118224 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:34:52,304 DEBUG SystemMonitor:118224 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:34:52,304 INFO HandlerThread:118224 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:34:52,306 INFO HandlerThread:118224 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:34:52,306 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,306 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:34:52,306 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:34:52,307 DEBUG SenderThread:118224 [sender.py:send():378] send: stats +2024-05-14 16:34:52,307 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,307 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:34:52,307 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,308 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:34:52,308 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:34:52,308 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,308 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 4 
+2024-05-14 16:34:52,308 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,308 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:34:52,308 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:34:52,308 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,308 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:34:52,308 DEBUG SenderThread:118224 [sender.py:send():378] send: summary +2024-05-14 16:34:52,309 INFO SenderThread:118224 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:34:52,309 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,309 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:34:52,309 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:34:52,309 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,309 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:34:52,309 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,309 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:34:52,312 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:52,397 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:34:52,397 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,398 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:34:52,398 DEBUG SenderThread:118224 [sender.py:send_request():405] 
send_request: defer +2024-05-14 16:34:52,398 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:34:52,983 INFO Thread-12 :118224 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/config.yaml +2024-05-14 16:34:52,983 INFO Thread-12 :118224 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-summary.json +2024-05-14 16:34:53,301 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:34:55,221 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:34:55,222 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:34:55,222 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:55,222 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:34:55,222 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:55,222 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:34:55,222 INFO SenderThread:118224 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:34:55,223 INFO SenderThread:118224 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:34:55,223 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:34:55,223 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:55,223 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:34:55,223 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:55,223 INFO SenderThread:118224 [sender.py:send_request_defer():609] 
handle sender defer: 9 +2024-05-14 16:34:55,223 INFO SenderThread:118224 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:34:55,301 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:34:55,984 INFO SenderThread:118224 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log +2024-05-14 16:34:55,985 INFO SenderThread:118224 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files +2024-05-14 16:34:55,985 INFO SenderThread:118224 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/config.yaml config.yaml +2024-05-14 16:34:55,985 INFO SenderThread:118224 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-summary.json wandb-summary.json +2024-05-14 16:34:55,985 INFO SenderThread:118224 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log output.log +2024-05-14 16:34:55,987 INFO SenderThread:118224 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:34:55,989 INFO SenderThread:118224 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/requirements.txt requirements.txt +2024-05-14 16:34:55,991 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:34:55,991 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:34:55,992 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:55,992 INFO HandlerThread:118224 
[handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:34:55,993 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:55,993 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:34:55,993 INFO SenderThread:118224 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:34:56,214 INFO wandb-upload_0:118224 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/config.yaml +2024-05-14 16:34:56,302 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:34:56,302 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:34:56,412 INFO wandb-upload_1:118224 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-summary.json +2024-05-14 16:34:56,454 INFO wandb-upload_2:118224 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log +2024-05-14 16:34:56,473 INFO wandb-upload_3:118224 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/requirements.txt +2024-05-14 16:34:56,673 INFO Thread-11 (_thread_body):118224 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:34:56,673 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:56,673 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:34:56,675 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:56,675 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:34:56,675 INFO SenderThread:118224 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 
16:34:56,675 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:34:56,675 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:56,675 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:34:56,675 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:56,675 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:34:56,675 INFO SenderThread:118224 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:34:56,733 INFO SenderThread:118224 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:34:56,733 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:34:56,733 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:56,733 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:34:56,733 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:56,733 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:34:56,733 INFO SenderThread:118224 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:34:56,733 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:56,733 DEBUG SenderThread:118224 [sender.py:send():378] send: final +2024-05-14 16:34:56,733 INFO HandlerThread:118224 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:34:56,733 DEBUG SenderThread:118224 [sender.py:send():378] send: footer +2024-05-14 16:34:56,734 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:56,734 INFO SenderThread:118224 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:34:56,734 DEBUG 
HandlerThread:118224 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:34:56,734 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:34:56,734 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:34:56,734 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:34:56,735 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:34:56,735 DEBUG SenderThread:118224 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:34:56,735 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:34:56,736 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:34:56,736 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:34:56,788 INFO MainThread:118224 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:34:56,788 INFO MainThread:118224 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:34:56,788 INFO MainThread:118224 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:34:56,788 DEBUG HandlerThread:118224 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:34:56,788 INFO HandlerThread:118224 [handler.py:finish():882] shutting down handler +2024-05-14 16:34:57,735 INFO WriterThread:118224 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/run-dlwc10vq.wandb +2024-05-14 16:34:57,788 INFO SenderThread:118224 [sender.py:finish():1545] shutting down sender +2024-05-14 16:34:57,788 INFO SenderThread:118224 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:34:57,788 INFO SenderThread:118224 [file_pusher.py:join():175] waiting 
for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..5cb19b4af6092726cdcfd6c8163775c14d7b288a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_setup.py:_flush():76] Configure stats pid to 116795 +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:34:28,716 WARNING MainThread:116795 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/logs/debug.log +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/logs/debug-internal.log +2024-05-14 16:34:28,716 INFO 
MainThread:116795 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_init.py:init():610] starting backend +2024-05-14 16:34:28,716 INFO MainThread:116795 [wandb_init.py:init():614] setting up manager +2024-05-14 16:34:28,718 INFO MainThread:116795 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:34:28,719 INFO MainThread:116795 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:34:28,721 INFO MainThread:116795 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:34:28,733 INFO MainThread:116795 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:34:28,964 INFO MainThread:116795 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:34:29,044 INFO MainThread:116795 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:34:29,044 INFO MainThread:116795 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:34:29,283 INFO MainThread:116795 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:34:29,283 INFO MainThread:116795 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:34:29,283 INFO MainThread:116795 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:34:29,283 INFO MainThread:116795 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 16:34:29,284 INFO MainThread:116795 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:34:57,789 WARNING MsgRouterThr:116795 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/run-dlwc10vq.wandb b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/run-dlwc10vq.wandb new file mode 100644 index 0000000000000000000000000000000000000000..e88026444be232fc33576d71a561ff96a17289ca Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/run-dlwc10vq.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..770bd3a3bacd57b25320b34641708cdf1cd2fa1d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715704623 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/output.log b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..3c574e02f18b031969a712ee2c0e06f59b27e980 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/output.log @@ -0,0 +1,33 @@ + +2024-05-14:16:37:04,300 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:37:08,974 INFO [__main__.py:335] Selected Tasks: 
['indiccopa-hi'] +2024-05-14:16:37:08,977 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:37:08,977 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step20'} +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File 
"/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 369, in cached_file + raise EnvironmentError( +OSError: /data/cronscript/ckpts//hf_ckpt//global_step20 does not appear to have a file named config.json. Checkout 'https://huggingface.co//data/cronscript/ckpts//hf_ckpt//global_step20/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 
+huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 
+word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..591380a1ee0abe861e65e0c00bc5aa39384618f1 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:37:04.166942", + "startedAt": "2024-05-14T16:37:03.738413", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3390.131605263158, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3317.565, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3317.565, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3317.564, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3317.869, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, 
+ { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.996, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3317.558, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3211.982, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4235954284668 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..4cd86d43ac0a4a56cb28837b504856cd22f14a8f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-05-14 16:37:03,749 INFO StreamThr :127620 
[internal.py:wandb_internal():85] W&B internal server running at pid: 127620, started at: 2024-05-14 16:37:03.748686 +2024-05-14 16:37:03,751 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:37:03,751 INFO WriterThread:127620 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/run-t0yeqrn0.wandb +2024-05-14 16:37:03,752 DEBUG SenderThread:127620 [sender.py:send():378] send: header +2024-05-14 16:37:03,763 DEBUG SenderThread:127620 [sender.py:send():378] send: run +2024-05-14 16:37:04,027 INFO SenderThread:127620 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files +2024-05-14 16:37:04,027 INFO SenderThread:127620 [sender.py:_start_run_threads():1123] run started: t0yeqrn0 with start time 1715704623.748309 +2024-05-14 16:37:04,034 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:37:04,034 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:37:04,114 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:37:04,116 DEBUG HandlerThread:127620 [system_info.py:__init__():26] System info init +2024-05-14 16:37:04,116 DEBUG HandlerThread:127620 [system_info.py:__init__():41] System info init done +2024-05-14 16:37:04,116 INFO HandlerThread:127620 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:37:04,116 INFO SystemMonitor:127620 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:37:04,116 INFO HandlerThread:127620 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:37:04,116 INFO SystemMonitor:127620 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:37:04,117 INFO SystemMonitor:127620 [interfaces.py:start():188] Started disk monitoring 
+2024-05-14 16:37:04,117 INFO SystemMonitor:127620 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:37:04,117 INFO SystemMonitor:127620 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:37:04,166 DEBUG HandlerThread:127620 [system_info.py:probe():150] Probing system +2024-05-14 16:37:04,176 DEBUG HandlerThread:127620 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:37:04,197 ERROR HandlerThread:127620 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:37:04,197 DEBUG HandlerThread:127620 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:37:04,197 DEBUG HandlerThread:127620 [system_info.py:probe():198] Probing system done +2024-05-14 16:37:04,197 DEBUG HandlerThread:127620 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:37:04.166942', 'startedAt': '2024-05-14T16:37:03.738413', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3390.131605263158, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3317.565, 'min': 800.0, 'max': 3400.0}, {'current': 3317.565, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 
'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3317.564, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3317.869, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.996, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 
'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3317.558, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3211.982, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4235954284668}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:37:04,197 INFO HandlerThread:127620 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:37:04,197 INFO HandlerThread:127620 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:37:04,199 INFO HandlerThread:127620 
[system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:37:04,202 DEBUG SenderThread:127620 [sender.py:send():378] send: files +2024-05-14 16:37:04,202 INFO SenderThread:127620 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:37:04,296 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:37:04,297 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:37:04,297 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:37:04,298 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:37:04,494 DEBUG SenderThread:127620 [sender.py:send():378] send: telemetry +2024-05-14 16:37:04,709 INFO wandb-upload_0:127620 [upload_job.py:push():130] Uploaded file /tmp/tmp83aaf2d9wandb/cu12h5ip-wandb-metadata.json +2024-05-14 16:37:05,028 INFO Thread-12 :127620 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/output.log +2024-05-14 16:37:05,028 INFO Thread-12 :127620 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-metadata.json +2024-05-14 16:37:05,028 INFO Thread-12 :127620 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/requirements.txt +2024-05-14 16:37:07,028 INFO Thread-12 :127620 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/output.log +2024-05-14 16:37:08,975 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:37:10,080 DEBUG SenderThread:127620 [sender.py:send():378] send: exit +2024-05-14 16:37:10,080 INFO 
SenderThread:127620 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 16:37:10,080 INFO SenderThread:127620 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 16:37:10,081 INFO SenderThread:127620 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:37:10,082 INFO SenderThread:127620 [sender.py:send_exit():593] send defer +2024-05-14 16:37:10,082 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:10,082 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:37:10,082 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,082 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:37:10,082 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:37:10,082 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:10,082 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:37:10,082 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,082 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:37:10,082 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:37:10,082 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:10,082 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:37:10,082 INFO HandlerThread:127620 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:37:10,082 DEBUG SystemMonitor:127620 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:37:10,082 DEBUG SystemMonitor:127620 [system_monitor.py:_start():179] Finished system metrics 
aggregation loop +2024-05-14 16:37:10,082 DEBUG SystemMonitor:127620 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:37:10,083 INFO HandlerThread:127620 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:37:10,083 INFO HandlerThread:127620 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:37:10,083 INFO HandlerThread:127620 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:37:10,083 INFO HandlerThread:127620 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:37:10,084 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,084 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:37:10,084 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:37:10,084 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:10,084 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:37:10,084 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,084 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:37:10,084 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:37:10,084 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:10,084 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:37:10,084 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,084 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:37:10,084 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:37:10,084 DEBUG HandlerThread:127620 [handler.py:handle_request():158] 
handle_request: defer +2024-05-14 16:37:10,084 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:37:10,085 DEBUG SenderThread:127620 [sender.py:send():378] send: summary +2024-05-14 16:37:10,085 INFO SenderThread:127620 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:37:10,085 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,085 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:37:10,085 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:37:10,085 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:10,085 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:37:10,086 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,086 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:37:10,088 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:37:10,172 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:37:10,173 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:10,173 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:37:10,173 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:10,173 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:37:11,030 INFO Thread-12 :127620 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/output.log +2024-05-14 16:37:11,031 INFO Thread-12 :127620 
[dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/config.yaml +2024-05-14 16:37:11,031 INFO Thread-12 :127620 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-summary.json +2024-05-14 16:37:11,080 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:12,508 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:37:12,508 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:12,508 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:12,508 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:37:12,509 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:12,509 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:37:12,509 INFO SenderThread:127620 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:37:12,509 INFO SenderThread:127620 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:37:12,509 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:37:12,509 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:12,509 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:37:12,509 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:12,509 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 16:37:12,509 INFO SenderThread:127620 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:37:13,032 INFO 
SenderThread:127620 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/output.log +2024-05-14 16:37:13,032 INFO SenderThread:127620 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files +2024-05-14 16:37:13,032 INFO SenderThread:127620 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/config.yaml config.yaml +2024-05-14 16:37:13,032 INFO SenderThread:127620 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/output.log output.log +2024-05-14 16:37:13,033 INFO SenderThread:127620 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-summary.json wandb-summary.json +2024-05-14 16:37:13,034 INFO SenderThread:127620 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:37:13,037 INFO SenderThread:127620 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/requirements.txt requirements.txt +2024-05-14 16:37:13,037 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:37:13,038 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:13,038 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:37:13,039 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:13,039 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:37:13,039 INFO SenderThread:127620 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 
16:37:13,080 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:13,081 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:13,301 INFO wandb-upload_0:127620 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/output.log +2024-05-14 16:37:13,426 INFO wandb-upload_1:127620 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/config.yaml +2024-05-14 16:37:13,514 INFO wandb-upload_3:127620 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/requirements.txt +2024-05-14 16:37:13,514 INFO wandb-upload_2:127620 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/files/wandb-summary.json +2024-05-14 16:37:13,715 INFO Thread-11 (_thread_body):127620 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:37:13,715 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:13,715 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:37:13,716 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:13,716 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:37:13,716 INFO SenderThread:127620 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 16:37:13,716 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:37:13,716 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:13,716 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:37:13,716 DEBUG SenderThread:127620 [sender.py:send_request():405] 
send_request: defer +2024-05-14 16:37:13,716 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:37:13,716 INFO SenderThread:127620 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:37:13,943 INFO SenderThread:127620 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:37:13,943 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:37:13,943 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:13,943 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:37:13,943 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:13,943 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:37:13,943 INFO SenderThread:127620 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:37:13,944 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:13,944 DEBUG SenderThread:127620 [sender.py:send():378] send: final +2024-05-14 16:37:13,944 INFO HandlerThread:127620 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:37:13,944 DEBUG SenderThread:127620 [sender.py:send():378] send: footer +2024-05-14 16:37:13,944 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:13,944 INFO SenderThread:127620 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:37:13,944 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:13,944 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:13,945 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:13,945 DEBUG HandlerThread:127620 [handler.py:handle_request():158] 
handle_request: server_info +2024-05-14 16:37:13,945 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:13,945 DEBUG SenderThread:127620 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:37:13,946 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:37:13,946 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:37:13,947 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:37:14,007 INFO MainThread:127620 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:37:14,007 INFO MainThread:127620 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:37:14,007 INFO MainThread:127620 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:37:14,007 DEBUG HandlerThread:127620 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:37:14,007 INFO HandlerThread:127620 [handler.py:finish():882] shutting down handler +2024-05-14 16:37:14,945 INFO WriterThread:127620 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/run-t0yeqrn0.wandb +2024-05-14 16:37:15,007 INFO SenderThread:127620 [sender.py:finish():1545] shutting down sender +2024-05-14 16:37:15,007 INFO SenderThread:127620 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:37:15,007 INFO SenderThread:127620 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..40a4bc62ce25627e599930871c390104bc8722d8 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/logs/debug.log @@ -0,0 
+1,29 @@ +2024-05-14 16:37:03,745 INFO MainThread:126392 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:37:03,745 INFO MainThread:126392 [wandb_setup.py:_flush():76] Configure stats pid to 126392 +2024-05-14 16:37:03,745 INFO MainThread:126392 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:37:03,745 INFO MainThread:126392 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:37:03,745 INFO MainThread:126392 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:37:03,745 INFO MainThread:126392 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:37:03,745 WARNING MainThread:126392 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:37:03,745 INFO MainThread:126392 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:37:03,745 INFO MainThread:126392 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:37:03,745 INFO MainThread:126392 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/logs/debug.log +2024-05-14 16:37:03,745 INFO MainThread:126392 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/logs/debug-internal.log +2024-05-14 16:37:03,746 INFO MainThread:126392 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:37:03,746 INFO MainThread:126392 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:37:03,746 INFO MainThread:126392 [wandb_init.py:init():610] starting backend +2024-05-14 16:37:03,746 INFO MainThread:126392 [wandb_init.py:init():614] setting up manager +2024-05-14 
16:37:03,747 INFO MainThread:126392 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:37:03,748 INFO MainThread:126392 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:37:03,750 INFO MainThread:126392 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:37:03,762 INFO MainThread:126392 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:37:04,033 INFO MainThread:126392 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:37:04,110 INFO MainThread:126392 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:37:04,110 INFO MainThread:126392 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:37:04,297 INFO MainThread:126392 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:37:04,297 INFO MainThread:126392 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:37:04,297 INFO MainThread:126392 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:37:04,297 INFO MainThread:126392 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 16:37:04,298 INFO MainThread:126392 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:37:15,008 WARNING MsgRouterThr:126392 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/run-t0yeqrn0.wandb b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/run-t0yeqrn0.wandb new file mode 100644 index 0000000000000000000000000000000000000000..ae8f266a474396d654ded62c918d6093023c3673 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_163703-t0yeqrn0/run-t0yeqrn0.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c9f6c8298d7f6388c49b502891050f1a4aef6d7c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715705031 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..cc5e05e57f3bb724504bd40115f391771d6a23b0 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:16:43:51,837 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:43:56,528 INFO [__main__.py:335] Selected Tasks: 
['indiccopa-hi'] +2024-05-14:16:43:56,531 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:43:56,531 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:16:44:05,301 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:16:44:05,301 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. 
using default higher_is_better=True +[2024-05-14 16:44:04,889] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:16:44:06,685 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:16:44:06,685 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:16:44:06,707 INFO [task.py:395] Building contexts for indiccopa-hi on rank 6... +100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 95093.53it/s] +2024-05-14:16:44:08,634 INFO [evaluator.py:379] Running loglikelihood requests +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +Passed argument batch_size = auto:1. 
Detecting largest batch size +Determined largest batch size: 64 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 
+nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..6b69adc39010b9d4fa7a52905bd5cbe985ef969b --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:43:51.702307", + "startedAt": "2024-05-14T16:43:51.255535", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3393.718789473684, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.906, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, 
+ "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 
+ }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3366.727, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3213.122, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4305686950684 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..91df0012cef27fbd76437f2803da1fd4192acd69 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 24}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..4a1186eaa71f37eb568bcd511c5afcc698d21d3e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/logs/debug-internal.log @@ -0,0 +1,193 @@ +2024-05-14 16:43:51,267 INFO StreamThr :130083 [internal.py:wandb_internal():85] W&B internal server running at pid: 130083, started at: 2024-05-14 16:43:51.267232 +2024-05-14 16:43:51,269 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:43:51,270 INFO WriterThread:130083 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/run-b6ereuxf.wandb +2024-05-14 16:43:51,271 DEBUG SenderThread:130083 
[sender.py:send():378] send: header +2024-05-14 16:43:51,282 DEBUG SenderThread:130083 [sender.py:send():378] send: run +2024-05-14 16:43:51,504 INFO SenderThread:130083 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files +2024-05-14 16:43:51,505 INFO SenderThread:130083 [sender.py:_start_run_threads():1123] run started: b6ereuxf with start time 1715705031.266986 +2024-05-14 16:43:51,512 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:43:51,512 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:43:51,596 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:43:51,598 DEBUG HandlerThread:130083 [system_info.py:__init__():26] System info init +2024-05-14 16:43:51,598 DEBUG HandlerThread:130083 [system_info.py:__init__():41] System info init done +2024-05-14 16:43:51,598 INFO HandlerThread:130083 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:43:51,598 INFO SystemMonitor:130083 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:43:51,598 INFO HandlerThread:130083 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:43:51,599 INFO SystemMonitor:130083 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:43:51,599 INFO SystemMonitor:130083 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:43:51,600 INFO SystemMonitor:130083 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:43:51,600 INFO SystemMonitor:130083 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:43:51,702 DEBUG HandlerThread:130083 [system_info.py:probe():150] Probing system +2024-05-14 16:43:51,710 DEBUG HandlerThread:130083 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:43:51,731 ERROR HandlerThread:130083 
[gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:43:51,731 DEBUG HandlerThread:130083 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:43:51,731 DEBUG HandlerThread:130083 [system_info.py:probe():198] Probing system done +2024-05-14 16:43:51,731 DEBUG HandlerThread:130083 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:43:51.702307', 'startedAt': '2024-05-14T16:43:51.255535', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3393.718789473684, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.002, 'min': 800.0, 'max': 3400.0}, {'current': 3299.906, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3366.727, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3213.122, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4305686950684}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:43:51,731 INFO HandlerThread:130083 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:43:51,731 INFO HandlerThread:130083 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:43:51,732 INFO HandlerThread:130083 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:43:51,736 DEBUG SenderThread:130083 [sender.py:send():378] send: files +2024-05-14 16:43:51,736 INFO SenderThread:130083 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:43:51,832 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:43:51,832 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: 
stop_status +2024-05-14 16:43:51,833 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:43:51,834 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:43:51,964 DEBUG SenderThread:130083 [sender.py:send():378] send: telemetry +2024-05-14 16:43:52,242 INFO wandb-upload_0:130083 [upload_job.py:push():130] Uploaded file /tmp/tmpzm7bhx53wandb/xwi0gcq8-wandb-metadata.json +2024-05-14 16:43:52,506 INFO Thread-12 :130083 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-metadata.json +2024-05-14 16:43:52,506 INFO Thread-12 :130083 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/requirements.txt +2024-05-14 16:43:52,506 INFO Thread-12 :130083 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:43:54,506 INFO Thread-12 :130083 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:43:56,530 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:43:58,509 INFO Thread-12 :130083 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:44:01,532 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:04,514 INFO Thread-12 :130083 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:44:06,516 INFO Thread-12 :130083 [dir_watcher.py:_on_file_modified():288] file/dir 
modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:44:06,685 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:06,833 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:44:06,833 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:44:07,517 INFO Thread-12 :130083 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:44:08,519 INFO Thread-12 :130083 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:44:10,521 INFO Thread-12 :130083 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:44:11,522 INFO Thread-12 :130083 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:44:12,150 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:15,882 DEBUG SenderThread:130083 [sender.py:send():378] send: exit +2024-05-14 16:44:15,883 INFO SenderThread:130083 [sender.py:send_exit():585] handling exit code: 0 +2024-05-14 16:44:15,883 INFO SenderThread:130083 [sender.py:send_exit():587] handling runtime: 24 +2024-05-14 16:44:15,884 INFO SenderThread:130083 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:44:15,884 INFO SenderThread:130083 [sender.py:send_exit():593] send defer +2024-05-14 16:44:15,885 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,885 INFO 
HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:44:15,885 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,885 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:44:15,885 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:44:15,885 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,885 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:44:15,885 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,885 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:44:15,885 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:44:15,885 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,885 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:44:15,885 INFO HandlerThread:130083 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:44:15,886 DEBUG SystemMonitor:130083 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:44:15,886 INFO HandlerThread:130083 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:44:15,886 DEBUG SystemMonitor:130083 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:44:15,886 INFO HandlerThread:130083 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:44:15,886 DEBUG SystemMonitor:130083 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:44:15,886 INFO HandlerThread:130083 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:44:15,888 INFO HandlerThread:130083 [interfaces.py:finish():200] Joined 
network monitor +2024-05-14 16:44:15,888 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,888 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:44:15,888 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:44:15,889 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,889 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:44:15,889 DEBUG SenderThread:130083 [sender.py:send():378] send: stats +2024-05-14 16:44:15,890 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,890 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:44:15,890 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:44:15,890 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,890 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:44:15,890 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,890 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:44:15,890 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:44:15,890 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,890 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:44:15,890 DEBUG SenderThread:130083 [sender.py:send():378] send: summary +2024-05-14 16:44:15,891 INFO SenderThread:130083 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:44:15,891 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer 
+2024-05-14 16:44:15,891 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:44:15,891 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:44:15,891 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,891 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:44:15,891 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,891 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:44:15,895 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:15,962 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:44:15,963 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,963 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:44:15,963 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,963 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:44:16,527 INFO Thread-12 :130083 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/config.yaml +2024-05-14 16:44:16,527 INFO Thread-12 :130083 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-summary.json +2024-05-14 16:44:16,883 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:16,904 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:44:16,904 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: poll_exit 
+2024-05-14 16:44:16,904 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:16,904 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:44:16,904 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:16,904 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:44:16,904 INFO SenderThread:130083 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:44:16,904 INFO SenderThread:130083 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:44:16,905 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:44:16,905 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:16,905 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:44:16,905 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:16,905 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 16:44:16,905 INFO SenderThread:130083 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:44:17,528 INFO SenderThread:130083 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:44:17,528 INFO SenderThread:130083 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files +2024-05-14 16:44:17,528 INFO SenderThread:130083 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log output.log +2024-05-14 16:44:17,528 INFO SenderThread:130083 [dir_watcher.py:finish():402] scan save: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/config.yaml config.yaml +2024-05-14 16:44:17,528 INFO SenderThread:130083 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:44:17,528 INFO SenderThread:130083 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-summary.json wandb-summary.json +2024-05-14 16:44:17,528 INFO SenderThread:130083 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/requirements.txt requirements.txt +2024-05-14 16:44:17,529 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:44:17,529 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:17,529 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:44:17,530 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:17,530 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:44:17,530 INFO SenderThread:130083 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:44:17,759 INFO wandb-upload_1:130083 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/config.yaml +2024-05-14 16:44:17,883 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:17,883 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:17,944 INFO wandb-upload_0:130083 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/output.log +2024-05-14 16:44:18,033 INFO 
wandb-upload_3:130083 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/requirements.txt +2024-05-14 16:44:18,036 INFO wandb-upload_2:130083 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/files/wandb-summary.json +2024-05-14 16:44:18,236 INFO Thread-11 (_thread_body):130083 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:44:18,237 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,237 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:44:18,237 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,237 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:44:18,237 INFO SenderThread:130083 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 16:44:18,237 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:44:18,237 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,238 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:44:18,238 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,238 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:44:18,238 INFO SenderThread:130083 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:44:18,299 INFO SenderThread:130083 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:44:18,299 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:44:18,299 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,299 INFO HandlerThread:130083 
[handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:44:18,299 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,299 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:44:18,299 INFO SenderThread:130083 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:44:18,299 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,299 INFO HandlerThread:130083 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:44:18,299 DEBUG SenderThread:130083 [sender.py:send():378] send: final +2024-05-14 16:44:18,299 DEBUG SenderThread:130083 [sender.py:send():378] send: footer +2024-05-14 16:44:18,300 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,300 INFO SenderThread:130083 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:44:18,300 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:18,300 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:18,300 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:18,300 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:18,301 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:44:18,301 DEBUG SenderThread:130083 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:44:18,302 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:44:18,302 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:44:18,302 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 
16:44:18,363 INFO MainThread:130083 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:44:18,363 INFO MainThread:130083 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:44:18,363 INFO MainThread:130083 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:44:18,363 DEBUG HandlerThread:130083 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:44:18,364 INFO HandlerThread:130083 [handler.py:finish():882] shutting down handler +2024-05-14 16:44:19,301 INFO WriterThread:130083 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/run-b6ereuxf.wandb +2024-05-14 16:44:19,363 INFO SenderThread:130083 [sender.py:finish():1545] shutting down sender +2024-05-14 16:44:19,363 INFO SenderThread:130083 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:44:19,363 INFO SenderThread:130083 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..77af489394631b0edbd1ad7c823975130dfc6b7c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_setup.py:_flush():76] Configure stats pid to 128893 +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_setup.py:_flush():76] Loading settings from 
environment variables: {} +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:43:51,264 WARNING MainThread:128893 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/logs/debug.log +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/logs/debug-internal.log +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_init.py:init():610] starting backend +2024-05-14 16:43:51,264 INFO MainThread:128893 [wandb_init.py:init():614] setting up manager +2024-05-14 16:43:51,266 INFO MainThread:128893 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:43:51,266 INFO MainThread:128893 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:43:51,269 INFO MainThread:128893 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:43:51,281 INFO MainThread:128893 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:43:51,511 INFO MainThread:128893 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:43:51,592 INFO 
MainThread:128893 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:43:51,592 INFO MainThread:128893 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:43:51,833 INFO MainThread:128893 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:43:51,834 INFO MainThread:128893 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:43:51,834 INFO MainThread:128893 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:43:51,834 INFO MainThread:128893 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 16:43:51,835 INFO MainThread:128893 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:44:19,364 WARNING MsgRouterThr:128893 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/run-b6ereuxf.wandb b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/run-b6ereuxf.wandb new file mode 100644 index 0000000000000000000000000000000000000000..af2aa07f7298f07172ae4d710acb806a41771c14 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_164351-b6ereuxf/run-b6ereuxf.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..359bde41e16a3d0b09bb1a6fb0f0770afddf8a4e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716440144 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 
0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..6c143fa01109c74fdee9cbe4de6c422c119a3d46 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:04:55:45,311 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:04:55:53,766 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:04:55:53,767 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:04:55:53,767 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000'} +2024-05-23:04:55:56,075 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return 
cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8150356038c46ec25f623f6e945d6dcb66a2e717 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 
+platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..8c8387e883fb91a6adb0529b203f53f1c5fb48dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T04:55:45.111786", + 
"startedAt": "2024-05-23T04:55:44.608684", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-debug-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.4999875000003, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3392.442, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + 
}, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3368.04, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3117.704, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 212.19972229003906 + } + }, + "memory": { + "total": 1007.43798828125 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..54d8bbcdae4dbc1948b7c4ec5f6bd283f972a90a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 
4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716469867 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/output.log b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..0229db9e4c9de077969334b1b9f15aed3a106e53 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:13:11:07,767 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:13:11:16,378 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:13:11:16,379 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:13:11:16,380 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000'} +2024-05-23:13:11:18,840 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File 
"/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 
+platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..5d12fb7f8eb0daa9c92a1c1ce879f1e980b5fb4e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T13:11:07.564939", + 
"startedAt": "2024-05-23T13:11:06.999566", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.37210625, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 
+ }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.6166000366211 + } + }, + "memory": { + "total": 1007.4379539489746 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..cc9288bae17c787d27f8ccddccb1b56b96b9b34b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-23 13:11:07,026 INFO StreamThr :4129 [internal.py:wandb_internal():85] W&B internal server running at pid: 4129, started at: 
2024-05-23 13:11:07.024158 +2024-05-23 13:11:07,031 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: status +2024-05-23 13:11:07,032 INFO WriterThread:4129 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/run-7mz24exl.wandb +2024-05-23 13:11:07,034 DEBUG SenderThread:4129 [sender.py:send():378] send: header +2024-05-23 13:11:07,037 DEBUG SenderThread:4129 [sender.py:send():378] send: run +2024-05-23 13:11:07,346 INFO SenderThread:4129 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files +2024-05-23 13:11:07,346 INFO SenderThread:4129 [sender.py:_start_run_threads():1123] run started: 7mz24exl with start time 1716469867.024013 +2024-05-23 13:11:07,347 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 13:11:07,347 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: check_version +2024-05-23 13:11:07,470 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 13:11:07,472 DEBUG HandlerThread:4129 [system_info.py:__init__():26] System info init +2024-05-23 13:11:07,472 DEBUG HandlerThread:4129 [system_info.py:__init__():41] System info init done +2024-05-23 13:11:07,472 INFO HandlerThread:4129 [system_monitor.py:start():194] Starting system monitor +2024-05-23 13:11:07,472 INFO SystemMonitor:4129 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 13:11:07,472 INFO HandlerThread:4129 [system_monitor.py:probe():214] Collecting system info +2024-05-23 13:11:07,479 INFO SystemMonitor:4129 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 13:11:07,479 INFO SystemMonitor:4129 [interfaces.py:start():188] Started disk monitoring +2024-05-23 13:11:07,480 INFO SystemMonitor:4129 [interfaces.py:start():188] Started memory 
monitoring +2024-05-23 13:11:07,480 INFO SystemMonitor:4129 [interfaces.py:start():188] Started network monitoring +2024-05-23 13:11:07,564 DEBUG HandlerThread:4129 [system_info.py:probe():150] Probing system +2024-05-23 13:11:07,568 DEBUG HandlerThread:4129 [system_info.py:_probe_git():135] Probing git +2024-05-23 13:11:07,578 ERROR HandlerThread:4129 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 13:11:07,578 DEBUG HandlerThread:4129 [system_info.py:_probe_git():143] Probing git done +2024-05-23 13:11:07,578 DEBUG HandlerThread:4129 [system_info.py:probe():198] Probing system done +2024-05-23 13:11:07,578 DEBUG HandlerThread:4129 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T13:11:07.564939', 'startedAt': '2024-05-23T13:11:06.999566', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.37210625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 
3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.6166000366211}}, 'memory': {'total': 1007.4379539489746}} +2024-05-23 13:11:07,578 INFO HandlerThread:4129 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 13:11:07,578 INFO HandlerThread:4129 [system_monitor.py:probe():227] Publishing system info +2024-05-23 13:11:07,581 INFO HandlerThread:4129 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 13:11:07,586 DEBUG SenderThread:4129 [sender.py:send():378] send: files +2024-05-23 13:11:07,586 INFO SenderThread:4129 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 13:11:07,761 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 13:11:07,761 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: python_packages +2024-05-23 13:11:07,762 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 13:11:07,763 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: stop_status +2024-05-23 13:11:07,909 DEBUG SenderThread:4129 [sender.py:send():378] send: telemetry +2024-05-23 13:11:08,142 INFO wandb-upload_0:4129 [upload_job.py:push():130] Uploaded file /tmp/tmppwcva5afwandb/2sgs5hir-wandb-metadata.json +2024-05-23 13:11:08,348 INFO Thread-12 :4129 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/requirements.txt +2024-05-23 13:11:08,348 INFO Thread-12 :4129 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/output.log +2024-05-23 13:11:08,348 INFO Thread-12 :4129 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-metadata.json +2024-05-23 
13:11:10,347 INFO Thread-12 :4129 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/output.log +2024-05-23 13:11:12,915 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:11:18,354 INFO Thread-12 :4129 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/output.log +2024-05-23 13:11:18,380 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:11:18,860 DEBUG SenderThread:4129 [sender.py:send():378] send: exit +2024-05-23 13:11:18,860 INFO SenderThread:4129 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 13:11:18,860 INFO SenderThread:4129 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 13:11:18,861 INFO SenderThread:4129 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:11:18,862 INFO SenderThread:4129 [sender.py:send_exit():593] send defer +2024-05-23 13:11:18,862 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:18,862 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 13:11:18,862 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:18,862 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 13:11:18,862 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 1 +2024-05-23 13:11:18,862 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:18,862 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 13:11:18,862 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:18,862 INFO 
SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 13:11:18,862 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 2 +2024-05-23 13:11:18,862 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:18,863 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 13:11:18,863 INFO HandlerThread:4129 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 13:11:18,863 DEBUG SystemMonitor:4129 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 13:11:18,863 DEBUG SystemMonitor:4129 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 13:11:18,863 DEBUG SystemMonitor:4129 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 13:11:18,864 INFO HandlerThread:4129 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 13:11:18,864 INFO HandlerThread:4129 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 13:11:18,864 INFO HandlerThread:4129 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 13:11:18,864 INFO HandlerThread:4129 [interfaces.py:finish():200] Joined network monitor +2024-05-23 13:11:18,864 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:18,864 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 13:11:18,864 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 3 +2024-05-23 13:11:18,864 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:18,864 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 13:11:18,864 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:18,864 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 
13:11:18,864 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 4 +2024-05-23 13:11:18,864 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:18,865 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 13:11:18,865 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:18,865 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 13:11:18,865 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 5 +2024-05-23 13:11:18,865 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:18,865 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 13:11:18,865 DEBUG SenderThread:4129 [sender.py:send():378] send: summary +2024-05-23 13:11:18,866 INFO SenderThread:4129 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:11:18,866 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:18,866 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 13:11:18,866 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 6 +2024-05-23 13:11:18,866 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:18,866 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 13:11:18,866 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:18,866 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 13:11:18,871 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:11:18,942 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 7 +2024-05-23 13:11:18,942 DEBUG 
HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:18,942 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 13:11:18,942 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:18,942 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 13:11:19,356 INFO Thread-12 :4129 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/config.yaml +2024-05-23 13:11:19,357 INFO Thread-12 :4129 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-summary.json +2024-05-23 13:11:19,860 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:11:19,935 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 8 +2024-05-23 13:11:19,935 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:11:19,935 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:19,935 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 13:11:19,936 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:19,936 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 13:11:19,936 INFO SenderThread:4129 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 13:11:19,936 INFO SenderThread:4129 [job_builder.py:_get_source_type():576] no source found +2024-05-23 13:11:19,936 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 9 +2024-05-23 13:11:19,936 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:19,937 INFO 
HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 13:11:19,937 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:19,937 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 13:11:19,937 INFO SenderThread:4129 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 13:11:20,358 INFO SenderThread:4129 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/output.log +2024-05-23 13:11:20,358 INFO SenderThread:4129 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files +2024-05-23 13:11:20,358 INFO SenderThread:4129 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/output.log output.log +2024-05-23 13:11:20,358 INFO SenderThread:4129 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-metadata.json wandb-metadata.json +2024-05-23 13:11:20,358 INFO SenderThread:4129 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/config.yaml config.yaml +2024-05-23 13:11:20,361 INFO SenderThread:4129 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/requirements.txt requirements.txt +2024-05-23 13:11:20,363 INFO SenderThread:4129 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-summary.json wandb-summary.json +2024-05-23 13:11:20,363 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 10 +2024-05-23 13:11:20,363 DEBUG 
HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:20,363 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 13:11:20,363 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:20,363 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 13:11:20,363 INFO SenderThread:4129 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:11:20,618 INFO wandb-upload_0:4129 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/output.log +2024-05-23 13:11:20,860 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:11:20,861 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:11:20,946 INFO wandb-upload_3:4129 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/wandb-summary.json +2024-05-23 13:11:20,982 INFO wandb-upload_2:4129 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/requirements.txt +2024-05-23 13:11:20,985 INFO wandb-upload_1:4129 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/files/config.yaml +2024-05-23 13:11:21,186 INFO Thread-11 (_thread_body):4129 [sender.py:transition_state():613] send defer: 11 +2024-05-23 13:11:21,186 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:21,186 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 13:11:21,186 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:21,186 INFO SenderThread:4129 
[sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 13:11:21,186 INFO SenderThread:4129 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 13:11:21,186 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 12 +2024-05-23 13:11:21,187 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:21,187 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 13:11:21,187 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:21,187 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 13:11:21,187 INFO SenderThread:4129 [file_stream.py:finish():601] file stream finish called +2024-05-23 13:11:21,261 INFO SenderThread:4129 [file_stream.py:finish():605] file stream finish is done +2024-05-23 13:11:21,261 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 13 +2024-05-23 13:11:21,261 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:21,261 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 13:11:21,261 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:21,261 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 13:11:21,261 INFO SenderThread:4129 [sender.py:transition_state():613] send defer: 14 +2024-05-23 13:11:21,261 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:21,261 INFO HandlerThread:4129 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 13:11:21,262 DEBUG SenderThread:4129 [sender.py:send():378] send: final +2024-05-23 13:11:21,262 DEBUG SenderThread:4129 [sender.py:send():378] send: footer +2024-05-23 13:11:21,262 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: defer +2024-05-23 
13:11:21,262 INFO SenderThread:4129 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 13:11:21,262 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:11:21,262 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:11:21,263 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:11:21,263 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 13:11:21,263 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 13:11:21,263 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 13:11:21,263 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 13:11:21,263 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:11:21,264 DEBUG SenderThread:4129 [sender.py:send_request():405] send_request: server_info +2024-05-23 13:11:21,320 INFO MainThread:4129 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 13:11:21,320 INFO MainThread:4129 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 13:11:21,320 INFO MainThread:4129 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 13:11:21,321 DEBUG HandlerThread:4129 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 13:11:21,321 INFO HandlerThread:4129 [handler.py:finish():882] shutting down handler +2024-05-23 13:11:22,263 INFO WriterThread:4129 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/run-7mz24exl.wandb +2024-05-23 13:11:22,320 INFO SenderThread:4129 [sender.py:finish():1545] shutting down sender +2024-05-23 13:11:22,320 INFO SenderThread:4129 [file_pusher.py:finish():169] 
shutting down file pusher +2024-05-23 13:11:22,320 INFO SenderThread:4129 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..2f83067e5eda124509d72e088f81c755275d47cc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_setup.py:_flush():76] Configure stats pid to 3974 +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 13:11:07,019 WARNING MainThread:3974 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/logs/debug.log +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_init.py:_log_setup():521] Logging internal logs to 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/logs/debug-internal.log +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_init.py:init():560] calling init triggers +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_init.py:init():610] starting backend +2024-05-23 13:11:07,019 INFO MainThread:3974 [wandb_init.py:init():614] setting up manager +2024-05-23 13:11:07,022 INFO MainThread:3974 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 13:11:07,023 INFO MainThread:3974 [wandb_init.py:init():622] backend started and connected +2024-05-23 13:11:07,027 INFO MainThread:3974 [wandb_init.py:init():711] updated telemetry +2024-05-23 13:11:07,036 INFO MainThread:3974 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 13:11:07,346 INFO MainThread:3974 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 13:11:07,464 INFO MainThread:3974 [wandb_run.py:_on_init():2405] got version response +2024-05-23 13:11:07,464 INFO MainThread:3974 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 13:11:07,762 INFO MainThread:3974 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 13:11:07,762 INFO MainThread:3974 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 13:11:07,762 INFO MainThread:3974 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 13:11:07,763 INFO MainThread:3974 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-23 13:11:07,765 INFO MainThread:3974 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 13:11:22,322 WARNING MsgRouterThr:3974 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/run-7mz24exl.wandb b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/run-7mz24exl.wandb new file mode 100644 index 0000000000000000000000000000000000000000..6b75f795b6020f25a19ac0c0abd6ca40bf236a55 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_131106-7mz24exl/run-7mz24exl.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..643fda96068f4a6407eae3f1784d990a864b3e6b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716469898 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/output.log b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..8265ed7c776a3e9cf655161432d055f6376386b7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:13:11:38,739 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:13:11:47,234 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 
'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:13:11:47,235 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:13:11:47,236 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000'} +2024-05-23:13:11:49,528 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File 
"/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 
+habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 
+typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..0cf448c2f64f6b56fa4594e7189301d651c142bd --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T13:11:38.517273", + "startedAt": "2024-05-23T13:11:37.985555", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2333.3881499999998, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { 
+ "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.61664581298828 + } + }, + "memory": { + "total": 1007.4379539489746 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-summary.json new file mode 100644 index 
0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..8500d2451aea0b9630c596ae9abe025e7b557c64 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-23 13:11:38,009 INFO StreamThr :4377 [internal.py:wandb_internal():85] W&B internal server running at pid: 4377, started at: 2024-05-23 13:11:38.004916 +2024-05-23 13:11:38,012 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: status +2024-05-23 13:11:38,012 INFO WriterThread:4377 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/run-q8nkqecr.wandb +2024-05-23 13:11:38,018 DEBUG SenderThread:4377 [sender.py:send():378] send: header +2024-05-23 13:11:38,018 DEBUG SenderThread:4377 [sender.py:send():378] send: run +2024-05-23 13:11:38,325 INFO SenderThread:4377 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files +2024-05-23 13:11:38,325 INFO SenderThread:4377 [sender.py:_start_run_threads():1123] run started: q8nkqecr with start time 1716469898.004776 +2024-05-23 13:11:38,325 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 13:11:38,326 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: check_version +2024-05-23 13:11:38,442 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 13:11:38,444 DEBUG 
HandlerThread:4377 [system_info.py:__init__():26] System info init +2024-05-23 13:11:38,444 DEBUG HandlerThread:4377 [system_info.py:__init__():41] System info init done +2024-05-23 13:11:38,444 INFO HandlerThread:4377 [system_monitor.py:start():194] Starting system monitor +2024-05-23 13:11:38,445 INFO SystemMonitor:4377 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 13:11:38,445 INFO HandlerThread:4377 [system_monitor.py:probe():214] Collecting system info +2024-05-23 13:11:38,451 INFO SystemMonitor:4377 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 13:11:38,452 INFO SystemMonitor:4377 [interfaces.py:start():188] Started disk monitoring +2024-05-23 13:11:38,454 INFO SystemMonitor:4377 [interfaces.py:start():188] Started memory monitoring +2024-05-23 13:11:38,455 INFO SystemMonitor:4377 [interfaces.py:start():188] Started network monitoring +2024-05-23 13:11:38,517 DEBUG HandlerThread:4377 [system_info.py:probe():150] Probing system +2024-05-23 13:11:38,520 DEBUG HandlerThread:4377 [system_info.py:_probe_git():135] Probing git +2024-05-23 13:11:38,530 ERROR HandlerThread:4377 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 13:11:38,530 DEBUG HandlerThread:4377 [system_info.py:_probe_git():143] Probing git done +2024-05-23 13:11:38,530 DEBUG HandlerThread:4377 [system_info.py:probe():198] Probing system done +2024-05-23 13:11:38,530 DEBUG HandlerThread:4377 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T13:11:38.517273', 'startedAt': '2024-05-23T13:11:37.985555', 'docker': None, 
'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2333.3881499999998, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.61664581298828}}, 'memory': {'total': 1007.4379539489746}} +2024-05-23 13:11:38,530 INFO HandlerThread:4377 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 13:11:38,531 INFO HandlerThread:4377 [system_monitor.py:probe():227] Publishing system info +2024-05-23 13:11:38,534 INFO HandlerThread:4377 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 13:11:38,538 DEBUG SenderThread:4377 [sender.py:send():378] send: files +2024-05-23 13:11:38,539 INFO SenderThread:4377 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 13:11:38,717 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 13:11:38,718 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: python_packages +2024-05-23 13:11:38,718 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 13:11:38,721 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: stop_status +2024-05-23 13:11:38,844 DEBUG SenderThread:4377 [sender.py:send():378] send: telemetry +2024-05-23 
13:11:39,124 INFO wandb-upload_0:4377 [upload_job.py:push():130] Uploaded file /tmp/tmpljl0othawandb/eyc92gjw-wandb-metadata.json +2024-05-23 13:11:39,327 INFO Thread-12 :4377 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-metadata.json +2024-05-23 13:11:39,327 INFO Thread-12 :4377 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/output.log +2024-05-23 13:11:39,327 INFO Thread-12 :4377 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/requirements.txt +2024-05-23 13:11:41,326 INFO Thread-12 :4377 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/output.log +2024-05-23 13:11:43,846 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:11:49,237 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:11:49,333 INFO Thread-12 :4377 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/output.log +2024-05-23 13:11:49,543 DEBUG SenderThread:4377 [sender.py:send():378] send: exit +2024-05-23 13:11:49,543 INFO SenderThread:4377 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 13:11:49,543 INFO SenderThread:4377 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 13:11:49,544 INFO SenderThread:4377 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:11:49,544 INFO SenderThread:4377 [sender.py:send_exit():593] send defer +2024-05-23 13:11:49,544 DEBUG HandlerThread:4377 
[handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:49,544 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 13:11:49,545 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:49,545 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 13:11:49,545 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 1 +2024-05-23 13:11:49,545 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:49,545 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 13:11:49,545 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:49,545 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 13:11:49,545 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 2 +2024-05-23 13:11:49,545 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:49,545 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 13:11:49,545 INFO HandlerThread:4377 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 13:11:49,545 DEBUG SystemMonitor:4377 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 13:11:49,545 DEBUG SystemMonitor:4377 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 13:11:49,546 DEBUG SystemMonitor:4377 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 13:11:49,546 INFO HandlerThread:4377 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 13:11:49,547 INFO HandlerThread:4377 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 13:11:49,547 INFO HandlerThread:4377 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 13:11:49,547 INFO 
HandlerThread:4377 [interfaces.py:finish():200] Joined network monitor +2024-05-23 13:11:49,548 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:49,548 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 13:11:49,548 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 3 +2024-05-23 13:11:49,548 DEBUG SenderThread:4377 [sender.py:send():378] send: stats +2024-05-23 13:11:49,548 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:49,549 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 13:11:49,550 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:49,550 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 13:11:49,550 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 4 +2024-05-23 13:11:49,550 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:49,550 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 13:11:49,550 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:49,550 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 13:11:49,550 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 5 +2024-05-23 13:11:49,550 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:49,550 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 13:11:49,550 DEBUG SenderThread:4377 [sender.py:send():378] send: summary +2024-05-23 13:11:49,551 INFO SenderThread:4377 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:11:49,551 DEBUG SenderThread:4377 [sender.py:send_request():405] 
send_request: defer +2024-05-23 13:11:49,551 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 13:11:49,551 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 6 +2024-05-23 13:11:49,551 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:49,551 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 13:11:49,551 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:49,552 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 13:11:49,556 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:11:49,624 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 7 +2024-05-23 13:11:49,624 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:49,624 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 13:11:49,624 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:49,624 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 13:11:50,334 INFO Thread-12 :4377 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/config.yaml +2024-05-23 13:11:50,334 INFO Thread-12 :4377 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-summary.json +2024-05-23 13:11:50,543 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:11:50,869 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 8 +2024-05-23 13:11:50,870 DEBUG SenderThread:4377 [sender.py:send_request():405] 
send_request: poll_exit +2024-05-23 13:11:50,870 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:50,870 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 13:11:50,870 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:50,870 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 13:11:50,870 INFO SenderThread:4377 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 13:11:50,871 INFO SenderThread:4377 [job_builder.py:_get_source_type():576] no source found +2024-05-23 13:11:50,871 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 9 +2024-05-23 13:11:50,871 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:50,871 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 13:11:50,871 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:50,871 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 13:11:50,871 INFO SenderThread:4377 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 13:11:51,336 INFO SenderThread:4377 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/output.log +2024-05-23 13:11:51,336 INFO SenderThread:4377 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files +2024-05-23 13:11:51,336 INFO SenderThread:4377 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-metadata.json wandb-metadata.json +2024-05-23 13:11:51,336 INFO SenderThread:4377 [dir_watcher.py:finish():402] scan save: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/output.log output.log +2024-05-23 13:11:51,336 INFO SenderThread:4377 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/config.yaml config.yaml +2024-05-23 13:11:51,339 INFO SenderThread:4377 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-summary.json wandb-summary.json +2024-05-23 13:11:51,339 INFO SenderThread:4377 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/requirements.txt requirements.txt +2024-05-23 13:11:51,339 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 10 +2024-05-23 13:11:51,339 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:51,339 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 13:11:51,340 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:51,340 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 13:11:51,340 INFO SenderThread:4377 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:11:51,543 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:11:51,543 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:11:51,586 INFO wandb-upload_0:4377 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/output.log +2024-05-23 13:11:51,915 INFO wandb-upload_1:4377 [upload_job.py:push():130] Uploaded file 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/config.yaml +2024-05-23 13:11:51,974 INFO wandb-upload_3:4377 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/requirements.txt +2024-05-23 13:11:52,003 INFO wandb-upload_2:4377 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/files/wandb-summary.json +2024-05-23 13:11:52,204 INFO Thread-11 (_thread_body):4377 [sender.py:transition_state():613] send defer: 11 +2024-05-23 13:11:52,204 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:52,204 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 13:11:52,204 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:52,204 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 13:11:52,204 INFO SenderThread:4377 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 13:11:52,204 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 12 +2024-05-23 13:11:52,204 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:52,204 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 13:11:52,205 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:52,205 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 13:11:52,205 INFO SenderThread:4377 [file_stream.py:finish():601] file stream finish called +2024-05-23 13:11:52,284 INFO SenderThread:4377 [file_stream.py:finish():605] file stream finish is done +2024-05-23 13:11:52,284 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 13 +2024-05-23 13:11:52,284 
DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:52,284 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 13:11:52,284 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:52,284 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 13:11:52,284 INFO SenderThread:4377 [sender.py:transition_state():613] send defer: 14 +2024-05-23 13:11:52,284 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:11:52,284 INFO HandlerThread:4377 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 13:11:52,285 DEBUG SenderThread:4377 [sender.py:send():378] send: final +2024-05-23 13:11:52,285 DEBUG SenderThread:4377 [sender.py:send():378] send: footer +2024-05-23 13:11:52,285 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: defer +2024-05-23 13:11:52,285 INFO SenderThread:4377 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 13:11:52,285 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:11:52,285 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:11:52,286 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:11:52,286 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 13:11:52,286 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 13:11:52,286 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 13:11:52,286 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 13:11:52,286 DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:11:52,286 
DEBUG SenderThread:4377 [sender.py:send_request():405] send_request: server_info +2024-05-23 13:11:52,344 INFO MainThread:4377 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 13:11:52,344 INFO MainThread:4377 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 13:11:52,344 INFO MainThread:4377 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 13:11:52,345 DEBUG HandlerThread:4377 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 13:11:52,345 INFO HandlerThread:4377 [handler.py:finish():882] shutting down handler +2024-05-23 13:11:53,286 INFO WriterThread:4377 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/run-q8nkqecr.wandb +2024-05-23 13:11:53,344 INFO SenderThread:4377 [sender.py:finish():1545] shutting down sender +2024-05-23 13:11:53,344 INFO SenderThread:4377 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:11:53,344 INFO SenderThread:4377 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..4ac32fe01bb7942838b6cac79a94a7570c18196b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_setup.py:_flush():76] Configure stats pid to 4222 +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings 
+2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 13:11:38,000 WARNING MainThread:4222 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/logs/debug.log +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/logs/debug-internal.log +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_init.py:init():560] calling init triggers +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_init.py:init():610] starting backend +2024-05-23 13:11:38,000 INFO MainThread:4222 [wandb_init.py:init():614] setting up manager +2024-05-23 13:11:38,003 INFO MainThread:4222 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 13:11:38,004 INFO MainThread:4222 [wandb_init.py:init():622] backend started and connected +2024-05-23 13:11:38,007 INFO MainThread:4222 [wandb_init.py:init():711] updated telemetry +2024-05-23 13:11:38,016 INFO MainThread:4222 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 13:11:38,325 INFO 
MainThread:4222 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 13:11:38,436 INFO MainThread:4222 [wandb_run.py:_on_init():2405] got version response +2024-05-23 13:11:38,436 INFO MainThread:4222 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 13:11:38,719 INFO MainThread:4222 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 13:11:38,719 INFO MainThread:4222 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 13:11:38,719 INFO MainThread:4222 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 13:11:38,719 INFO MainThread:4222 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 13:11:38,737 INFO MainThread:4222 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 13:11:53,345 WARNING MsgRouterThr:4222 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/run-q8nkqecr.wandb b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/run-q8nkqecr.wandb new file mode 100644 index 0000000000000000000000000000000000000000..194a24bfea4eb28678b06a3c962987887a70dfa6 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_131137-q8nkqecr/run-q8nkqecr.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/config.yaml b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a5daf9e45f4915eb0bc69466dc12fe8a54d6345 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/config.yaml @@ -0,0 +1,44 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1717052687 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + 
- 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 13 + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..2e8a95acca2a5c05886fe2b80e0e5a341efd8aa5 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log @@ -0,0 +1,38 @@ + +2024-05-30:07:04:47,779 INFO [__main__.py:251] Verbosity set to INFO +2024-05-30:07:04:56,819 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-05-30:07:04:56,820 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-30:07:04:56,821 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step50000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/'} +2024-05-30:07:04:59,297 INFO [huggingface.py:164] Using device 'cuda' +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. 
+ warnings.warn( +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 237, in __init__ + self._create_tokenizer( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 630, in _create_tokenizer + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/tokenization_auto.py", line 752, in from_pretrained + config = AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 1082, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 644, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File 
"/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 699, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 360, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/ does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k//main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..7391d0fb5302364497bd6017486c36f0caae0613 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/requirements.txt @@ -0,0 +1,154 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.2 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 
+jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.15.2 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.36.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git 
a/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..dcce65b23f6386cbbf1e9e1757a8d5d498ae9076 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-30T07:04:47.565714", + "startedAt": "2024-05-30T07:04:47.053095", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step50000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/", + "--tasks", + "winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq", + "--batch_size", + "auto", + "--wandb_args", + "project=english-eval,group=exp2,name=global_step50000" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-debug-25-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2334.1559687500003, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 
+ }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.41989517211914 + } + }, + "memory": { + "total": 1007.4379768371582 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..c9ee7fbb723cc397f445841de3d0dd07d77a24f0 --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 33}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..69ef5be51cd10a5c8eb46f1bdbd08ab94645404a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/logs/debug-internal.log @@ -0,0 +1,193 @@ +2024-05-30 07:04:47,076 INFO StreamThr :900 [internal.py:wandb_internal():85] W&B internal server running at pid: 900, started at: 2024-05-30 07:04:47.074200 +2024-05-30 07:04:47,081 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status +2024-05-30 07:04:47,081 INFO WriterThread:900 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/run-9l46ukwo.wandb +2024-05-30 07:04:47,084 DEBUG SenderThread:900 [sender.py:send():378] send: header +2024-05-30 07:04:47,089 DEBUG SenderThread:900 [sender.py:send():378] send: run +2024-05-30 07:04:47,350 INFO SenderThread:900 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files +2024-05-30 07:04:47,350 INFO SenderThread:900 [sender.py:_start_run_threads():1123] run started: 9l46ukwo with start time 1717052687.074683 +2024-05-30 07:04:47,354 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: check_version +2024-05-30 07:04:47,354 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: check_version +2024-05-30 07:04:47,470 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: run_start +2024-05-30 07:04:47,472 DEBUG HandlerThread:900 [system_info.py:__init__():26] System info init +2024-05-30 07:04:47,472 DEBUG HandlerThread:900 
[system_info.py:__init__():41] System info init done +2024-05-30 07:04:47,473 INFO HandlerThread:900 [system_monitor.py:start():194] Starting system monitor +2024-05-30 07:04:47,473 INFO SystemMonitor:900 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-30 07:04:47,473 INFO HandlerThread:900 [system_monitor.py:probe():214] Collecting system info +2024-05-30 07:04:47,480 INFO SystemMonitor:900 [interfaces.py:start():188] Started cpu monitoring +2024-05-30 07:04:47,480 INFO SystemMonitor:900 [interfaces.py:start():188] Started disk monitoring +2024-05-30 07:04:47,481 INFO SystemMonitor:900 [interfaces.py:start():188] Started memory monitoring +2024-05-30 07:04:47,482 INFO SystemMonitor:900 [interfaces.py:start():188] Started network monitoring +2024-05-30 07:04:47,565 DEBUG HandlerThread:900 [system_info.py:probe():150] Probing system +2024-05-30 07:04:47,569 DEBUG HandlerThread:900 [system_info.py:_probe_git():135] Probing git +2024-05-30 07:04:47,579 ERROR HandlerThread:900 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-30 07:04:47,579 DEBUG HandlerThread:900 [system_info.py:_probe_git():143] Probing git done +2024-05-30 07:04:47,579 DEBUG HandlerThread:900 [system_info.py:probe():198] Probing system done +2024-05-30 07:04:47,579 DEBUG HandlerThread:900 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-30T07:04:47.565714', 'startedAt': '2024-05-30T07:04:47.053095', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 
'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step50000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/', '--tasks', 'winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq', '--batch_size', 'auto', '--wandb_args', 'project=english-eval,group=exp2,name=global_step50000'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-25-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2334.1559687500003, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.41989517211914}}, 'memory': {'total': 1007.4379768371582}} +2024-05-30 07:04:47,579 INFO HandlerThread:900 [system_monitor.py:probe():224] Finished collecting system info +2024-05-30 07:04:47,579 INFO HandlerThread:900 [system_monitor.py:probe():227] Publishing system info +2024-05-30 07:04:47,582 INFO HandlerThread:900 [system_monitor.py:probe():229] Finished publishing system info +2024-05-30 07:04:47,589 DEBUG SenderThread:900 [sender.py:send():378] send: files +2024-05-30 07:04:47,589 INFO SenderThread:900 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-30 07:04:47,771 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: python_packages +2024-05-30 07:04:47,772 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: python_packages +2024-05-30 07:04:47,774 DEBUG SenderThread:900 [sender.py:send():378] send: telemetry +2024-05-30 07:04:47,774 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 07:04:47,774 DEBUG SenderThread:900 [sender.py:send_request():405] 
send_request: stop_status +2024-05-30 07:04:48,149 INFO wandb-upload_0:900 [upload_job.py:push():130] Uploaded file /tmp/tmpxegp2vwhwandb/it32bqqo-wandb-metadata.json +2024-05-30 07:04:48,353 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/requirements.txt +2024-05-30 07:04:48,353 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-metadata.json +2024-05-30 07:04:48,353 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log +2024-05-30 07:04:50,353 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log +2024-05-30 07:04:52,924 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:04:58,359 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log +2024-05-30 07:04:58,821 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:00,362 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log +2024-05-30 07:05:02,773 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 07:05:02,774 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: stop_status +2024-05-30 07:05:03,905 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: 
status_report +2024-05-30 07:05:08,906 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:13,906 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:17,773 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 07:05:17,773 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: stop_status +2024-05-30 07:05:18,920 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:19,843 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/config.yaml +2024-05-30 07:05:20,785 DEBUG SenderThread:900 [sender.py:send():378] send: exit +2024-05-30 07:05:20,785 INFO SenderThread:900 [sender.py:send_exit():585] handling exit code: 1 +2024-05-30 07:05:20,785 INFO SenderThread:900 [sender.py:send_exit():587] handling runtime: 33 +2024-05-30 07:05:20,787 INFO SenderThread:900 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-30 07:05:20,787 INFO SenderThread:900 [sender.py:send_exit():593] send defer +2024-05-30 07:05:20,787 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:20,787 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-30 07:05:20,787 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:20,787 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-30 07:05:20,787 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 1 +2024-05-30 07:05:20,787 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:20,787 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-30 
07:05:20,787 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:20,787 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-30 07:05:20,787 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 2 +2024-05-30 07:05:20,787 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:20,787 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-30 07:05:20,788 INFO HandlerThread:900 [system_monitor.py:finish():203] Stopping system monitor +2024-05-30 07:05:20,788 DEBUG SystemMonitor:900 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-30 07:05:20,788 DEBUG SystemMonitor:900 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-30 07:05:20,788 DEBUG SystemMonitor:900 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-30 07:05:20,791 INFO HandlerThread:900 [interfaces.py:finish():200] Joined cpu monitor +2024-05-30 07:05:20,791 INFO HandlerThread:900 [interfaces.py:finish():200] Joined disk monitor +2024-05-30 07:05:20,791 INFO HandlerThread:900 [interfaces.py:finish():200] Joined memory monitor +2024-05-30 07:05:20,791 INFO HandlerThread:900 [interfaces.py:finish():200] Joined network monitor +2024-05-30 07:05:20,792 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:20,792 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-30 07:05:20,792 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 3 +2024-05-30 07:05:20,792 DEBUG SenderThread:900 [sender.py:send():378] send: stats +2024-05-30 07:05:20,793 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:20,793 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-30 07:05:20,793 DEBUG SenderThread:900 
[sender.py:send_request():405] send_request: defer +2024-05-30 07:05:20,793 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-30 07:05:20,793 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 4 +2024-05-30 07:05:20,793 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:20,793 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-30 07:05:20,793 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:20,793 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-30 07:05:20,793 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 5 +2024-05-30 07:05:20,793 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:20,793 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-30 07:05:20,793 DEBUG SenderThread:900 [sender.py:send():378] send: summary +2024-05-30 07:05:20,794 INFO SenderThread:900 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-30 07:05:20,794 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:20,794 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-30 07:05:20,794 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 6 +2024-05-30 07:05:20,794 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:20,794 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-30 07:05:20,795 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:20,795 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-30 07:05:20,795 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 7 
+2024-05-30 07:05:20,795 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:20,795 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:20,795 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-30 07:05:20,795 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:20,795 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-30 07:05:20,844 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log +2024-05-30 07:05:20,844 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-summary.json +2024-05-30 07:05:21,785 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 07:05:21,970 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 8 +2024-05-30 07:05:21,970 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 07:05:21,970 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:21,970 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-30 07:05:21,970 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:21,970 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-30 07:05:21,970 INFO SenderThread:900 [job_builder.py:build():432] Attempting to build job artifact +2024-05-30 07:05:21,971 INFO SenderThread:900 [job_builder.py:_get_source_type():576] no source found +2024-05-30 07:05:21,971 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 9 
+2024-05-30 07:05:21,971 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:21,971 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-30 07:05:21,971 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:21,971 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-30 07:05:21,971 INFO SenderThread:900 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-30 07:05:22,785 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 07:05:22,846 INFO SenderThread:900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log +2024-05-30 07:05:22,846 INFO SenderThread:900 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files +2024-05-30 07:05:22,846 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-metadata.json wandb-metadata.json +2024-05-30 07:05:22,846 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log output.log +2024-05-30 07:05:22,846 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-summary.json wandb-summary.json +2024-05-30 07:05:22,848 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/config.yaml config.yaml +2024-05-30 07:05:22,849 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/requirements.txt requirements.txt +2024-05-30 07:05:22,849 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 10 +2024-05-30 07:05:22,849 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 07:05:22,849 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:22,849 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-30 07:05:22,849 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:22,849 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-30 07:05:22,849 INFO SenderThread:900 [file_pusher.py:finish():169] shutting down file pusher +2024-05-30 07:05:23,095 INFO wandb-upload_0:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/output.log +2024-05-30 07:05:23,368 INFO wandb-upload_3:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/requirements.txt +2024-05-30 07:05:23,417 INFO wandb-upload_1:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/wandb-summary.json +2024-05-30 07:05:23,458 INFO wandb-upload_2:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/files/config.yaml +2024-05-30 07:05:23,658 INFO Thread-11 (_thread_body):900 [sender.py:transition_state():613] send defer: 11 +2024-05-30 07:05:23,658 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:23,658 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-30 07:05:23,659 
DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:23,659 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-30 07:05:23,659 INFO SenderThread:900 [file_pusher.py:join():175] waiting for file pusher +2024-05-30 07:05:23,659 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 12 +2024-05-30 07:05:23,659 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:23,659 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-30 07:05:23,659 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:23,659 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-30 07:05:23,659 INFO SenderThread:900 [file_stream.py:finish():601] file stream finish called +2024-05-30 07:05:23,722 INFO SenderThread:900 [file_stream.py:finish():605] file stream finish is done +2024-05-30 07:05:23,722 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 13 +2024-05-30 07:05:23,722 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:23,722 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-30 07:05:23,723 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:23,723 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-30 07:05:23,723 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 14 +2024-05-30 07:05:23,723 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:23,723 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-30 07:05:23,723 DEBUG SenderThread:900 [sender.py:send():378] send: final +2024-05-30 07:05:23,723 DEBUG SenderThread:900 [sender.py:send():378] send: footer 
+2024-05-30 07:05:23,723 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:23,723 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-30 07:05:23,724 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 07:05:23,724 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 07:05:23,724 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: server_info +2024-05-30 07:05:23,724 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: get_summary +2024-05-30 07:05:23,724 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-30 07:05:23,724 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-30 07:05:23,724 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 07:05:23,724 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 07:05:23,724 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: server_info +2024-05-30 07:05:23,785 INFO MainThread:900 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-30 07:05:23,785 INFO MainThread:900 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-30 07:05:23,785 INFO MainThread:900 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-30 07:05:23,785 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: shutdown +2024-05-30 07:05:23,785 INFO HandlerThread:900 [handler.py:finish():882] shutting down handler +2024-05-30 07:05:24,724 INFO WriterThread:900 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/run-9l46ukwo.wandb +2024-05-30 07:05:24,785 INFO SenderThread:900 [sender.py:finish():1545] shutting 
down sender +2024-05-30 07:05:24,785 INFO SenderThread:900 [file_pusher.py:finish():169] shutting down file pusher +2024-05-30 07:05:24,785 INFO SenderThread:900 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/logs/debug.log b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..3e29aa09f4528c056a998e57b3970c096d5adbec --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-30 07:04:47,068 INFO MainThread:744 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-30 07:04:47,068 INFO MainThread:744 [wandb_setup.py:_flush():76] Configure stats pid to 744 +2024-05-30 07:04:47,068 INFO MainThread:744 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-30 07:04:47,068 INFO MainThread:744 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-30 07:04:47,068 INFO MainThread:744 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-30 07:04:47,068 INFO MainThread:744 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-30 07:04:47,068 WARNING MainThread:744 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-30 07:04:47,068 INFO MainThread:744 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-30 07:04:47,068 INFO MainThread:744 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-30 07:04:47,068 INFO MainThread:744 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/logs/debug.log +2024-05-30 07:04:47,069 INFO MainThread:744 
[wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/logs/debug-internal.log +2024-05-30 07:04:47,069 INFO MainThread:744 [wandb_init.py:init():560] calling init triggers +2024-05-30 07:04:47,069 INFO MainThread:744 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-30 07:04:47,069 INFO MainThread:744 [wandb_init.py:init():610] starting backend +2024-05-30 07:04:47,069 INFO MainThread:744 [wandb_init.py:init():614] setting up manager +2024-05-30 07:04:47,073 INFO MainThread:744 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-30 07:04:47,074 INFO MainThread:744 [wandb_init.py:init():622] backend started and connected +2024-05-30 07:04:47,078 INFO MainThread:744 [wandb_init.py:init():711] updated telemetry +2024-05-30 07:04:47,088 INFO MainThread:744 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-30 07:04:47,354 INFO MainThread:744 [wandb_run.py:_on_init():2396] communicating current version +2024-05-30 07:04:47,464 INFO MainThread:744 [wandb_run.py:_on_init():2405] got version response +2024-05-30 07:04:47,464 INFO MainThread:744 [wandb_init.py:init():795] starting run threads in backend +2024-05-30 07:04:47,773 INFO MainThread:744 [wandb_run.py:_console_start():2374] atexit reg +2024-05-30 07:04:47,773 INFO MainThread:744 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-30 07:04:47,773 INFO MainThread:744 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-30 07:04:47,773 INFO MainThread:744 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-30 07:04:47,776 INFO MainThread:744 [wandb_init.py:init():838] run started, returning control to user process +2024-05-30 07:05:24,786 WARNING MsgRouterThr:744 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/run-9l46ukwo.wandb b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/run-9l46ukwo.wandb new file mode 100644 index 0000000000000000000000000000000000000000..6e3e8aef855fc25ba86b13ee71cc1d3e34387002 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240530_070447-9l46ukwo/run-9l46ukwo.wandb differ diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz new file mode 100644 index 0000000000000000000000000000000000000000..7812497bc95e5894c8e880736bfb06aa22bb2fae --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efb146d450c6d061d06affb56f17384e7f64cbab9b516fcc6c4d3f8869b3e707 +size 712 diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d6cf697b1c1c752d4d8a78d702a70042ad047ce9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e064c2eecfdc58d552844467da7bd56eca596098322bfd266a7e1312abdd5735 +size 1068 diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl new file mode 100644 index 
0000000000000000000000000000000000000000..4dda21d9ad4ce279b8474ecce9697e3290e96bfa --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e6b0e171782d5fd5a61d1844dc946eb27c5f6b2e8075d436b23808433142ebc +size 1068 diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy new file mode 100644 index 0000000000000000000000000000000000000000..e9b5e77c73268dfff541b576126f06fc6fed3d59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05 +size 104 diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_04.npy b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_04.npy new file mode 100644 index 0000000000000000000000000000000000000000..e9b5e77c73268dfff541b576126f06fc6fed3d59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_04.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05 +size 104 diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_04.npy b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_04.npy new file mode 100644 index 0000000000000000000000000000000000000000..e9b5e77c73268dfff541b576126f06fc6fed3d59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_04.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05 +size 104 diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_01.npy b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_01.npy new file mode 100644 index 0000000000000000000000000000000000000000..15574a4193ad4ad724b2b8053c701a82efa78fd5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_01.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0efbd7d9ce7eec3a6e0a0db41e795e0396cca3d6b037dad6c61b464843d28809 +size 120 diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy new file mode 100644 index 0000000000000000000000000000000000000000..e9b5e77c73268dfff541b576126f06fc6fed3d59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05 +size 104 diff --git a/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fe0929413bc2a99f5a4c4e843c761c0da1f2169 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/configuration_altclip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/configuration_altclip.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5b9f6a3bb1a31b1f8de918ae4aae7619d6978d37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/configuration_altclip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/modeling_altclip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/modeling_altclip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75dc4869bd4f52405a210d133ca827c9330b98a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/modeling_altclip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/processing_altclip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/processing_altclip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52e4c3871ff15fc254747aeac68cb5c4e7944f87 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/altclip/__pycache__/processing_altclip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fa67047f43249cb8448e51f1c3df5fc739d3b31 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/herbert/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/herbert/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..54037995229f829e961f96670b86066097d69471 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/transformers/models/herbert/__init__.py @@ -0,0 +1,45 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available + + +_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"] + + +if TYPE_CHECKING: + from .tokenization_herbert import HerbertTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_herbert_fast import HerbertTokenizerFast + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdbd7f77ee4cdfaad03ce35fb9eff6da5afc2d76 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc differ 
diff --git a/venv/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b36e78b8465dff5af7d2f2246d60f3f5d8550160 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a8e085277798662af1ea196f41876a4fb2bce3f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py b/venv/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py new file mode 100644 index 0000000000000000000000000000000000000000..6e37922028e7beddf34bebdb7109cdcf0f7b3fb7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py @@ -0,0 +1,644 @@ +# coding=utf-8 +# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os +import re +import unicodedata +from typing import List, Optional, Tuple + +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "merges_file": "merges.txt", +} + + +# Copied from transformers.models.xlm.tokenization_xlm.get_pairs +def get_pairs(word): + """ + Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length + strings) + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +# Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct +def replace_unicode_punct(text): + """ + Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl + """ + text = text.replace(",", ",") + text = re.sub(r"。\s*", ". 
", text) + text = text.replace("、", ",") + text = text.replace("”", '"') + text = text.replace("“", '"') + text = text.replace("∶", ":") + text = text.replace(":", ":") + text = text.replace("?", "?") + text = text.replace("《", '"') + text = text.replace("》", '"') + text = text.replace(")", ")") + text = text.replace("!", "!") + text = text.replace("(", "(") + text = text.replace(";", ";") + text = text.replace("1", "1") + text = text.replace("」", '"') + text = text.replace("「", '"') + text = text.replace("0", "0") + text = text.replace("3", "3") + text = text.replace("2", "2") + text = text.replace("5", "5") + text = text.replace("6", "6") + text = text.replace("9", "9") + text = text.replace("7", "7") + text = text.replace("8", "8") + text = text.replace("4", "4") + text = re.sub(r".\s*", ". ", text) + text = text.replace("~", "~") + text = text.replace("’", "'") + text = text.replace("…", "...") + text = text.replace("━", "-") + text = text.replace("〈", "<") + text = text.replace("〉", ">") + text = text.replace("【", "[") + text = text.replace("】", "]") + text = text.replace("%", "%") + return text + + +# Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char +def remove_non_printing_char(text): + """ + Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl + """ + output = [] + for char in text: + cat = unicodedata.category(char) + if cat.startswith("C"): + continue + output.append(char) + return "".join(output) + + +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower 
casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + do_split_on_punc (`bool`, *optional*, defaults to `True`): + In some instances we want to skip the basic punctuation splitting so that later tokenization can capture + the full context of the words, such as contractions. + """ + + def __init__( + self, + do_lower_case=True, + never_split=None, + tokenize_chinese_chars=True, + strip_accents=None, + do_split_on_punc=True, + ): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + self.do_split_on_punc = do_split_on_punc + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. 
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + # prevents treating the same character with different unicode codepoints as different characters + unicode_normalized_text = unicodedata.normalize("NFC", text) + orig_tokens = whitespace_tokenize(unicode_normalized_text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if not self.do_split_on_punc or (never_split is not None and text in never_split): + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = 
False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class HerbertTokenizer(PreTrainedTokenizer): + """ + Construct a BPE tokenizer for HerBERT. + + Peculiarities: + + - uses BERT's pre-tokenizer: BaseTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a + punctuation character will be treated separately. 
+ + - Such pretokenized input is BPE subtokenized + + This tokenizer inherits from [`XLMTokenizer`] which contains most of the methods. Users should refer to the + superclass for more information regarding methods. + """ + + vocab_files_names = VOCAB_FILES_NAMES + + def __init__( + self, + vocab_file, + merges_file, + tokenizer_file=None, + cls_token="", + unk_token="", + pad_token="", + mask_token="", + sep_token="", + bos_token="", + do_lowercase_and_remove_accent=False, + additional_special_tokens=[ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + ], + lang2id=None, + id2lang=None, + **kwargs, + ): + try: + import sacremoses + except ImportError: + raise ImportError( + "You need to install sacremoses to use HerbertTokenizer. " + "See https://pypi.org/project/sacremoses/ for installation." + ) + + self.sm = sacremoses + + # cache of sm.MosesPunctNormalizer instance + self.cache_moses_punct_normalizer = {} + # cache of sm.MosesTokenizer instance + self.cache_moses_tokenizer = {} + self.lang_with_custom_tokenizer = {"zh", "th", "ja"} + # True for current supported model (v1.2.0), False for XLM-17 & 100 + self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent + self.lang2id = lang2id + self.id2lang = id2lang + if lang2id is not None and id2lang is not None: + assert len(lang2id) == len(id2lang) + + self.ja_word_tokenizer = None + self.zh_word_tokenizer = None + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + merges = merges_handle.read().split("\n")[:-1] + merges = [tuple(merge.split()[:2]) for merge in merges] + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {} + + super().__init__( + unk_token=unk_token, + bos_token=bos_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + 
additional_special_tokens=additional_special_tokens, + lang2id=lang2id, + id2lang=id2lang, + do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, + tokenizer_file=None, + **kwargs, + ) + + self.bert_pre_tokenizer = BasicTokenizer( + do_lower_case=False, + never_split=self.all_special_tokens, + tokenize_chinese_chars=False, + strip_accents=False, + ) + + @property + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case + def do_lower_case(self): + return self.do_lowercase_and_remove_accent + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm + def moses_punct_norm(self, text, lang): + if lang not in self.cache_moses_punct_normalizer: + punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang) + self.cache_moses_punct_normalizer[lang] = punct_normalizer + else: + punct_normalizer = self.cache_moses_punct_normalizer[lang] + return punct_normalizer.normalize(text) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize + def moses_tokenize(self, text, lang): + if lang not in self.cache_moses_tokenizer: + moses_tokenizer = self.sm.MosesTokenizer(lang=lang) + self.cache_moses_tokenizer[lang] = moses_tokenizer + else: + moses_tokenizer = self.cache_moses_tokenizer[lang] + return moses_tokenizer.tokenize(text, return_str=False, escape=False) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline + def moses_pipeline(self, text, lang): + text = replace_unicode_punct(text) + text = self.moses_punct_norm(text, lang) + text = remove_non_printing_char(text) + return text + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize + def ja_tokenize(self, text): + if self.ja_word_tokenizer is None: + try: + import Mykytea + + self.ja_word_tokenizer = Mykytea.Mykytea( + f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin" + ) + except (AttributeError, ImportError): + logger.error( + "Make sure you install KyTea 
(https://github.com/neubig/kytea) and it's python wrapper" + " (https://github.com/chezou/Mykytea-python) with the following steps" + ) + logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea") + logger.error("2. autoreconf -i") + logger.error("3. ./configure --prefix=$HOME/local") + logger.error("4. make && make install") + logger.error("5. pip install kytea") + raise + return list(self.ja_word_tokenizer.getWS(text)) + + @property + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size + def vocab_size(self): + return len(self.encoder) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe + def bpe(self, token): + word = tuple(token[:-1]) + (token[-1] + "",) + if token in self.cache: + return self.cache[token] + pairs = get_pairs(word) + + if not pairs: + return token + "" + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + if word == "\n ": + word = "\n" + self.cache[token] = word + return word + + def _tokenize(self, text): + pre_tokens = self.bert_pre_tokenizer.tokenize(text) + + split_tokens = [] + for token in pre_tokens: + if token: + split_tokens.extend(list(self.bpe(token).split(" "))) + + return split_tokens + + # Copied from 
transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index, self.unk_token) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + out_string = "".join(tokens).replace("", " ").strip() + return out_string + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. An XLM sequence has the following format: + + - single sequence: ` X ` + - pair of sequences: ` A B ` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
+ + """ + bos = [self.bos_token_id] + sep = [self.sep_token_id] + + if token_ids_1 is None: + return bos + token_ids_0 + sep + return bos + token_ids_0 + sep + token_ids_1 + sep + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). 
+ + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" 
+ ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__ + def __getstate__(self): + state = self.__dict__.copy() + state["sm"] = None + return state + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__ + def __setstate__(self, d): + self.__dict__ = d + + try: + import sacremoses + except ImportError: + raise ImportError( + "You need to install sacremoses to use XLMTokenizer. " + "See https://pypi.org/project/sacremoses/ for installation." + ) + + self.sm = sacremoses diff --git a/venv/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py b/venv/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..4cd5db58f1b93a0576bdcc1457a416e0f5856315 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py @@ -0,0 +1,158 @@ +# coding=utf-8 +# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import List, Optional, Tuple + +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging +from .tokenization_herbert import HerbertTokenizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} + + +class HerbertTokenizerFast(PreTrainedTokenizerFast): + """ + Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library). + + Peculiarities: + + - uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of + a punctuation character will be treated separately. + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the + superclass for more information regarding methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + """ + + vocab_files_names = VOCAB_FILES_NAMES + slow_tokenizer_class = HerbertTokenizer + + def __init__( + self, + vocab_file=None, + merges_file=None, + tokenizer_file=None, + cls_token="", + unk_token="", + pad_token="", + mask_token="", + sep_token="", + **kwargs, + ): + super().__init__( + vocab_file, + merges_file, + tokenizer_file=tokenizer_file, + cls_token=cls_token, + unk_token=unk_token, + pad_token=pad_token, + mask_token=mask_token, + sep_token=sep_token, + **kwargs, + ) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. An HerBERT, like BERT sequence has the following format: + + - single sequence: ` X ` + - pair of sequences: ` A B ` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. 
+ token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + + cls = [self.cls_token_id] + sep = [self.sep_token_id] + if token_ids_1 is None: + return cls + token_ids_0 + sep + + return cls + token_ids_0 + sep + token_ids_1 + sep + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. HerBERT, like + BERT sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + Args: + token_ids_0 (`List[int]`): + List of IDs. 
+ token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/pvt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cab5af9af7c99775651e2f4a322265670676b8da --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/pvt/__init__.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, +# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_vision_available, +) + + +_import_structure = { + "configuration_pvt": ["PVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "PvtConfig", "PvtOnnxConfig"], +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_pvt"] = ["PvtImageProcessor"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_pvt"] = [ + "PVT_PRETRAINED_MODEL_ARCHIVE_LIST", + "PvtForImageClassification", + "PvtModel", + "PvtPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_pvt import PVT_PRETRAINED_CONFIG_ARCHIVE_MAP, PvtConfig, PvtOnnxConfig + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_pvt import PvtImageProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_pvt import ( + PVT_PRETRAINED_MODEL_ARCHIVE_LIST, + PvtForImageClassification, + PvtModel, + PvtPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/configuration_pvt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/configuration_pvt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afb8a9a953976f6114ddf5ecb5f35f93f368ed9d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/configuration_pvt.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/convert_pvt_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/convert_pvt_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8813dfe5d830aebc04c3d694de97ae6e0dcf8d15 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/convert_pvt_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt/configuration_pvt.py b/venv/lib/python3.10/site-packages/transformers/models/pvt/configuration_pvt.py new file mode 100644 index 0000000000000000000000000000000000000000..7fc99b49cf0d78be841d90fbd0fc5e99f4dab192 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/pvt/configuration_pvt.py @@ -0,0 +1,162 @@ +# coding=utf-8 +# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, +# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Pvt model configuration""" + +from collections import OrderedDict +from typing import Callable, List, Mapping + +from packaging import version + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import PVT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class PvtConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`PvtModel`]. It is used to instantiate an Pvt + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Pvt + [Xrenya/pvt-tiny-224](https://huggingface.co/Xrenya/pvt-tiny-224) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + image_size (`int`, *optional*, defaults to 224): + The input image size + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + num_encoder_blocks (`int`, *optional*, defaults to 4): + The number of encoder blocks (i.e. stages in the Mix Transformer encoder). + depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`): + The number of layers in each encoder block. + sequence_reduction_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`): + Sequence reduction ratios in each encoder block. + hidden_sizes (`List[int]`, *optional*, defaults to `[64, 128, 320, 512]`): + Dimension of each of the encoder blocks. + patch_sizes (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`): + Patch size before each encoder block. + strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`): + Stride before each encoder block. 
+ num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`): + Number of attention heads for each attention layer in each block of the Transformer encoder. + mlp_ratios (`List[int]`, *optional*, defaults to `[8, 8, 4, 4]`): + Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the + encoder blocks. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + drop_path_rate (`float`, *optional*, defaults to 0.0): + The dropout probability for stochastic depth, used in the blocks of the Transformer encoder. + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether or not a learnable bias should be added to the queries, keys and values. + num_labels ('int', *optional*, defaults to 1000): + The number of classes. 
+ Example: + + ```python + >>> from transformers import PvtModel, PvtConfig + + >>> # Initializing a PVT Xrenya/pvt-tiny-224 style configuration + >>> configuration = PvtConfig() + + >>> # Initializing a model from the Xrenya/pvt-tiny-224 style configuration + >>> model = PvtModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "pvt" + + def __init__( + self, + image_size: int = 224, + num_channels: int = 3, + num_encoder_blocks: int = 4, + depths: List[int] = [2, 2, 2, 2], + sequence_reduction_ratios: List[int] = [8, 4, 2, 1], + hidden_sizes: List[int] = [64, 128, 320, 512], + patch_sizes: List[int] = [4, 2, 2, 2], + strides: List[int] = [4, 2, 2, 2], + num_attention_heads: List[int] = [1, 2, 5, 8], + mlp_ratios: List[int] = [8, 8, 4, 4], + hidden_act: Mapping[str, Callable] = "gelu", + hidden_dropout_prob: float = 0.0, + attention_probs_dropout_prob: float = 0.0, + initializer_range: float = 0.02, + drop_path_rate: float = 0.0, + layer_norm_eps: float = 1e-6, + qkv_bias: bool = True, + num_labels: int = 1000, + **kwargs, + ): + super().__init__(**kwargs) + + self.image_size = image_size + self.num_channels = num_channels + self.num_encoder_blocks = num_encoder_blocks + self.depths = depths + self.sequence_reduction_ratios = sequence_reduction_ratios + self.hidden_sizes = hidden_sizes + self.patch_sizes = patch_sizes + self.strides = strides + self.mlp_ratios = mlp_ratios + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.drop_path_rate = drop_path_rate + self.layer_norm_eps = layer_norm_eps + self.num_labels = num_labels + self.qkv_bias = qkv_bias + + +class PvtOnnxConfig(OnnxConfig): + torch_onnx_minimum_version = version.parse("1.11") + + @property + def inputs(self) -> Mapping[str, Mapping[int, 
str]]: + return OrderedDict( + [ + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + @property + def default_onnx_opset(self) -> int: + return 12 diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt/convert_pvt_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/pvt/convert_pvt_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..187f3200d608a57a473b429c8dae81560863cd31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/pvt/convert_pvt_to_pytorch.py @@ -0,0 +1,227 @@ +# coding=utf-8 +# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, +# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Convert Pvt checkpoints from the original library.""" + + +import argparse +from pathlib import Path + +import requests +import torch +from PIL import Image + +from transformers import PvtConfig, PvtForImageClassification, PvtImageProcessor +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +# here we list all keys to be renamed (original name on the left, our name on the right) +def create_rename_keys(config): + rename_keys = [] + for i in range(config.num_encoder_blocks): + # Remane embedings' paramters + rename_keys.append((f"pos_embed{i + 1}", f"pvt.encoder.patch_embeddings.{i}.position_embeddings")) + + rename_keys.append((f"patch_embed{i + 1}.proj.weight", f"pvt.encoder.patch_embeddings.{i}.projection.weight")) + rename_keys.append((f"patch_embed{i + 1}.proj.bias", f"pvt.encoder.patch_embeddings.{i}.projection.bias")) + rename_keys.append((f"patch_embed{i + 1}.norm.weight", f"pvt.encoder.patch_embeddings.{i}.layer_norm.weight")) + rename_keys.append((f"patch_embed{i + 1}.norm.bias", f"pvt.encoder.patch_embeddings.{i}.layer_norm.bias")) + + for j in range(config.depths[i]): + # Rename blocks' parameters + rename_keys.append( + (f"block{i + 1}.{j}.attn.q.weight", f"pvt.encoder.block.{i}.{j}.attention.self.query.weight") + ) + rename_keys.append( + (f"block{i + 1}.{j}.attn.q.bias", f"pvt.encoder.block.{i}.{j}.attention.self.query.bias") + ) + rename_keys.append( + (f"block{i + 1}.{j}.attn.kv.weight", f"pvt.encoder.block.{i}.{j}.attention.self.kv.weight") + ) + rename_keys.append((f"block{i + 1}.{j}.attn.kv.bias", f"pvt.encoder.block.{i}.{j}.attention.self.kv.bias")) + + if config.sequence_reduction_ratios[i] > 1: + rename_keys.append( + ( + f"block{i + 1}.{j}.attn.norm.weight", + f"pvt.encoder.block.{i}.{j}.attention.self.layer_norm.weight", + ) + ) + rename_keys.append( + (f"block{i + 1}.{j}.attn.norm.bias", f"pvt.encoder.block.{i}.{j}.attention.self.layer_norm.bias") + ) + 
rename_keys.append( + ( + f"block{i + 1}.{j}.attn.sr.weight", + f"pvt.encoder.block.{i}.{j}.attention.self.sequence_reduction.weight", + ) + ) + rename_keys.append( + ( + f"block{i + 1}.{j}.attn.sr.bias", + f"pvt.encoder.block.{i}.{j}.attention.self.sequence_reduction.bias", + ) + ) + + rename_keys.append( + (f"block{i + 1}.{j}.attn.proj.weight", f"pvt.encoder.block.{i}.{j}.attention.output.dense.weight") + ) + rename_keys.append( + (f"block{i + 1}.{j}.attn.proj.bias", f"pvt.encoder.block.{i}.{j}.attention.output.dense.bias") + ) + + rename_keys.append((f"block{i + 1}.{j}.norm1.weight", f"pvt.encoder.block.{i}.{j}.layer_norm_1.weight")) + rename_keys.append((f"block{i + 1}.{j}.norm1.bias", f"pvt.encoder.block.{i}.{j}.layer_norm_1.bias")) + + rename_keys.append((f"block{i + 1}.{j}.norm2.weight", f"pvt.encoder.block.{i}.{j}.layer_norm_2.weight")) + rename_keys.append((f"block{i + 1}.{j}.norm2.bias", f"pvt.encoder.block.{i}.{j}.layer_norm_2.bias")) + + rename_keys.append((f"block{i + 1}.{j}.mlp.fc1.weight", f"pvt.encoder.block.{i}.{j}.mlp.dense1.weight")) + rename_keys.append((f"block{i + 1}.{j}.mlp.fc1.bias", f"pvt.encoder.block.{i}.{j}.mlp.dense1.bias")) + rename_keys.append((f"block{i + 1}.{j}.mlp.fc2.weight", f"pvt.encoder.block.{i}.{j}.mlp.dense2.weight")) + rename_keys.append((f"block{i + 1}.{j}.mlp.fc2.bias", f"pvt.encoder.block.{i}.{j}.mlp.dense2.bias")) + + # Rename cls token + rename_keys.extend( + [ + ("cls_token", "pvt.encoder.patch_embeddings.3.cls_token"), + ] + ) + # Rename norm layer and classifier layer + rename_keys.extend( + [ + ("norm.weight", "pvt.encoder.layer_norm.weight"), + ("norm.bias", "pvt.encoder.layer_norm.bias"), + ("head.weight", "classifier.weight"), + ("head.bias", "classifier.bias"), + ] + ) + + return rename_keys + + +# we split up the matrix of each encoder layer into queries, keys and values +def read_in_k_v(state_dict, config): + # for each of the encoder blocks: + for i in range(config.num_encoder_blocks): + for j in 
range(config.depths[i]): + # read in weights + bias of keys and values (which is a single matrix in the original implementation) + kv_weight = state_dict.pop(f"pvt.encoder.block.{i}.{j}.attention.self.kv.weight") + kv_bias = state_dict.pop(f"pvt.encoder.block.{i}.{j}.attention.self.kv.bias") + # next, add keys and values (in that order) to the state dict + state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :] + state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]] + + state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[ + config.hidden_sizes[i] :, : + ] + state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :] + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + im = Image.open(requests.get(url, stream=True).raw) + return im + + +@torch.no_grad() +def convert_pvt_checkpoint(pvt_size, pvt_checkpoint, pytorch_dump_folder_path): + """ + Copy/paste/tweak model's weights to our PVT structure. 
+ """ + + # define default Pvt configuration + if pvt_size == "tiny": + config_path = "Zetatech/pvt-tiny-224" + elif pvt_size == "small": + config_path = "Zetatech/pvt-small-224" + elif pvt_size == "medium": + config_path = "Zetatech/pvt-medium-224" + elif pvt_size == "large": + config_path = "Zetatech/pvt-large-224" + else: + raise ValueError(f"Available model's size: 'tiny', 'small', 'medium', 'large', but " f"'{pvt_size}' was given") + config = PvtConfig(name_or_path=config_path) + # load original model from https://github.com/whai362/PVT + state_dict = torch.load(pvt_checkpoint, map_location="cpu") + + rename_keys = create_rename_keys(config) + for src, dest in rename_keys: + rename_key(state_dict, src, dest) + read_in_k_v(state_dict, config) + + # load HuggingFace model + model = PvtForImageClassification(config).eval() + model.load_state_dict(state_dict) + + # Check outputs on an image, prepared by PVTFeatureExtractor + image_processor = PvtImageProcessor(size=config.image_size) + encoding = image_processor(images=prepare_img(), return_tensors="pt") + pixel_values = encoding["pixel_values"] + outputs = model(pixel_values) + logits = outputs.logits.detach().cpu() + + if pvt_size == "tiny": + expected_slice_logits = torch.tensor([-1.4192, -1.9158, -0.9702]) + elif pvt_size == "small": + expected_slice_logits = torch.tensor([0.4353, -0.1960, -0.2373]) + elif pvt_size == "medium": + expected_slice_logits = torch.tensor([-0.2914, -0.2231, 0.0321]) + elif pvt_size == "large": + expected_slice_logits = torch.tensor([0.3740, -0.7739, -0.4214]) + else: + raise ValueError(f"Available model's size: 'tiny', 'small', 'medium', 'large', but " f"'{pvt_size}' was given") + + assert torch.allclose(logits[0, :3], expected_slice_logits, atol=1e-4) + + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + print(f"Saving model pytorch_model.bin to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + print(f"Saving image processor to 
{pytorch_dump_folder_path}") + image_processor.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--pvt_size", + default="tiny", + type=str, + help="Size of the PVT pretrained model you'd like to convert.", + ) + parser.add_argument( + "--pvt_checkpoint", + default="pvt_tiny.pth", + type=str, + help="Checkpoint of the PVT pretrained model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + + args = parser.parse_args() + convert_pvt_checkpoint(args.pvt_size, args.pvt_checkpoint, args.pytorch_dump_folder_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt/image_processing_pvt.py b/venv/lib/python3.10/site-packages/transformers/models/pvt/image_processing_pvt.py new file mode 100644 index 0000000000000000000000000000000000000000..f3907edf3af09394acbacb2db992c7a3a71ef091 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/pvt/image_processing_pvt.py @@ -0,0 +1,290 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Image processor class for Pvt.""" + +from typing import Dict, List, Optional, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import resize, to_channel_dimension_format +from ...image_utils import ( + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, + validate_kwargs, + validate_preprocess_arguments, +) +from ...utils import TensorType, logging + + +logger = logging.get_logger(__name__) + + +class PvtImageProcessor(BaseImageProcessor): + r""" + Constructs a PVT image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `(size["height"], + size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. + size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`): + Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` + method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): + Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the + `preprocess` method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` + parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the + `preprocess` method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` + method. 
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Optional[Dict[str, int]] = None, + resample: PILImageResampling = PILImageResampling.BILINEAR, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"height": 224, "width": 224} + size = get_size_dict(size) + self.do_resize = do_resize + self.do_rescale = do_rescale + self.do_normalize = do_normalize + self.size = size + self.resample = resample + self.rescale_factor = rescale_factor + self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD + self._valid_processor_keys = [ + "images", + "do_resize", + "size", + "resample", + "do_rescale", + "rescale_factor", + "do_normalize", + "image_mean", + "image_std", + "return_tensors", + "data_format", + "input_data_format", + ] + + # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BILINEAR, + data_format: Optional[Union[str, 
ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image to `(size["height"], size["width"])`. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): + `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. + data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + + Returns: + `np.ndarray`: The resized image. + """ + size = get_size_dict(size) + if "height" not in size or "width" not in size: + raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}") + output_size = (size["height"], size["width"]) + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def preprocess( + self, + images: ImageInput, + do_resize: Optional[bool] = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ): + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after + resizing. + resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): + `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has + an effect if `do_resize` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. 
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use if `do_normalize` is set to `True`. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
+ """ + do_resize = do_resize if do_resize is not None else self.do_resize + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + resample = resample if resample is not None else self.resample + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + + size = size if size is not None else self.size + size_dict = get_size_dict(size) + + images = make_list_of_images(images) + + validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_resize=do_resize, + size=size, + resample=resample, + ) + + # All transformations expect numpy arrays. + images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format. 
+ input_data_format = infer_channel_dimension_format(images[0]) + + if do_resize: + images = [ + self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images + ] + + data = {"pixel_values": images} + return BatchFeature(data=data, tensor_type=return_tensors) diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt/modeling_pvt.py b/venv/lib/python3.10/site-packages/transformers/models/pvt/modeling_pvt.py new file mode 100644 index 0000000000000000000000000000000000000000..b169af0cbd5668ba975d0a80c3b83d99131c687f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/pvt/modeling_pvt.py @@ -0,0 +1,668 @@ +# coding=utf-8 +# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, +# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" PyTorch PVT model.""" + +import collections +import math +from typing import Iterable, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, +) +from .configuration_pvt import PvtConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "PvtConfig" + +_CHECKPOINT_FOR_DOC = "Zetatech/pvt-tiny-224" +_EXPECTED_OUTPUT_SHAPE = [1, 50, 512] + +_IMAGE_CLASS_CHECKPOINT = "Zetatech/pvt-tiny-224" +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + + +from ..deprecated._archive_maps import PVT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Copied from transformers.models.beit.modeling_beit.drop_path +def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. 
+ """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + +# Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->Pvt +class PvtDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None) -> None: + super().__init__() + self.drop_prob = drop_prob + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + return drop_path(hidden_states, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +class PvtPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. 
+ """ + + def __init__( + self, + config: PvtConfig, + image_size: Union[int, Iterable[int]], + patch_size: Union[int, Iterable[int]], + stride: int, + num_channels: int, + hidden_size: int, + cls_token: bool = False, + ): + super().__init__() + self.config = config + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + + self.position_embeddings = nn.Parameter( + torch.randn(1, num_patches + 1 if cls_token else num_patches, hidden_size) + ) + self.cls_token = nn.Parameter(torch.zeros(1, 1, hidden_size)) if cls_token else None + self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=stride, stride=patch_size) + self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(p=config.hidden_dropout_prob) + + def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: + num_patches = height * width + if num_patches == self.config.image_size * self.config.image_size: + return self.position_embeddings + embeddings = embeddings.reshape(1, height, width, -1).permute(0, 3, 1, 2) + interpolated_embeddings = F.interpolate(embeddings, size=(height, width), mode="bilinear") + interpolated_embeddings = interpolated_embeddings.reshape(1, -1, height * width).permute(0, 2, 1) + return interpolated_embeddings + + def forward(self, pixel_values: torch.Tensor) -> Tuple[torch.Tensor, int, int]: + batch_size, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
+ ) + patch_embed = self.projection(pixel_values) + *_, height, width = patch_embed.shape + patch_embed = patch_embed.flatten(2).transpose(1, 2) + embeddings = self.layer_norm(patch_embed) + if self.cls_token is not None: + cls_token = self.cls_token.expand(batch_size, -1, -1) + embeddings = torch.cat((cls_token, embeddings), dim=1) + position_embeddings = self.interpolate_pos_encoding(self.position_embeddings[:, 1:], height, width) + position_embeddings = torch.cat((self.position_embeddings[:, :1], position_embeddings), dim=1) + else: + position_embeddings = self.interpolate_pos_encoding(self.position_embeddings, height, width) + embeddings = self.dropout(embeddings + position_embeddings) + + return embeddings, height, width + + +class PvtSelfOutput(nn.Module): + def __init__(self, config: PvtConfig, hidden_size: int): + super().__init__() + self.dense = nn.Linear(hidden_size, hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class PvtEfficientSelfAttention(nn.Module): + """Efficient self-attention mechanism with reduction of the sequence [PvT paper](https://arxiv.org/abs/2102.12122).""" + + def __init__( + self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float + ): + super().__init__() + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + + if self.hidden_size % self.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention " + f"heads ({self.num_attention_heads})" + ) + + self.attention_head_size = int(self.hidden_size / self.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) + 
        # Key/value projections share the qkv_bias setting with the query projection
        # (defined earlier in __init__, outside this view).
        self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

        # PVT's "spatial reduction attention": when the ratio is > 1, keys/values are
        # computed on a spatially downsampled copy of the hidden states, shrinking the
        # attention matrix by ratio**2.
        self.sequences_reduction_ratio = sequences_reduction_ratio
        if sequences_reduction_ratio > 1:
            # Strided conv acts as a patch-merging downsampler over the (height, width) grid.
            self.sequence_reduction = nn.Conv2d(
                hidden_size, hidden_size, kernel_size=sequences_reduction_ratio, stride=sequences_reduction_ratio
            )
            # Only created when reduction is active; forward() only uses it under the same condition.
            self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)

    def transpose_for_scores(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Reshape (batch, seq, all_head_size) to (batch, num_heads, seq, head_size) for attention."""
        new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        hidden_states = hidden_states.view(new_shape)
        return hidden_states.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        height: int,
        width: int,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor]:
        """Run (optionally spatially-reduced) self-attention over a flattened feature map.

        Args:
            hidden_states: sequence of shape (batch, height * width, hidden_size)
                — assumed; inferred from the permute/reshape below, confirm at caller.
            height, width: spatial dimensions used to fold the sequence back to a 2D grid.
            output_attentions: if True, also return the attention probabilities.

        Returns:
            Tuple of (context_layer,) or (context_layer, attention_probs).
        """
        # Queries always come from the full-resolution sequence.
        query_layer = self.transpose_for_scores(self.query(hidden_states))

        if self.sequences_reduction_ratio > 1:
            batch_size, seq_len, num_channels = hidden_states.shape
            # Reshape to (batch_size, num_channels, height, width)
            hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
            # Apply sequence reduction
            hidden_states = self.sequence_reduction(hidden_states)
            # Reshape back to (batch_size, seq_len, num_channels)
            hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
            hidden_states = self.layer_norm(hidden_states)

        # Keys/values come from the (possibly reduced) sequence.
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)

        # Merge heads back: (batch, heads, seq, head_size) -> (batch, seq, all_head_size).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class PvtAttention(nn.Module):
    """Self-attention plus output projection, with support for pruning attention heads."""

    def __init__(
        self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float
    ):
        super().__init__()
        self.self = PvtEfficientSelfAttention(
            config,
            hidden_size=hidden_size,
            num_attention_heads=num_attention_heads,
            sequences_reduction_ratio=sequences_reduction_ratio,
        )
        self.output = PvtSelfOutput(config, hidden_size=hidden_size)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from q/k/v and the output projection in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(hidden_states, height, width, output_attentions)

        attention_output = self.output(self_outputs[0])
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class PvtFFN(nn.Module):
    """Two-layer feed-forward block (dense -> activation -> dropout -> dense -> dropout)."""

    def __init__(
        self,
        config: PvtConfig,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
    ):
        super().__init__()
        # Output width defaults to the input width so the block is residual-friendly.
        out_features = out_features if out_features is not None else in_features
        self.dense1 = nn.Linear(in_features, hidden_features)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
        self.dense2 = nn.Linear(hidden_features, out_features)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense1(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense2(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class PvtLayer(nn.Module):
    """One pre-norm transformer block: attention + FFN, each with a residual and optional drop-path."""

    def __init__(
        self,
        config: PvtConfig,
        hidden_size: int,
        num_attention_heads: int,
        drop_path: float,
        sequences_reduction_ratio: float,
        mlp_ratio: float,
    ):
        super().__init__()
        self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
        self.attention = PvtAttention(
            config=config,
            hidden_size=hidden_size,
            num_attention_heads=num_attention_heads,
            sequences_reduction_ratio=sequences_reduction_ratio,
        )
        # Stochastic depth: identity when the per-layer rate is 0.
        self.drop_path = PvtDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
        mlp_hidden_size = int(hidden_size * mlp_ratio)
        self.mlp = PvtFFN(config=config, in_features=hidden_size, hidden_features=mlp_hidden_size)

    def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False):
        self_attention_outputs = self.attention(
            hidden_states=self.layer_norm_1(hidden_states),
            height=height,
            width=width,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]

        # Residual around the attention sub-block.
        attention_output = self.drop_path(attention_output)
        hidden_states = attention_output + hidden_states

        mlp_output = self.mlp(self.layer_norm_2(hidden_states))

        # Residual around the FFN sub-block.
        mlp_output = self.drop_path(mlp_output)
        layer_output = hidden_states + mlp_output

        outputs = (layer_output,) + outputs

        return outputs


class PvtEncoder(nn.Module):
    """Hierarchical PVT encoder: per-stage patch embedding followed by transformer blocks."""

    def __init__(self, config: PvtConfig):
        super().__init__()
        self.config = config

        # stochastic depth decay rule
        drop_path_decays = torch.linspace(0, config.drop_path_rate, sum(config.depths)).tolist()

        # patch embeddings
        embeddings = []

        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PvtPatchEmbeddings(
                    config=config,
                    image_size=config.image_size if i == 0 else self.config.image_size // (2 ** (i + 1)),
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                    # Only the last stage prepends a cls token (used by the classification head).
                    cls_token=i == config.num_encoder_blocks - 1,
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PvtLayer(
                        config=config,
                        hidden_size=config.hidden_sizes[i],
                        num_attention_heads=config.num_attention_heads[i],
                        # drop-path rate increases linearly with overall layer depth.
                        drop_path=drop_path_decays[cur + j],
                        sequences_reduction_ratio=config.sequence_reduction_ratios[i],
                        mlp_ratio=config.mlp_ratios[i],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

        # Layer norms
        self.layer_norm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutput]:
        """Encode pixel values through all stages.

        Returns either a `BaseModelOutput` or, when `return_dict` is False, a tuple of the
        non-None values among (last_hidden_state, all_hidden_states, all_self_attentions).
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        batch_size = pixel_values.shape[0]
        num_blocks = len(self.block)
        hidden_states = pixel_values
        for idx, (embedding_layer, block_layer) in enumerate(zip(self.patch_embeddings, self.block)):
            # first, obtain patch embeddings
            hidden_states, height, width = embedding_layer(hidden_states)
            # second, send embeddings through blocks
            for block in block_layer:
                layer_outputs = block(hidden_states, height, width, output_attentions)
                hidden_states = layer_outputs[0]
                if output_attentions:
                    all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if output_hidden_states:
                    all_hidden_states = all_hidden_states + (hidden_states,)
            if idx != num_blocks - 1:
                # Fold the sequence back to (batch, channels, height, width) so the next
                # stage's patch embedding can consume it; the last stage stays a sequence.
                hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class PvtPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PvtConfig
    base_model_prefix = "pvt"
    main_input_name = "pixel_values"

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
            # `trunc_normal_cpu` not implemented in `half` issues
            module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, PvtPatchEmbeddings):
            # Position embeddings (and the optional cls token) are learned parameters
            # and get the same truncated-normal init as linear weights.
            module.position_embeddings.data = nn.init.trunc_normal_(
                module.position_embeddings.data,
                mean=0.0,
                std=self.config.initializer_range,
            )
            if module.cls_token is not None:
                module.cls_token.data = nn.init.trunc_normal_(
                    module.cls_token.data,
                    mean=0.0,
                    std=self.config.initializer_range,
                )


PVT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`~PvtConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

PVT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PvtImageProcessor.__call__`]
            for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Pvt encoder outputting raw hidden-states without any specific head on top.",
    PVT_START_DOCSTRING,
)
class PvtModel(PvtPreTrainedModel):
    def __init__(self, config: PvtConfig):
        super().__init__(config)
        self.config = config

        # hierarchical Transformer encoder
        self.encoder = PvtEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(PVT_INPUTS_DOCSTRING.format("(batch_size, channels, height, width)"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        # Fall back to config defaults when flags are not given explicitly.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_outputs = self.encoder(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """
    Pvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.
    """,
    PVT_START_DOCSTRING,
)
class PvtForImageClassification(PvtPreTrainedModel):
    def __init__(self, config: PvtConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.pvt = PvtModel(config)

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(PVT_INPUTS_DOCSTRING.format("(batch_size, channels, height, width)"))
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor],
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.pvt(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # Classify from the first token — presumably the cls token prepended by the
        # last encoder stage (see PvtEncoder's cls_token flag); confirm against PvtPatchEmbeddings.
        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and label dtype, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )