diff --git a/.gitattributes b/.gitattributes index 66ae3ebd0a285b6c4f31ce7e0474818b3366c318..eb3511dd34fc0932771bb312cf3e0bc56cde7932 100644 --- a/.gitattributes +++ b/.gitattributes @@ -91,3 +91,4 @@ venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter venv/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text diff --git a/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..592036bf44262b5a898df771b2c07cc06c2a6897 --- /dev/null +++ b/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f33e367984cb733f88087d31e53dc1449058f30f28be9b93d239b1c04961437 +size 33555627 diff --git a/ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..b8b5cb8fe16ccfee1ae84d8d6889c87c984947ca --- /dev/null +++ b/ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9a384a318af6aa01d6a74db37ea6d0c1aba2a9a1010ad7bcca72ff654c74124 +size 50332749 diff --git a/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52b5f35e4b30868f90f3067aa10600c3009d2ff9 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715682601 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..56b1fcaf1511a4b87a09e21a7e5f80185c4c7031 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:10:30:02,550 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:10:30:07,063 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:10:30:07,065 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:10:30:07,065 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} 
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:10:30:13,560 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:10:30:13,560 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +[2024-05-14 10:30:13,185] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:10:30:14,997 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:10:30:14,997 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:10:30:15,016 INFO [task.py:395] Building contexts for indiccopa-hi on rank 3... +100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 110584.29it/s] +2024-05-14:10:30:16,730 INFO [evaluator.py:379] Running loglikelihood requests +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +Passed argument batch_size = auto:1. 
Detecting largest batch size +Determined largest batch size: 64 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 
+word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..90b553a0d2317e49ac11889e7803f23b859289a9 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T10:30:02.415083", + "startedAt": "2024-05-14T10:30:01.926182", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3394.3491184210525, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3305.051, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3288.828, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + 
}, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3325.177, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3325.078, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3324.811, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3351.43, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3320.693, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 76.92177200317383 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..c3ad6a3530e72181b3b2e93339791388b04999cd --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 21}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..56e8fbb86c828a2348ae666f8fa36c6acfd6af5d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/logs/debug-internal.log @@ -0,0 +1,191 @@ +2024-05-14 10:30:01,938 INFO StreamThr :1833 [internal.py:wandb_internal():85] W&B internal server running at pid: 1833, started at: 2024-05-14 10:30:01.938076 +2024-05-14 10:30:01,941 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: status +2024-05-14 10:30:01,942 INFO WriterThread:1833 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/run-easc41gc.wandb +2024-05-14 10:30:01,943 DEBUG SenderThread:1833 [sender.py:send():378] send: header +2024-05-14 10:30:01,951 DEBUG SenderThread:1833 [sender.py:send():378] send: run +2024-05-14 10:30:02,215 INFO SenderThread:1833 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files +2024-05-14 10:30:02,215 INFO SenderThread:1833 [sender.py:_start_run_threads():1123] run started: easc41gc with start time 1715682601.93745 +2024-05-14 10:30:02,222 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 10:30:02,222 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: check_version +2024-05-14 10:30:02,305 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 10:30:02,307 DEBUG HandlerThread:1833 [system_info.py:__init__():26] System info init +2024-05-14 10:30:02,307 DEBUG HandlerThread:1833 [system_info.py:__init__():41] System info init done +2024-05-14 10:30:02,307 INFO HandlerThread:1833 [system_monitor.py:start():194] Starting system monitor +2024-05-14 10:30:02,307 INFO SystemMonitor:1833 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 10:30:02,307 INFO HandlerThread:1833 [system_monitor.py:probe():214] Collecting system info +2024-05-14 10:30:02,308 INFO SystemMonitor:1833 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 10:30:02,308 INFO SystemMonitor:1833 [interfaces.py:start():188] Started disk monitoring +2024-05-14 10:30:02,309 INFO SystemMonitor:1833 [interfaces.py:start():188] Started memory monitoring +2024-05-14 10:30:02,309 INFO SystemMonitor:1833 [interfaces.py:start():188] Started network monitoring +2024-05-14 10:30:02,414 DEBUG HandlerThread:1833 [system_info.py:probe():150] Probing system +2024-05-14 
10:30:02,423 DEBUG HandlerThread:1833 [system_info.py:_probe_git():135] Probing git +2024-05-14 10:30:02,444 ERROR HandlerThread:1833 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 10:30:02,444 DEBUG HandlerThread:1833 [system_info.py:_probe_git():143] Probing git done +2024-05-14 10:30:02,444 DEBUG HandlerThread:1833 [system_info.py:probe():198] Probing system done +2024-05-14 10:30:02,444 DEBUG HandlerThread:1833 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T10:30:02.415083', 'startedAt': '2024-05-14T10:30:01.926182', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3394.3491184210525, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3305.051, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3288.828, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3325.177, 'min': 800.0, 'max': 
3400.0}, {'current': 3325.078, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3324.811, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3351.43, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3320.693, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 76.92177200317383}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 10:30:02,444 INFO HandlerThread:1833 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 10:30:02,444 INFO HandlerThread:1833 [system_monitor.py:probe():227] Publishing system info +2024-05-14 10:30:02,445 INFO HandlerThread:1833 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 10:30:02,449 DEBUG SenderThread:1833 [sender.py:send():378] send: files +2024-05-14 10:30:02,449 INFO SenderThread:1833 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 10:30:02,546 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 10:30:02,546 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:30:02,547 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: python_packages +2024-05-14 10:30:02,547 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:30:02,761 DEBUG SenderThread:1833 [sender.py:send():378] send: telemetry +2024-05-14 10:30:02,939 INFO wandb-upload_0:1833 [upload_job.py:push():130] Uploaded file /tmp/tmp38tu7rqewandb/71400s0p-wandb-metadata.json +2024-05-14 10:30:03,216 INFO Thread-12 :1833 [dir_watcher.py:_on_file_created():271] file/dir created: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log +2024-05-14 10:30:03,217 INFO Thread-12 :1833 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-metadata.json +2024-05-14 10:30:03,217 INFO Thread-12 :1833 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/requirements.txt +2024-05-14 10:30:05,217 INFO Thread-12 :1833 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log +2024-05-14 10:30:07,065 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:09,221 INFO Thread-12 :1833 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log +2024-05-14 10:30:12,066 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:13,225 INFO Thread-12 :1833 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log +2024-05-14 10:30:15,228 INFO Thread-12 :1833 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log +2024-05-14 10:30:17,229 INFO Thread-12 :1833 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log +2024-05-14 10:30:17,236 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:17,548 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:30:17,548 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:30:19,231 INFO Thread-12 :1833 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log +2024-05-14 10:30:22,660 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:23,941 DEBUG SenderThread:1833 [sender.py:send():378] send: exit +2024-05-14 10:30:23,941 INFO SenderThread:1833 [sender.py:send_exit():585] handling exit code: 0 +2024-05-14 10:30:23,941 INFO SenderThread:1833 [sender.py:send_exit():587] handling runtime: 21 +2024-05-14 10:30:23,942 INFO SenderThread:1833 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:30:23,942 INFO SenderThread:1833 [sender.py:send_exit():593] send defer +2024-05-14 10:30:23,943 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,943 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 10:30:23,943 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,943 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 10:30:23,943 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 1 +2024-05-14 10:30:23,943 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,943 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] 
handle defer: 1 +2024-05-14 10:30:23,943 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,943 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 10:30:23,943 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 2 +2024-05-14 10:30:23,943 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,943 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 10:30:23,943 INFO HandlerThread:1833 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 10:30:23,943 DEBUG SystemMonitor:1833 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 10:30:23,944 DEBUG SystemMonitor:1833 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 10:30:23,944 INFO HandlerThread:1833 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 10:30:23,944 DEBUG SystemMonitor:1833 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 10:30:23,944 INFO HandlerThread:1833 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 10:30:23,946 INFO HandlerThread:1833 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 10:30:23,946 INFO HandlerThread:1833 [interfaces.py:finish():200] Joined network monitor +2024-05-14 10:30:23,947 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,947 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 10:30:23,947 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 3 +2024-05-14 10:30:23,947 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,947 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 10:30:23,947 DEBUG SenderThread:1833 [sender.py:send():378] send: stats +2024-05-14 10:30:23,948 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,948 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 10:30:23,948 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 4 +2024-05-14 10:30:23,948 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,948 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 10:30:23,948 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,948 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 10:30:23,948 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 5 +2024-05-14 10:30:23,948 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,948 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 10:30:23,948 DEBUG SenderThread:1833 [sender.py:send():378] send: summary +2024-05-14 10:30:23,949 INFO SenderThread:1833 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:30:23,949 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,949 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 10:30:23,949 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 6 
+2024-05-14 10:30:23,949 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,949 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 10:30:23,949 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,949 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 10:30:23,952 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:24,121 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 7 +2024-05-14 10:30:24,122 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:24,122 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 10:30:24,122 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:24,122 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 10:30:24,236 INFO Thread-12 :1833 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/config.yaml +2024-05-14 10:30:24,236 INFO Thread-12 :1833 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-summary.json +2024-05-14 10:30:24,941 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:25,201 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 8 +2024-05-14 10:30:25,201 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:25,201 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:25,201 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 10:30:25,201 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:25,201 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 10:30:25,201 INFO SenderThread:1833 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 10:30:25,202 INFO SenderThread:1833 [job_builder.py:_get_source_type():576] no source found +2024-05-14 10:30:25,202 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 9 +2024-05-14 10:30:25,202 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:25,202 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 10:30:25,202 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:25,202 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 10:30:25,202 INFO SenderThread:1833 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 10:30:25,237 INFO SenderThread:1833 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log +2024-05-14 10:30:25,237 INFO SenderThread:1833 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files +2024-05-14 10:30:25,237 INFO SenderThread:1833 [dir_watcher.py:finish():402] scan save: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/config.yaml config.yaml +2024-05-14 10:30:25,237 INFO SenderThread:1833 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log output.log +2024-05-14 10:30:25,237 INFO SenderThread:1833 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-summary.json wandb-summary.json +2024-05-14 10:30:25,237 INFO SenderThread:1833 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-metadata.json wandb-metadata.json +2024-05-14 10:30:25,237 INFO SenderThread:1833 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/requirements.txt requirements.txt +2024-05-14 10:30:25,242 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 10 +2024-05-14 10:30:25,242 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:25,244 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 10:30:25,244 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:25,244 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 10:30:25,244 INFO SenderThread:1833 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:30:25,476 INFO wandb-upload_0:1833 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/output.log +2024-05-14 10:30:25,639 INFO wandb-upload_1:1833 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/config.yaml +2024-05-14 10:30:25,721 INFO wandb-upload_3:1833 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/requirements.txt +2024-05-14 10:30:25,753 INFO wandb-upload_2:1833 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/files/wandb-summary.json +2024-05-14 10:30:25,941 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:25,942 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:25,954 INFO Thread-11 (_thread_body):1833 [sender.py:transition_state():613] send defer: 11 +2024-05-14 10:30:25,954 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:25,954 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 10:30:25,955 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:25,955 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 10:30:25,955 INFO SenderThread:1833 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 10:30:25,955 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 12 +2024-05-14 10:30:25,955 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:25,955 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 10:30:25,955 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 
10:30:25,955 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 10:30:25,955 INFO SenderThread:1833 [file_stream.py:finish():601] file stream finish called +2024-05-14 10:30:26,176 INFO SenderThread:1833 [file_stream.py:finish():605] file stream finish is done +2024-05-14 10:30:26,176 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 13 +2024-05-14 10:30:26,176 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:26,176 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 10:30:26,176 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:26,176 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 10:30:26,176 INFO SenderThread:1833 [sender.py:transition_state():613] send defer: 14 +2024-05-14 10:30:26,177 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:26,177 DEBUG SenderThread:1833 [sender.py:send():378] send: final +2024-05-14 10:30:26,177 INFO HandlerThread:1833 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 10:30:26,177 DEBUG SenderThread:1833 [sender.py:send():378] send: footer +2024-05-14 10:30:26,177 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:26,177 INFO SenderThread:1833 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 10:30:26,178 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:26,178 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:26,178 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:26,178 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:26,178 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 10:30:26,178 DEBUG SenderThread:1833 [sender.py:send_request():405] send_request: server_info +2024-05-14 10:30:26,179 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 10:30:26,180 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 10:30:26,180 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 10:30:26,232 INFO MainThread:1833 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 10:30:26,232 INFO MainThread:1833 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 10:30:26,232 INFO MainThread:1833 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 10:30:26,232 DEBUG HandlerThread:1833 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 10:30:26,232 INFO HandlerThread:1833 [handler.py:finish():882] shutting down handler +2024-05-14 10:30:27,178 INFO WriterThread:1833 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/run-easc41gc.wandb +2024-05-14 10:30:27,232 INFO SenderThread:1833 [sender.py:finish():1545] shutting down sender +2024-05-14 10:30:27,232 INFO SenderThread:1833 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:30:27,232 INFO SenderThread:1833 [file_pusher.py:join():175] waiting for file pusher diff --git 
a/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..117f3e78175d0cc5e176176cdb606383f4013e25 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 10:30:01,934 INFO MainThread:483 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 10:30:01,934 INFO MainThread:483 [wandb_setup.py:_flush():76] Configure stats pid to 483 +2024-05-14 10:30:01,934 INFO MainThread:483 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 10:30:01,934 INFO MainThread:483 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 10:30:01,934 INFO MainThread:483 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 10:30:01,934 INFO MainThread:483 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 10:30:01,934 WARNING MainThread:483 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 10:30:01,934 INFO MainThread:483 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 10:30:01,934 INFO MainThread:483 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 10:30:01,935 INFO MainThread:483 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/logs/debug.log +2024-05-14 10:30:01,935 INFO MainThread:483 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/logs/debug-internal.log +2024-05-14 10:30:01,935 INFO MainThread:483 [wandb_init.py:init():560] calling init triggers +2024-05-14 10:30:01,935 INFO MainThread:483 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 10:30:01,935 INFO MainThread:483 [wandb_init.py:init():610] starting backend +2024-05-14 10:30:01,935 INFO MainThread:483 [wandb_init.py:init():614] setting up manager +2024-05-14 10:30:01,936 INFO MainThread:483 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 10:30:01,937 INFO MainThread:483 [wandb_init.py:init():622] backend started and connected +2024-05-14 10:30:01,940 INFO MainThread:483 [wandb_init.py:init():711] updated telemetry +2024-05-14 10:30:01,951 INFO MainThread:483 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 10:30:02,222 INFO MainThread:483 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 10:30:02,301 INFO MainThread:483 [wandb_run.py:_on_init():2405] got version response +2024-05-14 10:30:02,301 INFO MainThread:483 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 10:30:02,547 INFO MainThread:483 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 10:30:02,547 INFO MainThread:483 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 10:30:02,547 INFO MainThread:483 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 10:30:02,547 INFO MainThread:483 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 10:30:02,549 INFO MainThread:483 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 10:30:27,233 WARNING MsgRouterThr:483 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/run-easc41gc.wandb b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/run-easc41gc.wandb new file mode 100644 index 0000000000000000000000000000000000000000..e18d9e34b8ad99f1e543dd860d71036c6bc12115 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103001-easc41gc/run-easc41gc.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5c1ac4e49694bcc5d68ae5be2d5f70da2044a198 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715686900 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..9956592d8c518ab613d2ad935c7762cef2b88e3f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:11:41:40,822 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:11:41:46,726 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:11:41:46,728 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:11:41:46,729 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +[2024-05-14 11:41:58,482] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. 
This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:11:41:58,862 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:11:41:58,862 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:11:42:00,168 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:11:42:00,168 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:11:42:00,187 INFO [task.py:395] Building contexts for indiccopa-hi on rank 7... +100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 110532.25it/s] +Passed argument batch_size = auto:1. Detecting largest batch size +2024-05-14:11:42:01,858 INFO [evaluator.py:379] Running loglikelihood requests +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. 
+ warnings.warn( +Determined largest batch size: 64 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 
+xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..dcae6367b5ad153fe7f4933bc5d1b5869935cd87 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T11:41:40.687975", + "startedAt": "2024-05-14T11:41:40.283769", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=global_step100" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3388.1249013157903, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3224.073, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3224.072, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3228.66, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3224.071, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3235.82, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3315.877, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3227.055, + "min": 800.0, + "max": 3400.0 + }, + { 
+ "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 77.68970108032227 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..4841bc4504e219db83b702d09e68cfaa8fa95063 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 28}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..77cb9dd9ed238ccaa71fdda2e9fa5718b3112324 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/logs/debug-internal.log @@ -0,0 +1,192 @@ +2024-05-14 11:41:40,294 INFO StreamThr :72094 [internal.py:wandb_internal():85] W&B internal server running at pid: 72094, started at: 2024-05-14 11:41:40.293793 +2024-05-14 11:41:40,296 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: status +2024-05-14 11:41:40,298 INFO WriterThread:72094 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/run-7ou0qdee.wandb +2024-05-14 11:41:40,299 DEBUG SenderThread:72094 [sender.py:send():378] send: header +2024-05-14 11:41:40,313 DEBUG SenderThread:72094 [sender.py:send():378] send: run +2024-05-14 11:41:40,544 INFO SenderThread:72094 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files +2024-05-14 11:41:40,544 INFO SenderThread:72094 [sender.py:_start_run_threads():1123] run started: 7ou0qdee with start time 1715686900.293775 +2024-05-14 11:41:40,554 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 11:41:40,554 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: check_version +2024-05-14 11:41:40,637 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 11:41:40,639 DEBUG HandlerThread:72094 [system_info.py:__init__():26] System info init +2024-05-14 11:41:40,639 DEBUG HandlerThread:72094 [system_info.py:__init__():41] System info init done +2024-05-14 11:41:40,639 INFO HandlerThread:72094 [system_monitor.py:start():194] Starting system monitor +2024-05-14 11:41:40,639 INFO SystemMonitor:72094 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 11:41:40,639 INFO HandlerThread:72094 [system_monitor.py:probe():214] Collecting system info +2024-05-14 11:41:40,639 INFO SystemMonitor:72094 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 11:41:40,640 INFO SystemMonitor:72094 [interfaces.py:start():188] Started disk monitoring +2024-05-14 11:41:40,640 INFO SystemMonitor:72094 [interfaces.py:start():188] Started memory monitoring +2024-05-14 11:41:40,641 INFO SystemMonitor:72094 [interfaces.py:start():188] Started network monitoring +2024-05-14 11:41:40,687 DEBUG HandlerThread:72094 [system_info.py:probe():150] Probing 
system +2024-05-14 11:41:40,697 DEBUG HandlerThread:72094 [system_info.py:_probe_git():135] Probing git +2024-05-14 11:41:40,716 ERROR HandlerThread:72094 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 11:41:40,717 DEBUG HandlerThread:72094 [system_info.py:_probe_git():143] Probing git done +2024-05-14 11:41:40,717 DEBUG HandlerThread:72094 [system_info.py:probe():198] Probing system done +2024-05-14 11:41:40,717 DEBUG HandlerThread:72094 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T11:41:40.687975', 'startedAt': '2024-05-14T11:41:40.283769', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=global_step100'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3388.1249013157903, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3224.073, 'min': 800.0, 'max': 3400.0}, {'current': 3224.072, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3228.66, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3224.071, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3235.82, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3315.877, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3227.055, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 77.68970108032227}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 11:41:40,717 INFO HandlerThread:72094 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 11:41:40,717 INFO HandlerThread:72094 [system_monitor.py:probe():227] Publishing system info +2024-05-14 11:41:40,718 INFO HandlerThread:72094 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 11:41:40,721 DEBUG SenderThread:72094 [sender.py:send():378] send: files +2024-05-14 11:41:40,721 INFO SenderThread:72094 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 11:41:40,819 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 11:41:40,819 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: python_packages +2024-05-14 11:41:40,819 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 11:41:40,820 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: stop_status +2024-05-14 11:41:40,937 DEBUG SenderThread:72094 [sender.py:send():378] send: telemetry +2024-05-14 11:41:41,226 INFO wandb-upload_0:72094 [upload_job.py:push():130] Uploaded file /tmp/tmpj0t9f_5nwandb/3d0414us-wandb-metadata.json +2024-05-14 11:41:41,545 INFO Thread-12 :72094 
[dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:41:41,545 INFO Thread-12 :72094 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-metadata.json +2024-05-14 11:41:41,545 INFO Thread-12 :72094 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/requirements.txt +2024-05-14 11:41:43,545 INFO Thread-12 :72094 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:41:45,938 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:41:49,556 INFO Thread-12 :72094 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:41:51,730 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:41:55,560 INFO Thread-12 :72094 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:41:55,820 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 11:41:55,821 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: stop_status +2024-05-14 11:41:56,901 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:42:00,567 INFO Thread-12 :72094 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:42:01,568 INFO Thread-12 :72094 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:42:02,010 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:42:02,569 INFO Thread-12 :72094 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:42:03,569 INFO Thread-12 :72094 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:42:04,571 INFO Thread-12 :72094 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:42:07,363 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:42:09,382 DEBUG SenderThread:72094 [sender.py:send():378] send: exit +2024-05-14 11:42:09,382 INFO SenderThread:72094 [sender.py:send_exit():585] handling exit code: 0 +2024-05-14 11:42:09,382 INFO SenderThread:72094 [sender.py:send_exit():587] handling runtime: 28 +2024-05-14 11:42:09,383 INFO SenderThread:72094 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 11:42:09,383 INFO SenderThread:72094 [sender.py:send_exit():593] send defer +2024-05-14 11:42:09,383 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 
11:42:09,383 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 11:42:09,383 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,383 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 11:42:09,383 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 1 +2024-05-14 11:42:09,383 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,383 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 11:42:09,384 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,384 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 11:42:09,384 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 2 +2024-05-14 11:42:09,384 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,384 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 11:42:09,384 INFO HandlerThread:72094 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 11:42:09,384 DEBUG SystemMonitor:72094 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 11:42:09,384 INFO HandlerThread:72094 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 11:42:09,384 DEBUG SystemMonitor:72094 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 11:42:09,385 INFO HandlerThread:72094 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 11:42:09,385 DEBUG SystemMonitor:72094 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 11:42:09,385 INFO HandlerThread:72094 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 11:42:09,386 INFO HandlerThread:72094 [interfaces.py:finish():200] Joined network monitor +2024-05-14 11:42:09,386 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,386 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 11:42:09,387 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 3 +2024-05-14 11:42:09,387 DEBUG SenderThread:72094 [sender.py:send():378] send: stats +2024-05-14 11:42:09,387 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,387 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 11:42:09,387 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,387 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 11:42:09,387 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 4 +2024-05-14 11:42:09,387 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,387 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 11:42:09,388 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,388 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 11:42:09,388 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 5 +2024-05-14 11:42:09,388 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer 
+2024-05-14 11:42:09,388 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 11:42:09,388 DEBUG SenderThread:72094 [sender.py:send():378] send: summary +2024-05-14 11:42:09,388 INFO SenderThread:72094 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 11:42:09,389 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,389 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 11:42:09,389 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 6 +2024-05-14 11:42:09,389 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,389 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 11:42:09,389 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,389 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 11:42:09,391 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:42:09,481 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 7 +2024-05-14 11:42:09,481 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,481 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 11:42:09,481 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,481 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 11:42:09,574 INFO Thread-12 :72094 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/config.yaml +2024-05-14 11:42:09,574 INFO Thread-12 :72094 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-summary.json +2024-05-14 11:42:10,382 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 11:42:12,496 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 8 +2024-05-14 11:42:12,496 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 11:42:12,496 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:12,497 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 11:42:12,497 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:12,497 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 11:42:12,497 INFO SenderThread:72094 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 11:42:12,497 INFO SenderThread:72094 [job_builder.py:_get_source_type():576] no source found +2024-05-14 11:42:12,497 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 9 +2024-05-14 11:42:12,497 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:12,497 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 11:42:12,497 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:12,497 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender 
defer: 9 +2024-05-14 11:42:12,498 INFO SenderThread:72094 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 11:42:12,576 INFO SenderThread:72094 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:42:12,576 INFO SenderThread:72094 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files +2024-05-14 11:42:12,576 INFO SenderThread:72094 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-summary.json wandb-summary.json +2024-05-14 11:42:12,576 INFO SenderThread:72094 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/config.yaml config.yaml +2024-05-14 11:42:12,576 INFO SenderThread:72094 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/requirements.txt requirements.txt +2024-05-14 11:42:12,579 INFO SenderThread:72094 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-metadata.json wandb-metadata.json +2024-05-14 11:42:12,579 INFO SenderThread:72094 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log output.log +2024-05-14 11:42:12,579 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 10 +2024-05-14 11:42:12,579 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:12,580 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 11:42:12,581 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:12,581 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 11:42:12,581 INFO SenderThread:72094 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 11:42:12,809 INFO wandb-upload_0:72094 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/wandb-summary.json +2024-05-14 11:42:12,981 INFO wandb-upload_1:72094 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/config.yaml +2024-05-14 11:42:13,055 INFO wandb-upload_3:72094 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/output.log +2024-05-14 11:42:13,065 INFO wandb-upload_2:72094 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/files/requirements.txt +2024-05-14 11:42:13,265 INFO Thread-11 (_thread_body):72094 [sender.py:transition_state():613] send defer: 11 +2024-05-14 11:42:13,266 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:13,266 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 11:42:13,267 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:13,267 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 11:42:13,267 INFO SenderThread:72094 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 11:42:13,267 INFO SenderThread:72094 
[sender.py:transition_state():613] send defer: 12 +2024-05-14 11:42:13,267 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:13,267 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 11:42:13,267 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:13,267 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 11:42:13,267 INFO SenderThread:72094 [file_stream.py:finish():601] file stream finish called +2024-05-14 11:42:13,362 INFO SenderThread:72094 [file_stream.py:finish():605] file stream finish is done +2024-05-14 11:42:13,362 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 13 +2024-05-14 11:42:13,363 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:13,363 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 11:42:13,363 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:13,363 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 11:42:13,363 INFO SenderThread:72094 [sender.py:transition_state():613] send defer: 14 +2024-05-14 11:42:13,363 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:13,363 DEBUG SenderThread:72094 [sender.py:send():378] send: final +2024-05-14 11:42:13,363 INFO HandlerThread:72094 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 11:42:13,363 DEBUG SenderThread:72094 [sender.py:send():378] send: footer +2024-05-14 11:42:13,364 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:13,364 INFO SenderThread:72094 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 11:42:13,364 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 11:42:13,364 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 11:42:13,364 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 11:42:13,365 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 11:42:13,365 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 11:42:13,365 DEBUG SenderThread:72094 [sender.py:send_request():405] send_request: server_info +2024-05-14 11:42:13,366 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 11:42:13,366 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 11:42:13,366 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 11:42:13,430 INFO MainThread:72094 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 11:42:13,430 INFO MainThread:72094 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 11:42:13,430 INFO MainThread:72094 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 11:42:13,430 DEBUG HandlerThread:72094 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 11:42:13,430 INFO HandlerThread:72094 [handler.py:finish():882] shutting down handler +2024-05-14 11:42:14,365 INFO WriterThread:72094 [datastore.py:close():296] close: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/run-7ou0qdee.wandb +2024-05-14 11:42:14,430 INFO SenderThread:72094 [sender.py:finish():1545] shutting down sender +2024-05-14 11:42:14,430 INFO SenderThread:72094 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 11:42:14,430 INFO SenderThread:72094 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..531e978ef4c96338844dbc3003b70cd56dd57587 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 11:41:40,290 INFO MainThread:70759 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 11:41:40,290 INFO MainThread:70759 [wandb_setup.py:_flush():76] Configure stats pid to 70759 +2024-05-14 11:41:40,290 INFO MainThread:70759 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 11:41:40,290 INFO MainThread:70759 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 11:41:40,290 INFO MainThread:70759 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 11:41:40,291 INFO MainThread:70759 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 11:41:40,291 WARNING MainThread:70759 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 11:41:40,291 INFO MainThread:70759 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 11:41:40,291 INFO MainThread:70759 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 11:41:40,291 INFO MainThread:70759 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/logs/debug.log +2024-05-14 11:41:40,291 INFO MainThread:70759 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/logs/debug-internal.log +2024-05-14 11:41:40,291 INFO MainThread:70759 [wandb_init.py:init():560] calling init triggers +2024-05-14 11:41:40,291 INFO MainThread:70759 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 11:41:40,291 INFO MainThread:70759 [wandb_init.py:init():610] starting backend +2024-05-14 11:41:40,291 INFO MainThread:70759 [wandb_init.py:init():614] setting up manager +2024-05-14 11:41:40,292 INFO MainThread:70759 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 11:41:40,293 INFO MainThread:70759 [wandb_init.py:init():622] backend started and connected +2024-05-14 11:41:40,298 INFO MainThread:70759 [wandb_init.py:init():711] updated telemetry +2024-05-14 11:41:40,313 INFO MainThread:70759 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 11:41:40,554 INFO MainThread:70759 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 11:41:40,633 INFO MainThread:70759 [wandb_run.py:_on_init():2405] got version response +2024-05-14 11:41:40,633 INFO MainThread:70759 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 11:41:40,819 INFO 
MainThread:70759 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 11:41:40,819 INFO MainThread:70759 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 11:41:40,819 INFO MainThread:70759 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 11:41:40,819 INFO MainThread:70759 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 11:41:40,820 INFO MainThread:70759 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 11:42:14,431 WARNING MsgRouterThr:70759 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/run-7ou0qdee.wandb b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/run-7ou0qdee.wandb new file mode 100644 index 0000000000000000000000000000000000000000..1b5b6f2c6b2aa01126b83b6a70cd3a21d6cd136a Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_114140-7ou0qdee/run-7ou0qdee.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5c1ac4e49694bcc5d68ae5be2d5f70da2044a198 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715686900 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..ad739929d521c986ec918f4335618896dfacb78b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:11:41:40,718 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:11:41:46,726 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:11:41:46,727 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:11:41:46,728 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . 
This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:11:41:58,799 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:11:41:58,800 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +[2024-05-14 11:41:58,429] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:11:42:00,015 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:11:42:00,015 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:11:42:00,033 INFO [task.py:395] Building contexts for indiccopa-hi on rank 5... +100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 110221.03it/s] +Passed argument batch_size = auto:1. Detecting largest batch size +2024-05-14:11:42:01,858 INFO [evaluator.py:379] Running loglikelihood requests +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. 
+ warnings.warn( +Determined largest batch size: 64 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 
+xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..b10d8a2bcf86b41c43c778dbd877fc6db8153e44 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T11:41:40.582314", + "startedAt": "2024-05-14T11:41:40.132701", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=global_step100" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3392.1245789473687, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3210.728, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + 
}, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3387.845, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3387.86, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3349.326, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3212.52, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3216.884, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 77.68967056274414 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..4841bc4504e219db83b702d09e68cfaa8fa95063 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 28}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..d3ae6605e9b2769b61fa1f3ac193f62817ac1b17 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/logs/debug-internal.log @@ -0,0 +1,193 @@ +2024-05-14 11:41:40,160 INFO StreamThr :72055 [internal.py:wandb_internal():85] W&B internal server running at pid: 72055, started at: 2024-05-14 11:41:40.160269 +2024-05-14 11:41:40,162 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: status +2024-05-14 11:41:40,163 INFO WriterThread:72055 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/run-epmtxgfa.wandb +2024-05-14 11:41:40,165 DEBUG SenderThread:72055 [sender.py:send():378] send: header +2024-05-14 11:41:40,189 DEBUG SenderThread:72055 [sender.py:send():378] send: run +2024-05-14 11:41:40,416 INFO SenderThread:72055 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files +2024-05-14 11:41:40,416 INFO SenderThread:72055 [sender.py:_start_run_threads():1123] run started: epmtxgfa with start time 1715686900.160097 +2024-05-14 11:41:40,423 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 11:41:40,423 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: check_version +2024-05-14 11:41:40,510 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 11:41:40,512 DEBUG HandlerThread:72055 [system_info.py:__init__():26] System info init +2024-05-14 11:41:40,512 DEBUG HandlerThread:72055 [system_info.py:__init__():41] System info init done +2024-05-14 11:41:40,512 INFO HandlerThread:72055 [system_monitor.py:start():194] Starting system monitor +2024-05-14 11:41:40,512 INFO SystemMonitor:72055 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 11:41:40,513 INFO HandlerThread:72055 [system_monitor.py:probe():214] Collecting system info +2024-05-14 11:41:40,513 INFO SystemMonitor:72055 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 11:41:40,513 INFO SystemMonitor:72055 [interfaces.py:start():188] Started disk monitoring +2024-05-14 11:41:40,514 INFO SystemMonitor:72055 [interfaces.py:start():188] Started memory monitoring +2024-05-14 11:41:40,514 INFO SystemMonitor:72055 [interfaces.py:start():188] Started network monitoring +2024-05-14 11:41:40,582 DEBUG HandlerThread:72055 [system_info.py:probe():150] Probing 
system +2024-05-14 11:41:40,593 DEBUG HandlerThread:72055 [system_info.py:_probe_git():135] Probing git +2024-05-14 11:41:40,615 ERROR HandlerThread:72055 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 11:41:40,615 DEBUG HandlerThread:72055 [system_info.py:_probe_git():143] Probing git done +2024-05-14 11:41:40,615 DEBUG HandlerThread:72055 [system_info.py:probe():198] Probing system done +2024-05-14 11:41:40,615 DEBUG HandlerThread:72055 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T11:41:40.582314', 'startedAt': '2024-05-14T11:41:40.132701', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=global_step100'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3392.1245789473687, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3210.728, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3387.845, 'min': 800.0, 'max': 3400.0}, {'current': 3387.86, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3349.326, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3212.52, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3216.884, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 77.68967056274414}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 11:41:40,615 INFO HandlerThread:72055 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 11:41:40,615 INFO HandlerThread:72055 [system_monitor.py:probe():227] Publishing system info +2024-05-14 11:41:40,617 INFO HandlerThread:72055 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 11:41:40,620 DEBUG SenderThread:72055 [sender.py:send():378] send: files +2024-05-14 11:41:40,620 INFO SenderThread:72055 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 11:41:40,715 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 11:41:40,715 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: python_packages +2024-05-14 11:41:40,715 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 11:41:40,716 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: stop_status +2024-05-14 11:41:40,862 DEBUG SenderThread:72055 [sender.py:send():378] send: telemetry +2024-05-14 11:41:41,162 INFO wandb-upload_0:72055 [upload_job.py:push():130] Uploaded file /tmp/tmp816cz92nwandb/9pwfnurd-wandb-metadata.json +2024-05-14 11:41:41,418 INFO Thread-12 :72055 
[dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:41:41,418 INFO Thread-12 :72055 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/requirements.txt +2024-05-14 11:41:41,418 INFO Thread-12 :72055 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-metadata.json +2024-05-14 11:41:43,419 INFO Thread-12 :72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:41:45,864 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:41:47,422 INFO Thread-12 :72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:41:51,729 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:41:55,441 INFO Thread-12 :72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:41:55,716 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 11:41:55,716 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: stop_status +2024-05-14 11:41:56,805 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:41:59,451 INFO Thread-12 :72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:42:00,453 INFO Thread-12 :72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:42:01,459 INFO Thread-12 :72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:42:01,859 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:42:02,465 INFO Thread-12 :72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:42:03,467 INFO Thread-12 :72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:42:04,469 INFO Thread-12 :72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:42:07,347 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:42:09,389 DEBUG SenderThread:72055 [sender.py:send():378] send: exit +2024-05-14 11:42:09,389 INFO SenderThread:72055 [sender.py:send_exit():585] handling exit code: 0 +2024-05-14 11:42:09,389 INFO SenderThread:72055 [sender.py:send_exit():587] handling runtime: 28 +2024-05-14 11:42:09,391 INFO SenderThread:72055 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 
11:42:09,391 INFO SenderThread:72055 [sender.py:send_exit():593] send defer +2024-05-14 11:42:09,391 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,391 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 11:42:09,391 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,391 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 11:42:09,391 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 1 +2024-05-14 11:42:09,391 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,391 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 11:42:09,391 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,391 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 11:42:09,391 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 2 +2024-05-14 11:42:09,391 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,391 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 11:42:09,391 INFO HandlerThread:72055 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 11:42:09,392 DEBUG SystemMonitor:72055 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 11:42:09,392 INFO HandlerThread:72055 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 11:42:09,392 DEBUG SystemMonitor:72055 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 11:42:09,393 INFO HandlerThread:72055 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 11:42:09,393 DEBUG SystemMonitor:72055 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 11:42:09,393 INFO HandlerThread:72055 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 11:42:09,395 INFO HandlerThread:72055 [interfaces.py:finish():200] Joined network monitor +2024-05-14 11:42:09,395 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,395 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 11:42:09,395 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 3 +2024-05-14 11:42:09,395 DEBUG SenderThread:72055 [sender.py:send():378] send: stats +2024-05-14 11:42:09,395 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,396 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 11:42:09,396 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,396 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 11:42:09,396 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 4 +2024-05-14 11:42:09,396 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,396 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 11:42:09,396 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,396 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 
11:42:09,396 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 5 +2024-05-14 11:42:09,397 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,397 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 11:42:09,397 DEBUG SenderThread:72055 [sender.py:send():378] send: summary +2024-05-14 11:42:09,397 INFO SenderThread:72055 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 11:42:09,397 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,397 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 11:42:09,398 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 6 +2024-05-14 11:42:09,398 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,398 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 11:42:09,398 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,398 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 11:42:09,400 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 11:42:09,488 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 7 +2024-05-14 11:42:09,489 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:09,489 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 11:42:09,489 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:09,489 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 11:42:09,493 INFO Thread-12 :72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/config.yaml +2024-05-14 11:42:09,493 INFO Thread-12 :72055 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-summary.json +2024-05-14 11:42:10,389 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 11:42:12,446 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 8 +2024-05-14 11:42:12,446 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 11:42:12,446 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:12,446 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 11:42:12,446 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:12,446 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 11:42:12,446 INFO SenderThread:72055 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 11:42:12,447 INFO SenderThread:72055 [job_builder.py:_get_source_type():576] no source found +2024-05-14 11:42:12,447 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 9 +2024-05-14 11:42:12,447 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:12,447 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 9 
+2024-05-14 11:42:12,447 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:12,447 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 11:42:12,447 INFO SenderThread:72055 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 11:42:12,495 INFO SenderThread:72055 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:42:12,495 INFO SenderThread:72055 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files +2024-05-14 11:42:12,496 INFO SenderThread:72055 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/config.yaml config.yaml +2024-05-14 11:42:12,496 INFO SenderThread:72055 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/requirements.txt requirements.txt +2024-05-14 11:42:12,496 INFO SenderThread:72055 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-summary.json wandb-summary.json +2024-05-14 11:42:12,496 INFO SenderThread:72055 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-metadata.json wandb-metadata.json +2024-05-14 11:42:12,496 INFO SenderThread:72055 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log output.log +2024-05-14 11:42:12,498 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 10 +2024-05-14 11:42:12,498 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:12,498 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 11:42:12,502 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:12,502 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 11:42:12,502 INFO SenderThread:72055 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 11:42:12,742 INFO wandb-upload_0:72055 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/config.yaml +2024-05-14 11:42:12,889 INFO wandb-upload_3:72055 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/output.log +2024-05-14 11:42:13,006 INFO wandb-upload_2:72055 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/wandb-summary.json +2024-05-14 11:42:13,008 INFO wandb-upload_1:72055 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/files/requirements.txt +2024-05-14 11:42:13,208 INFO Thread-11 (_thread_body):72055 [sender.py:transition_state():613] send defer: 11 +2024-05-14 11:42:13,209 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:13,209 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 11:42:13,209 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:13,209 INFO SenderThread:72055 
[sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 11:42:13,209 INFO SenderThread:72055 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 11:42:13,209 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 12 +2024-05-14 11:42:13,210 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:13,210 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 11:42:13,210 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:13,210 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 11:42:13,210 INFO SenderThread:72055 [file_stream.py:finish():601] file stream finish called +2024-05-14 11:42:13,284 INFO SenderThread:72055 [file_stream.py:finish():605] file stream finish is done +2024-05-14 11:42:13,284 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 13 +2024-05-14 11:42:13,284 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:13,284 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 11:42:13,285 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:13,285 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 11:42:13,285 INFO SenderThread:72055 [sender.py:transition_state():613] send defer: 14 +2024-05-14 11:42:13,285 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: defer +2024-05-14 11:42:13,285 INFO HandlerThread:72055 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 11:42:13,285 DEBUG SenderThread:72055 [sender.py:send():378] send: final +2024-05-14 11:42:13,285 DEBUG SenderThread:72055 [sender.py:send():378] send: footer +2024-05-14 11:42:13,285 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: defer +2024-05-14 11:42:13,285 INFO SenderThread:72055 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 11:42:13,286 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 11:42:13,286 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 11:42:13,286 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 11:42:13,286 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 11:42:13,286 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 11:42:13,286 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 11:42:13,286 DEBUG SenderThread:72055 [sender.py:send_request():405] send_request: server_info +2024-05-14 11:42:13,287 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 11:42:13,288 DEBUG HandlerThread:72055 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 11:42:13,339 INFO MainThread:72055 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 11:42:13,339 INFO MainThread:72055 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 11:42:13,339 INFO MainThread:72055 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 11:42:13,339 DEBUG HandlerThread:72055 [handler.py:handle_request():158] 
handle_request: shutdown +2024-05-14 11:42:13,340 INFO HandlerThread:72055 [handler.py:finish():882] shutting down handler +2024-05-14 11:42:14,286 INFO WriterThread:72055 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/run-epmtxgfa.wandb +2024-05-14 11:42:14,339 INFO SenderThread:72055 [sender.py:finish():1545] shutting down sender +2024-05-14 11:42:14,339 INFO SenderThread:72055 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 11:42:14,339 INFO SenderThread:72055 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..a021428fd18084fee7dbab0b414d64ff88f62e0a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 11:41:40,156 INFO MainThread:70757 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_setup.py:_flush():76] Configure stats pid to 70757 +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 11:41:40,157 WARNING MainThread:70757 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/logs/debug.log +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/logs/debug-internal.log +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_init.py:init():560] calling init triggers +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_init.py:init():610] starting backend +2024-05-14 11:41:40,157 INFO MainThread:70757 [wandb_init.py:init():614] setting up manager +2024-05-14 11:41:40,159 INFO MainThread:70757 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 11:41:40,159 INFO MainThread:70757 [wandb_init.py:init():622] backend started and connected +2024-05-14 11:41:40,162 INFO MainThread:70757 [wandb_init.py:init():711] updated telemetry +2024-05-14 11:41:40,188 INFO MainThread:70757 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 11:41:40,422 INFO MainThread:70757 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 11:41:40,506 INFO 
MainThread:70757 [wandb_run.py:_on_init():2405] got version response +2024-05-14 11:41:40,506 INFO MainThread:70757 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 11:41:40,715 INFO MainThread:70757 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 11:41:40,715 INFO MainThread:70757 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 11:41:40,715 INFO MainThread:70757 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 11:41:40,715 INFO MainThread:70757 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 11:41:40,716 INFO MainThread:70757 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 11:42:14,340 WARNING MsgRouterThr:70757 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/run-epmtxgfa.wandb b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/run-epmtxgfa.wandb new file mode 100644 index 0000000000000000000000000000000000000000..f2ef35241dd6006a45e36d934b3a4c57facfc097 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_114140-epmtxgfa/run-epmtxgfa.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a81e7c0eda0613364bf8ab01b84a5a6fcfbb9134 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715687084 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/output.log b/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..4a1c4c6fff4cc25be1f9b8b9cfc4a9227d143997 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:11:44:44,812 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:11:44:49,674 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:11:44:49,676 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:11:44:49,676 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. 
+ warnings.warn( +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:11:44:58,448 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:11:44:58,448 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +[2024-05-14 11:44:58,004] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:11:44:59,714 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:11:44:59,715 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:11:44:59,733 INFO [task.py:395] Building contexts for indiccopa-hi on rank 5... +100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 106377.28it/s] +Passed argument batch_size = auto:1. Detecting largest batch size +2024-05-14:11:45:05,430 INFO [evaluator.py:379] Running loglikelihood requests +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. 
+ warnings.warn( +Determined largest batch size: 64 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 
+xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..f97bfbe43401ee851b2b689eb650f147f8bb41d3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 29}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/config.yaml b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21fd4047d5e380a5e90494b66742c00130182715 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.0 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716403361 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.0 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/output.log b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..f8780fb9607aa61c8581f7bcbdd651da6a37b9c6 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/output.log @@ -0,0 +1,34 @@ + +2024-05-22:18:42:41,996 INFO [__main__.py:251] Verbosity set to INFO +2024-05-22:18:42:50,532 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-22:18:42:50,533 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-22:18:42:50,534 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'} +2024-05-22:18:42:52,829 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module> + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File
"/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3573b3bbfd5b190fed4ccaed4ac2846002aec22 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 
+pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.0 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..48acd54a2f39155b5539b2bdb7a78f816939a85d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-22T18:42:41.794296", + "startedAt": "2024-05-22T18:42:41.257121", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2361.0589124999997, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.63813400268555 + } + }, + "memory": { + "total": 1007.4379997253418 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..7f686649d21285f8be674fcbefbb9769f620fe2b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-22 18:42:41,286 INFO StreamThr :810 [internal.py:wandb_internal():85] W&B internal server running at pid: 810, started at: 2024-05-22 18:42:41.282976 +2024-05-22 18:42:41,289 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: status +2024-05-22 18:42:41,290 INFO WriterThread:810 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/run-s05yasij.wandb +2024-05-22 18:42:41,292 DEBUG SenderThread:810 [sender.py:send():378] send: header +2024-05-22 18:42:41,295 DEBUG SenderThread:810 [sender.py:send():378] send: run +2024-05-22 18:42:41,602 INFO SenderThread:810 [dir_watcher.py:__init__():211] watching files in: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files +2024-05-22 18:42:41,602 INFO SenderThread:810 [sender.py:_start_run_threads():1123] run started: s05yasij with start time 1716403361.282836 +2024-05-22 18:42:41,605 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: check_version +2024-05-22 18:42:41,605 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: check_version +2024-05-22 18:42:41,717 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: run_start +2024-05-22 18:42:41,719 DEBUG HandlerThread:810 [system_info.py:__init__():26] System info init +2024-05-22 18:42:41,719 DEBUG HandlerThread:810 [system_info.py:__init__():41] System info init done +2024-05-22 18:42:41,719 INFO HandlerThread:810 [system_monitor.py:start():194] Starting system monitor +2024-05-22 18:42:41,719 INFO SystemMonitor:810 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-22 18:42:41,719 INFO HandlerThread:810 [system_monitor.py:probe():214] Collecting system info +2024-05-22 18:42:41,726 INFO SystemMonitor:810 [interfaces.py:start():188] Started cpu monitoring +2024-05-22 18:42:41,727 INFO SystemMonitor:810 [interfaces.py:start():188] Started disk monitoring +2024-05-22 18:42:41,732 INFO SystemMonitor:810 [interfaces.py:start():188] Started memory monitoring +2024-05-22 18:42:41,733 INFO SystemMonitor:810 [interfaces.py:start():188] Started network monitoring +2024-05-22 18:42:41,794 DEBUG HandlerThread:810 [system_info.py:probe():150] Probing system +2024-05-22 18:42:41,797 DEBUG HandlerThread:810 [system_info.py:_probe_git():135] Probing git +2024-05-22 18:42:41,807 ERROR HandlerThread:810 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-22 18:42:41,807 DEBUG HandlerThread:810 [system_info.py:_probe_git():143] Probing git done +2024-05-22 18:42:41,807 DEBUG HandlerThread:810 [system_info.py:probe():198] Probing system done +2024-05-22 18:42:41,807 DEBUG HandlerThread:810 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T18:42:41.794296', 'startedAt': '2024-05-22T18:42:41.257121', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2361.0589124999997, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.63813400268555}}, 'memory': {'total': 1007.4379997253418}} +2024-05-22 18:42:41,807 INFO HandlerThread:810 [system_monitor.py:probe():224] Finished collecting system info +2024-05-22 18:42:41,807 INFO HandlerThread:810 [system_monitor.py:probe():227] Publishing system info +2024-05-22 18:42:41,810 INFO HandlerThread:810 [system_monitor.py:probe():229] Finished publishing system info +2024-05-22 18:42:41,816 DEBUG SenderThread:810 [sender.py:send():378] send: files +2024-05-22 18:42:41,816 INFO SenderThread:810 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-22 18:42:41,990 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: python_packages +2024-05-22 18:42:41,990 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: python_packages +2024-05-22 18:42:41,992 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: stop_status +2024-05-22 18:42:41,992 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: stop_status +2024-05-22 18:42:42,121 DEBUG SenderThread:810 [sender.py:send():378] send: telemetry +2024-05-22 18:42:42,412 INFO wandb-upload_0:810 [upload_job.py:push():130] Uploaded file /tmp/tmpporej3jbwandb/fidsckku-wandb-metadata.json +2024-05-22 18:42:42,604 INFO Thread-12 :810 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-metadata.json +2024-05-22 18:42:42,605 INFO Thread-12 :810 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/requirements.txt +2024-05-22 18:42:42,605 INFO Thread-12 :810 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/output.log +2024-05-22 18:42:44,604 INFO Thread-12 :810 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/output.log +2024-05-22 18:42:47,123 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:42:52,535 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:42:52,611 INFO Thread-12 :810 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/output.log +2024-05-22 18:42:52,839 DEBUG SenderThread:810 [sender.py:send():378] send: exit +2024-05-22 18:42:52,839 INFO SenderThread:810 [sender.py:send_exit():585] handling exit code: 1 +2024-05-22 18:42:52,839 INFO SenderThread:810 
[sender.py:send_exit():587] handling runtime: 11 +2024-05-22 18:42:52,840 INFO SenderThread:810 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:42:52,841 INFO SenderThread:810 [sender.py:send_exit():593] send defer +2024-05-22 18:42:52,841 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:52,841 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-22 18:42:52,841 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:52,841 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-22 18:42:52,841 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 1 +2024-05-22 18:42:52,841 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:52,841 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-22 18:42:52,841 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:52,841 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-22 18:42:52,841 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 2 +2024-05-22 18:42:52,841 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:52,841 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-22 18:42:52,841 INFO HandlerThread:810 [system_monitor.py:finish():203] Stopping system monitor +2024-05-22 18:42:52,841 DEBUG SystemMonitor:810 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-22 18:42:52,841 DEBUG SystemMonitor:810 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-22 18:42:52,842 DEBUG SystemMonitor:810 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-22 18:42:52,844 INFO HandlerThread:810 [interfaces.py:finish():200] Joined cpu monitor +2024-05-22 18:42:52,845 INFO HandlerThread:810 [interfaces.py:finish():200] Joined disk monitor +2024-05-22 18:42:52,845 INFO HandlerThread:810 [interfaces.py:finish():200] Joined memory monitor +2024-05-22 18:42:52,845 INFO HandlerThread:810 [interfaces.py:finish():200] Joined network monitor +2024-05-22 18:42:52,845 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:52,845 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-22 18:42:52,845 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 3 +2024-05-22 18:42:52,846 DEBUG SenderThread:810 [sender.py:send():378] send: stats +2024-05-22 18:42:52,847 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:52,847 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-22 18:42:52,847 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:52,847 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-22 18:42:52,847 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 4 +2024-05-22 18:42:52,847 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:52,847 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-22 18:42:52,847 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer 
+2024-05-22 18:42:52,847 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-22 18:42:52,847 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 5 +2024-05-22 18:42:52,847 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:52,847 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-22 18:42:52,847 DEBUG SenderThread:810 [sender.py:send():378] send: summary +2024-05-22 18:42:52,848 INFO SenderThread:810 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:42:52,848 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:52,848 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-22 18:42:52,848 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 6 +2024-05-22 18:42:52,849 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:52,849 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-22 18:42:52,849 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:52,849 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-22 18:42:52,853 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:42:52,924 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 7 +2024-05-22 18:42:52,925 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:52,925 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-22 18:42:52,925 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:52,925 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-22 18:42:53,613 INFO Thread-12 :810 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/config.yaml +2024-05-22 18:42:53,613 INFO Thread-12 :810 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-summary.json +2024-05-22 18:42:53,839 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:42:54,146 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 8 +2024-05-22 18:42:54,146 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:42:54,146 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:54,146 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-22 18:42:54,146 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:54,146 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-22 18:42:54,146 INFO SenderThread:810 [job_builder.py:build():432] Attempting to build job artifact +2024-05-22 18:42:54,147 INFO SenderThread:810 [job_builder.py:_get_source_type():576] no source found +2024-05-22 18:42:54,147 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 9 +2024-05-22 18:42:54,147 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:54,147 
INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-22 18:42:54,147 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:54,147 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-22 18:42:54,147 INFO SenderThread:810 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-22 18:42:54,614 INFO SenderThread:810 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/output.log +2024-05-22 18:42:54,614 INFO SenderThread:810 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files +2024-05-22 18:42:54,615 INFO SenderThread:810 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-metadata.json wandb-metadata.json +2024-05-22 18:42:54,615 INFO SenderThread:810 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/requirements.txt requirements.txt +2024-05-22 18:42:54,615 INFO SenderThread:810 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/output.log output.log +2024-05-22 18:42:54,617 INFO SenderThread:810 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/config.yaml config.yaml +2024-05-22 18:42:54,619 INFO SenderThread:810 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-summary.json wandb-summary.json +2024-05-22 18:42:54,619 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 10 +2024-05-22 18:42:54,621 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:54,621 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-22 18:42:54,622 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:54,622 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-22 18:42:54,622 INFO SenderThread:810 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:42:54,839 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:42:54,839 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:42:54,942 INFO wandb-upload_0:810 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/requirements.txt +2024-05-22 18:42:55,196 INFO wandb-upload_2:810 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/config.yaml +2024-05-22 18:42:55,224 INFO wandb-upload_1:810 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/output.log +2024-05-22 18:42:55,229 INFO wandb-upload_3:810 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/files/wandb-summary.json +2024-05-22 18:42:55,429 INFO Thread-11 
(_thread_body):810 [sender.py:transition_state():613] send defer: 11 +2024-05-22 18:42:55,429 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:55,429 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-22 18:42:55,430 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:55,430 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-22 18:42:55,430 INFO SenderThread:810 [file_pusher.py:join():175] waiting for file pusher +2024-05-22 18:42:55,430 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 12 +2024-05-22 18:42:55,430 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:55,430 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-22 18:42:55,430 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:55,430 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-22 18:42:55,430 INFO SenderThread:810 [file_stream.py:finish():601] file stream finish called +2024-05-22 18:42:55,506 INFO SenderThread:810 [file_stream.py:finish():605] file stream finish is done +2024-05-22 18:42:55,506 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 13 +2024-05-22 18:42:55,506 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:55,506 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-22 18:42:55,507 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:55,507 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-22 18:42:55,507 INFO SenderThread:810 [sender.py:transition_state():613] send defer: 14 +2024-05-22 18:42:55,507 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:42:55,507 INFO HandlerThread:810 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-22 18:42:55,507 DEBUG SenderThread:810 [sender.py:send():378] send: final +2024-05-22 18:42:55,507 DEBUG SenderThread:810 [sender.py:send():378] send: footer +2024-05-22 18:42:55,507 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: defer +2024-05-22 18:42:55,507 INFO SenderThread:810 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-22 18:42:55,508 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:42:55,508 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:42:55,508 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:42:55,508 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: server_info +2024-05-22 18:42:55,508 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: get_summary +2024-05-22 18:42:55,508 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-22 18:42:55,508 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-22 18:42:55,509 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:42:55,509 DEBUG SenderThread:810 [sender.py:send_request():405] send_request: server_info +2024-05-22 18:42:55,562 INFO MainThread:810 
[wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-22 18:42:55,562 INFO MainThread:810 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-22 18:42:55,562 INFO MainThread:810 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-22 18:42:55,563 DEBUG HandlerThread:810 [handler.py:handle_request():158] handle_request: shutdown +2024-05-22 18:42:55,563 INFO HandlerThread:810 [handler.py:finish():882] shutting down handler +2024-05-22 18:42:56,509 INFO WriterThread:810 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/run-s05yasij.wandb +2024-05-22 18:42:56,562 INFO SenderThread:810 [sender.py:finish():1545] shutting down sender +2024-05-22 18:42:56,562 INFO SenderThread:810 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:42:56,562 INFO SenderThread:810 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/logs/debug.log b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..b8163b6f74d7ad32ff849b37183a7ac392ef94cc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_setup.py:_flush():76] Configure stats pid to 654 +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-22 18:42:41,277 WARNING MainThread:654 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/logs/debug.log +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/logs/debug-internal.log +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_init.py:init():560] calling init triggers +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_init.py:init():610] starting backend +2024-05-22 18:42:41,277 INFO MainThread:654 [wandb_init.py:init():614] setting up manager +2024-05-22 18:42:41,281 INFO MainThread:654 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-22 18:42:41,282 INFO MainThread:654 [wandb_init.py:init():622] 
backend started and connected +2024-05-22 18:42:41,286 INFO MainThread:654 [wandb_init.py:init():711] updated telemetry +2024-05-22 18:42:41,295 INFO MainThread:654 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-22 18:42:41,604 INFO MainThread:654 [wandb_run.py:_on_init():2396] communicating current version +2024-05-22 18:42:41,710 INFO MainThread:654 [wandb_run.py:_on_init():2405] got version response +2024-05-22 18:42:41,711 INFO MainThread:654 [wandb_init.py:init():795] starting run threads in backend +2024-05-22 18:42:41,991 INFO MainThread:654 [wandb_run.py:_console_start():2374] atexit reg +2024-05-22 18:42:41,991 INFO MainThread:654 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-22 18:42:41,991 INFO MainThread:654 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-22 18:42:41,991 INFO MainThread:654 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-22 18:42:41,994 INFO MainThread:654 [wandb_init.py:init():838] run started, returning control to user process +2024-05-22 18:42:56,563 WARNING MsgRouterThr:654 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/run-s05yasij.wandb b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/run-s05yasij.wandb new file mode 100644 index 0000000000000000000000000000000000000000..7954172b918de34a1284dec72d49a3f44e752c99 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240522_184241-s05yasij/run-s05yasij.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/config.yaml b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b7b8d95073bd497f631a88b0fa91195a10eacc5e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.0 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716404384 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.0 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..5f1670870e5f2c4e295a27201dd92f92333bea1e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log @@ -0,0 +1,34 @@ + +2024-05-22:18:59:45,137 INFO [__main__.py:251] Verbosity set to INFO +2024-05-22:18:59:53,626 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-22:18:59:53,627 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-22:18:59:53,628 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000'} +2024-05-22:18:59:55,913 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + 
File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..24c28489fae48af05aaa93916011941c64de59f7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-22T18:59:44.922176", + "startedAt": "2024-05-22T18:59:44.408977", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2347.7606062500004, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3321.773, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3321.767, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.871, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.6415901184082 + } + }, + "memory": { + "total": 1007.4379997253418 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/run-8sj20j0r.wandb b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/run-8sj20j0r.wandb new file mode 100644 index 0000000000000000000000000000000000000000..b83f632cac44a44b94a545b1b4880abf8d6429f6 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/run-8sj20j0r.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e71e20380aa956ec25e794bcfe867e1729b04ed --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716467425 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/output.log b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..db4895730d6371c853ca767726400dc8bc46a9ce --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:12:30:26,164 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:12:30:34,660 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:12:30:34,660 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:12:30:34,661 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000'} +2024-05-23:12:30:36,967 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File 
"/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1d6bc537eb8e55b0089951b5710f0c49077bcb8f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T12:30:25.961663", + "startedAt": "2024-05-23T12:30:25.436672", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.28635, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 209.58185195922852 + } + }, + "memory": { + "total": 1007.4379425048828 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..2f03499d3ab3635b40edc15f195474937971307f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-23 12:30:25,458 INFO StreamThr :1323 [internal.py:wandb_internal():85] W&B internal server running at pid: 1323, started at: 2024-05-23 12:30:25.456500 +2024-05-23 12:30:25,463 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: status +2024-05-23 12:30:25,464 INFO WriterThread:1323 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/run-u0zri7nh.wandb +2024-05-23 12:30:25,466 DEBUG SenderThread:1323 [sender.py:send():378] send: header +2024-05-23 12:30:25,470 DEBUG SenderThread:1323 [sender.py:send():378] send: run +2024-05-23 12:30:25,762 INFO SenderThread:1323 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files +2024-05-23 12:30:25,762 INFO SenderThread:1323 [sender.py:_start_run_threads():1123] run started: u0zri7nh with start time 1716467425.456952 +2024-05-23 12:30:25,766 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 12:30:25,766 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: check_version +2024-05-23 12:30:25,887 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 12:30:25,890 DEBUG HandlerThread:1323 [system_info.py:__init__():26] System info init +2024-05-23 12:30:25,890 DEBUG HandlerThread:1323 [system_info.py:__init__():41] System info init done +2024-05-23 12:30:25,890 INFO HandlerThread:1323 [system_monitor.py:start():194] Starting system monitor +2024-05-23 12:30:25,890 INFO SystemMonitor:1323 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 12:30:25,890 INFO HandlerThread:1323 [system_monitor.py:probe():214] Collecting system info +2024-05-23 12:30:25,897 INFO SystemMonitor:1323 [interfaces.py:start():188] Started cpu 
monitoring +2024-05-23 12:30:25,897 INFO SystemMonitor:1323 [interfaces.py:start():188] Started disk monitoring +2024-05-23 12:30:25,897 INFO SystemMonitor:1323 [interfaces.py:start():188] Started memory monitoring +2024-05-23 12:30:25,898 INFO SystemMonitor:1323 [interfaces.py:start():188] Started network monitoring +2024-05-23 12:30:25,961 DEBUG HandlerThread:1323 [system_info.py:probe():150] Probing system +2024-05-23 12:30:25,964 DEBUG HandlerThread:1323 [system_info.py:_probe_git():135] Probing git +2024-05-23 12:30:25,975 ERROR HandlerThread:1323 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 12:30:25,975 DEBUG HandlerThread:1323 [system_info.py:_probe_git():143] Probing git done +2024-05-23 12:30:25,975 DEBUG HandlerThread:1323 [system_info.py:probe():198] Probing system done +2024-05-23 12:30:25,975 DEBUG HandlerThread:1323 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T12:30:25.961663', 'startedAt': '2024-05-23T12:30:25.436672', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.28635, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 209.58185195922852}}, 'memory': {'total': 1007.4379425048828}} +2024-05-23 12:30:25,975 INFO HandlerThread:1323 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 12:30:25,975 INFO HandlerThread:1323 [system_monitor.py:probe():227] Publishing system info +2024-05-23 12:30:25,978 INFO HandlerThread:1323 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 12:30:25,983 
DEBUG SenderThread:1323 [sender.py:send():378] send: files +2024-05-23 12:30:25,984 INFO SenderThread:1323 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 12:30:26,158 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 12:30:26,158 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: python_packages +2024-05-23 12:30:26,159 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 12:30:26,160 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: stop_status +2024-05-23 12:30:26,267 DEBUG SenderThread:1323 [sender.py:send():378] send: telemetry +2024-05-23 12:30:26,563 INFO wandb-upload_0:1323 [upload_job.py:push():130] Uploaded file /tmp/tmpgv2lw9d3wandb/qwa670bz-wandb-metadata.json +2024-05-23 12:30:26,765 INFO Thread-12 :1323 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/output.log +2024-05-23 12:30:26,765 INFO Thread-12 :1323 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/requirements.txt +2024-05-23 12:30:26,765 INFO Thread-12 :1323 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-metadata.json +2024-05-23 12:30:28,765 INFO Thread-12 :1323 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/output.log +2024-05-23 12:30:31,272 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:30:36,661 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:30:36,771 INFO Thread-12 :1323 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/output.log +2024-05-23 12:30:36,974 DEBUG SenderThread:1323 [sender.py:send():378] send: exit +2024-05-23 12:30:36,974 INFO SenderThread:1323 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 12:30:36,974 INFO SenderThread:1323 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 12:30:36,976 INFO SenderThread:1323 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:30:36,976 INFO SenderThread:1323 [sender.py:send_exit():593] send defer +2024-05-23 12:30:36,976 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:36,976 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 12:30:36,976 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:36,976 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 12:30:36,976 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 1 +2024-05-23 12:30:36,976 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:36,976 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 12:30:36,976 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:36,976 INFO SenderThread:1323 
[sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 12:30:36,976 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 2 +2024-05-23 12:30:36,977 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:36,977 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 12:30:36,977 INFO HandlerThread:1323 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 12:30:36,977 DEBUG SystemMonitor:1323 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 12:30:36,977 DEBUG SystemMonitor:1323 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 12:30:36,977 DEBUG SystemMonitor:1323 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 12:30:36,979 INFO HandlerThread:1323 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 12:30:36,979 INFO HandlerThread:1323 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 12:30:36,979 INFO HandlerThread:1323 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 12:30:36,979 INFO HandlerThread:1323 [interfaces.py:finish():200] Joined network monitor +2024-05-23 12:30:36,980 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:36,980 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 12:30:36,980 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 3 +2024-05-23 12:30:36,980 DEBUG SenderThread:1323 [sender.py:send():378] send: stats +2024-05-23 12:30:36,981 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:36,981 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 12:30:36,981 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:36,981 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 12:30:36,981 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 4 +2024-05-23 12:30:36,981 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:36,981 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 12:30:36,982 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:36,982 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 12:30:36,982 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 5 +2024-05-23 12:30:36,982 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:36,982 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 12:30:36,982 DEBUG SenderThread:1323 [sender.py:send():378] send: summary +2024-05-23 12:30:36,983 INFO SenderThread:1323 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:30:36,983 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:36,983 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 12:30:36,983 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 6 +2024-05-23 12:30:36,983 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:36,983 INFO HandlerThread:1323 
[handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 12:30:36,983 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:36,983 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 12:30:36,988 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:30:37,058 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 7 +2024-05-23 12:30:37,058 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:37,058 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 12:30:37,058 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:37,058 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 12:30:37,772 INFO Thread-12 :1323 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/config.yaml +2024-05-23 12:30:37,772 INFO Thread-12 :1323 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-summary.json +2024-05-23 12:30:37,974 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:30:38,294 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 8 +2024-05-23 12:30:38,294 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:30:38,294 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:38,295 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 12:30:38,295 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:38,295 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 12:30:38,295 INFO SenderThread:1323 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 12:30:38,295 INFO SenderThread:1323 [job_builder.py:_get_source_type():576] no source found +2024-05-23 12:30:38,295 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 9 +2024-05-23 12:30:38,295 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:38,295 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 12:30:38,296 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:38,296 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 12:30:38,296 INFO SenderThread:1323 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 12:30:38,773 INFO SenderThread:1323 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/output.log +2024-05-23 12:30:38,774 INFO SenderThread:1323 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files +2024-05-23 12:30:38,774 INFO SenderThread:1323 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-summary.json wandb-summary.json +2024-05-23 12:30:38,774 INFO 
SenderThread:1323 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/requirements.txt requirements.txt +2024-05-23 12:30:38,776 INFO SenderThread:1323 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-metadata.json wandb-metadata.json +2024-05-23 12:30:38,777 INFO SenderThread:1323 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/output.log output.log +2024-05-23 12:30:38,777 INFO SenderThread:1323 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/config.yaml config.yaml +2024-05-23 12:30:38,777 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 10 +2024-05-23 12:30:38,777 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:38,777 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 12:30:38,779 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:38,779 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 12:30:38,779 INFO SenderThread:1323 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:30:38,974 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:30:38,975 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:30:39,010 INFO wandb-upload_0:1323 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/wandb-summary.json +2024-05-23 12:30:39,348 INFO wandb-upload_1:1323 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/requirements.txt +2024-05-23 12:30:39,374 INFO wandb-upload_2:1323 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/output.log +2024-05-23 12:30:39,382 INFO wandb-upload_3:1323 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/files/config.yaml +2024-05-23 12:30:39,582 INFO Thread-11 (_thread_body):1323 [sender.py:transition_state():613] send defer: 11 +2024-05-23 12:30:39,582 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:39,582 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 12:30:39,582 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:39,582 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 12:30:39,582 INFO SenderThread:1323 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 12:30:39,583 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 12 +2024-05-23 12:30:39,583 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:39,583 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 12:30:39,583 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:39,583 INFO 
SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 12:30:39,583 INFO SenderThread:1323 [file_stream.py:finish():601] file stream finish called +2024-05-23 12:30:39,645 INFO SenderThread:1323 [file_stream.py:finish():605] file stream finish is done +2024-05-23 12:30:39,645 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 13 +2024-05-23 12:30:39,645 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:39,645 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 12:30:39,645 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:39,645 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 12:30:39,645 INFO SenderThread:1323 [sender.py:transition_state():613] send defer: 14 +2024-05-23 12:30:39,645 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:30:39,645 INFO HandlerThread:1323 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 12:30:39,646 DEBUG SenderThread:1323 [sender.py:send():378] send: final +2024-05-23 12:30:39,646 DEBUG SenderThread:1323 [sender.py:send():378] send: footer +2024-05-23 12:30:39,646 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: defer +2024-05-23 12:30:39,646 INFO SenderThread:1323 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 12:30:39,646 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:30:39,647 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:30:39,647 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 12:30:39,647 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 12:30:39,647 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 12:30:39,647 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 12:30:39,647 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:30:39,647 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:30:39,647 DEBUG SenderThread:1323 [sender.py:send_request():405] send_request: server_info +2024-05-23 12:30:39,710 INFO MainThread:1323 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 12:30:39,710 INFO MainThread:1323 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 12:30:39,710 INFO MainThread:1323 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 12:30:39,710 DEBUG HandlerThread:1323 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 12:30:39,710 INFO HandlerThread:1323 [handler.py:finish():882] shutting down handler +2024-05-23 12:30:40,647 INFO WriterThread:1323 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/run-u0zri7nh.wandb +2024-05-23 12:30:40,710 INFO SenderThread:1323 [sender.py:finish():1545] shutting down sender +2024-05-23 12:30:40,710 INFO SenderThread:1323 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:30:40,710 INFO SenderThread:1323 [file_pusher.py:join():175] waiting for file pusher diff --git 
a/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..d9dc36312f55bc755b79a0e5ee9f0ee3e016f7bc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 12:30:25,451 INFO MainThread:1168 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_setup.py:_flush():76] Configure stats pid to 1168 +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 12:30:25,452 WARNING MainThread:1168 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/logs/debug.log +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/logs/debug-internal.log +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_init.py:init():560] calling init triggers +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_init.py:init():610] starting backend +2024-05-23 12:30:25,452 INFO MainThread:1168 [wandb_init.py:init():614] setting up manager +2024-05-23 12:30:25,455 INFO MainThread:1168 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 12:30:25,456 INFO MainThread:1168 [wandb_init.py:init():622] backend started and connected +2024-05-23 12:30:25,460 INFO MainThread:1168 [wandb_init.py:init():711] updated telemetry +2024-05-23 12:30:25,469 INFO MainThread:1168 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 12:30:25,766 INFO MainThread:1168 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 12:30:25,881 INFO MainThread:1168 [wandb_run.py:_on_init():2405] got version response +2024-05-23 12:30:25,881 INFO MainThread:1168 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 12:30:26,159 INFO MainThread:1168 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 12:30:26,159 INFO MainThread:1168 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 12:30:26,159 INFO MainThread:1168 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 12:30:26,159 INFO MainThread:1168 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-23 12:30:26,162 INFO MainThread:1168 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 12:30:40,711 WARNING MsgRouterThr:1168 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/run-u0zri7nh.wandb b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/run-u0zri7nh.wandb new file mode 100644 index 0000000000000000000000000000000000000000..f6b4eed171351e1bb36ab69b2d163800fb473cf5 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_123025-u0zri7nh/run-u0zri7nh.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240608_134753-jc54oi3l/files/config.yaml b/lm-evaluation-harness/wandb/run-20240608_134753-jc54oi3l/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..98815a368c02e4a79f96daa2d921e05fbee9aee3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240608_134753-jc54oi3l/files/config.yaml @@ -0,0 +1,375 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.1 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1717854473 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 2 + - 13 + - 23 + - 62 + 4: 3.10.12 + 5: 0.17.1 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 +task_configs: + desc: null + value: + arc_easy: + task: arc_easy + group: + - ai2_arc + dataset_path: allenai/ai2_arc + dataset_name: ARC-Easy + training_split: train + validation_split: validation + test_split: test + doc_to_text: 'Question: {{question}} + + Answer:' + doc_to_target: '{{choices.label.index(answerKey)}}' + doc_to_choice: '{{choices.text}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: 'Question: {{question}} + + Answer:' + metadata: + version: 1.0 + boolq: + task: boolq + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: boolq + training_split: train + validation_split: validation + doc_to_text: '{{passage}} + + Question: {{question}}? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: passage + metadata: + version: 2.0 + copa: + task: copa + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: copa + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n # Drop the period\n connector =\ + \ {\n \"cause\": \"because\",\n \"effect\": \"therefore\",\n\ + \ }[doc[\"question\"]]\n return doc[\"premise\"].strip()[:-1] + f\"\ + \ {connector}\"\n" + doc_to_target: "def doc_to_target(doc):\n correct_choice = doc[\"choice1\"\ + ] if doc[\"label\"] == 0 else doc[\"choice2\"]\n # Connect the sentences\n\ + \ return \" \" + convert_choice(correct_choice)\n" + doc_to_choice: "def doc_to_choice(doc):\n return [\" \" + convert_choice(doc[\"\ + choice1\"]), \" \" + convert_choice(doc[\"choice2\"])]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + indic_arc_challenge_hi: + task: indic_arc_challenge_hi + group: Cognitive-Lab/Indic-ARC-Challenge + dataset_path: Cognitive-Lab/Indic-ARC-Challenge + dataset_name: hi + test_split: test + doc_to_text: 'Question: {{translated_question}} + + Answer:' + doc_to_target: '{{translated_choices.label.index(answerKey)}}' + doc_to_choice: '{{translated_choices.text}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: 'Question: {{translated_question}} + + Answer:' + metadata: + version: 1.0 + indic_arc_easy_hi: + task: indic_arc_easy_hi + group: Cognitive-Lab/Indic-ARC-Easy + dataset_path: Cognitive-Lab/Indic-ARC-Easy + dataset_name: hi + test_split: test + doc_to_text: 'Question: {{translated_question}} + + Answer:' + doc_to_target: '{{translated_choices.label.index(answerKey)}}' + doc_to_choice: '{{translated_choices.text}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: 'Question: {{translated_question}} + + Answer:' + metadata: + version: 1.0 + indic_boolq_hi: + task: indic_boolq_hi + group: Cognitive-Lab/Indic-BoolQ + dataset_path: Cognitive-Lab/Indic-BoolQ + dataset_name: hi + validation_split: validation + doc_to_text: 'Passage: {translated_passage} + + Question: {translated_question.strip()} + + Answer:' + doc_to_target: answer + doc_to_choice: + - 'true' + - 'false' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + mrpc: + task: mrpc + group: glue + dataset_path: glue + dataset_name: mrpc + training_split: train + validation_split: validation + doc_to_text: 'Sentence 1: {{sentence1}} + + Sentence 2: {{sentence2}} + + Question: Do both sentences mean the same thing? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + - metric: f1 + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + piqa: + task: piqa + dataset_path: piqa + training_split: train + validation_split: validation + doc_to_text: 'Question: {{goal}} + + Answer:' + doc_to_target: label + doc_to_choice: '{{[sol1, sol2]}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: goal + metadata: + version: 1.0 + sst2: + task: sst2 + group: glue + dataset_path: glue + dataset_name: sst2 + training_split: train + validation_split: validation + doc_to_text: '{{sentence}} + + Question: Is this sentence positive or negative? + + Answer:' + doc_to_target: label + doc_to_choice: + - negative + - positive + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + winogrande: + task: winogrande + dataset_path: winogrande + dataset_name: winogrande_xl + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n\ + \ return answer_to_num[doc[\"answer\"]]\n" + doc_to_target: "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"\ + _\") + 1\n return doc[\"sentence\"][idx:].strip()\n" + doc_to_choice: "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"\ + _\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"\ + sentence\"][:idx] + opt for opt in options]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: sentence + metadata: + version: 1.0 +cli_configs: + desc: null + value: + model: hf + model_args: pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step150000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer + batch_size: auto + batch_sizes: + - 64 + device: null + use_cache: null + limit: null + bootstrap_iters: 100000 + gen_kwargs: null diff --git a/lm-evaluation-harness/wandb/run-20240608_134753-jc54oi3l/files/media/table/evaluation/eval_results_1_a01dd2f2138b92becdbb.table.json b/lm-evaluation-harness/wandb/run-20240608_134753-jc54oi3l/files/media/table/evaluation/eval_results_1_a01dd2f2138b92becdbb.table.json new file mode 100644 index 0000000000000000000000000000000000000000..40fe28af50e1f8513908e2dd35f5a38654b9b99b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240608_134753-jc54oi3l/files/media/table/evaluation/eval_results_1_a01dd2f2138b92becdbb.table.json @@ -0,0 +1 @@ +{"columns": ["Tasks", "Version", "Filter", "num_fewshot", "Metric", "Value", "Stderr"], "data": [["winogrande", 1.0, "none", 0, "acc", "0.5035516969218626", "0.0141"], ["sst2", 1.0, "none", 0, "acc", "0.46674311926605505", "0.0169"], ["piqa", 1.0, "none", 0, "acc", "0.5282916213275299", 
"0.0116"], ["piqa", 1.0, "none", 0, "acc_norm", "0.4972796517954298", "0.0117"], ["mrpc", 1.0, "none", 0, "acc", "0.3161764705882353", "0.0230"], ["mrpc", 1.0, "none", 0, "f1", "0.0", "0.0000"], ["indic_boolq_hi", 1.0, "none", 0, "acc", "0.3782874617737003", "0.0085"], ["indic_arc_easy_hi", 1.0, "none", 0, "acc", "0.24873737373737373", "0.0089"], ["indic_arc_challenge_hi", 1.0, "none", 0, "acc", "0.2030716723549488", "0.0118"], ["copa", 1.0, "none", 0, "acc", "0.59", "0.0494"], ["boolq", 2.0, "none", 0, "acc", "0.3779816513761468", "0.0085"], ["arc_easy", 1.0, "none", 0, "acc", "0.26725589225589225", "0.0091"], ["arc_easy", 1.0, "none", 0, "acc_norm", "0.2542087542087542", "0.0089"]]} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240608_134753-jc54oi3l/files/output.log b/lm-evaluation-harness/wandb/run-20240608_134753-jc54oi3l/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..21b9914eba94e74e757e0f0530da196141af6b2e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240608_134753-jc54oi3l/files/output.log @@ -0,0 +1,737 @@ + +2024-06-08:13:47:54,328 INFO [__main__.py:251] Verbosity set to INFO +2024-06-08:13:48:03,469 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'indic_arc_challenge_hi', 'indic_arc_easy_hi', 'indic_boolq_hi', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-06-08:13:48:03,470 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-06-08:13:48:03,470 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step150000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer'} +2024-06-08:13:48:05,994 INFO [huggingface.py:164] Using device 'cuda' +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +2024-06-08:13:48:38,439 WARNING [task.py:763] [Task: boolq] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-06-08:13:48:38,440 WARNING [task.py:775] [Task: boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1491: FutureWarning: The repository for super_glue contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/super_glue +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-06-08:13:48:40,148 WARNING [task.py:763] [Task: copa] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-06-08:13:48:40,148 WARNING [task.py:775] [Task: copa] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +2024-06-08:13:48:42,787 WARNING [task.py:322] [Task: indic_arc_challenge_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. 
+2024-06-08:13:48:42,787 WARNING [task.py:322] [Task: indic_arc_challenge_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-06-08:13:48:44,247 WARNING [task.py:322] [Task: indic_arc_easy_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-06-08:13:48:44,247 WARNING [task.py:322] [Task: indic_arc_easy_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-06-08:13:48:45,938 WARNING [task.py:763] [Task: mrpc] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-06-08:13:48:45,939 WARNING [task.py:775] [Task: mrpc] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +2024-06-08:13:48:45,939 WARNING [task.py:763] [Task: mrpc] metric f1 is defined, but aggregation is not. using default aggregation=f1 +2024-06-08:13:48:45,939 WARNING [task.py:775] [Task: mrpc] metric f1 is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1491: FutureWarning: The repository for piqa contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/piqa +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-06-08:13:48:51,418 WARNING [task.py:763] [Task: sst2] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-06-08:13:48:51,418 WARNING [task.py:775] [Task: sst2] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1491: FutureWarning: The repository for winogrande contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/winogrande +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-06-08:13:49:04,808 INFO [task.py:395] Building contexts for winogrande on rank 0... +100%|██████████| 1267/1267 [00:00<00:00, 68696.86it/s] +2024-06-08:13:49:04,894 INFO [task.py:395] Building contexts for sst2 on rank 0... +100%|██████████| 872/872 [00:00<00:00, 2483.29it/s] +2024-06-08:13:49:05,274 INFO [task.py:395] Building contexts for piqa on rank 0... +100%|██████████| 1838/1838 [00:01<00:00, 1084.94it/s] +2024-06-08:13:49:07,044 INFO [task.py:395] Building contexts for mrpc on rank 0... +100%|██████████| 408/408 [00:00<00:00, 1866.80it/s] +2024-06-08:13:49:07,280 INFO [task.py:395] Building contexts for indic_boolq_hi on rank 0... +100%|██████████| 3270/3270 [00:01<00:00, 3150.71it/s] +2024-06-08:13:49:08,494 INFO [task.py:395] Building contexts for indic_arc_easy_hi on rank 0... +100%|██████████| 2376/2376 [00:02<00:00, 1124.82it/s] +2024-06-08:13:49:10,846 INFO [task.py:395] Building contexts for indic_arc_challenge_hi on rank 0... +100%|██████████| 1172/1172 [00:01<00:00, 1120.97it/s] +2024-06-08:13:49:12,010 INFO [task.py:395] Building contexts for copa on rank 0... 
+100%|██████████| 100/100 [00:00<00:00, 61753.59it/s] +2024-06-08:13:49:12,020 INFO [task.py:395] Building contexts for boolq on rank 0... +100%|██████████| 3270/3270 [00:01<00:00, 1952.64it/s] +2024-06-08:13:49:13,828 INFO [task.py:395] Building contexts for arc_easy on rank 0... + +100%|██████████| 2376/2376 [00:02<00:00, 1044.43it/s] +2024-06-08:13:49:16,254 INFO [evaluator.py:379] Running loglikelihood requests +Token indices sequence length is longer than the specified maximum sequence length for this model (1333 > 1024). Running this sequence through the model will result in indexing errors +Running loglikelihood requests: 0%| | 0/45739 [00:00>> from transformers import ClapTextConfig, ClapTextModel + + >>> # Initializing a CLAP text configuration + >>> configuration = ClapTextConfig() + + >>> # Initializing a model (with random weights) from the configuration + >>> model = ClapTextModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "clap_text_model" + + def __init__( + self, + vocab_size=50265, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=514, + type_vocab_size=1, + initializer_factor=1.0, + layer_norm_eps=1e-12, + projection_dim=512, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + position_embedding_type="absolute", + use_cache=True, + projection_hidden_act="relu", + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_factor = initializer_factor + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.projection_hidden_act = projection_hidden_act + self.projection_dim = projection_dim + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the text config dict if we are loading from ClapConfig + if config_dict.get("model_type") == "clap": + config_dict = config_dict["text_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class ClapAudioConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ClapAudioModel`]. It is used to instantiate a + CLAP audio encoder according to the specified arguments, defining the model architecture. 
Instantiating a + configuration with the defaults will yield a similar configuration to that of the audio encoder of the CLAP + [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + window_size (`int`, *optional*, defaults to 8): + Image size of the spectrogram + num_mel_bins (`int`, *optional*, defaults to 64): + Number of mel features used per frame. Should correspond to the value used in the `ClapProcessor` class. + spec_size (`int`, *optional*, defaults to 256): + Desired input size of the spectrogram that the model supports. It can be different from the output of the + `ClapFeatureExtractor`, in which case the input features will be resized. Corresponds to the `image_size` + of the audio models. + hidden_act (`str`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + patch_size (`int`, *optional*, defaults to 4): + Patch size for the audio spectrogram + patch_stride (`list`, *optional*, defaults to `[4, 4]`): + Patch stride for the audio spectrogram + num_classes (`int`, *optional*, defaults to 527): + Number of classes used for the head training + hidden_size (`int`, *optional*, defaults to 768): + Hidden size of the output of the audio encoder. Corresponds to the dimension of the penultimate layer's + output, which is sent to the projection MLP layer. + projection_dim (`int`, *optional*, defaults to 512): + Hidden size of the projection layer. + depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`): + Depths used for the Swin Layers of the audio model + num_attention_heads (`list`, *optional*, defaults to `[4, 8, 16, 32]`): + Number of attention heads used for the Swin Layers of the audio model + enable_fusion (`bool`, *optional*, defaults to `False`): + Whether or not to enable patch fusion. This is the main contribution of the authors, and should give the + best results. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the encoder. + fusion_type (`str`, *optional*): + Fusion type used for the patch fusion. + patch_embed_input_channels (`int`, *optional*, defaults to 1): + Number of channels used for the input spectrogram + flatten_patch_embeds (`bool`, *optional*, defaults to `True`): + Whether or not to flatten the patch embeddings + patch_embeds_hidden_size (`int`, *optional*, defaults to 96): + Hidden size of the patch embeddings. It is used as the number of output channels. + enable_patch_layer_norm (`bool`, *optional*, defaults to `True`): + Whether or not to enable layer normalization for the patch embeddings + drop_path_rate (`float`, *optional*, defaults to 0.0): + Drop path rate for the patch fusion + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether or not to add a bias to the query, key, value projections. + mlp_ratio (`float`, *optional*, defaults to 4.0): + Ratio of the mlp hidden dim to embedding dim.
+ aff_block_r (`int`, *optional*, defaults to 4): + downsize_ratio used in the AudioFF block + num_hidden_layers (`int`, *optional*, defaults to 4): + Number of hidden layers in the Transformer encoder. + projection_hidden_act (`str`, *optional*, defaults to `"relu"`): + The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + + Example: + + ```python + >>> from transformers import ClapAudioConfig, ClapAudioModel + + >>> # Initializing a ClapAudioConfig with laion/clap-htsat-fused style configuration + >>> configuration = ClapAudioConfig() + + >>> # Initializing a ClapAudioModel (with random weights) from the laion/clap-htsat-fused style configuration + >>> model = ClapAudioModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "clap_audio_model" + + def __init__( + self, + window_size=8, + num_mel_bins=64, + spec_size=256, + hidden_act="gelu", + patch_size=4, + patch_stride=[4, 4], + num_classes=527, + hidden_size=768, + projection_dim=512, + depths=[2, 2, 6, 2], + num_attention_heads=[4, 8, 16, 32], + enable_fusion=False, + hidden_dropout_prob=0.1, + fusion_type=None, + patch_embed_input_channels=1, + flatten_patch_embeds=True, + patch_embeds_hidden_size=96, + enable_patch_layer_norm=True, + drop_path_rate=0.0, + attention_probs_dropout_prob=0.0, + qkv_bias=True, + mlp_ratio=4.0, + aff_block_r=4, + num_hidden_layers=4, + projection_hidden_act="relu", + layer_norm_eps=1e-5, + initializer_factor=1.0, + **kwargs, + ): + super().__init__(**kwargs) + self.window_size = window_size + self.num_mel_bins = num_mel_bins + self.spec_size = spec_size + self.patch_size = patch_size + self.patch_stride = patch_stride + self.num_classes = num_classes + self.hidden_size = hidden_size + self.depths = depths + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.enable_fusion = enable_fusion + self.fusion_type = fusion_type + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.projection_dim = projection_dim + self.flatten_patch_embeds = flatten_patch_embeds + self.patch_embeds_hidden_size = patch_embeds_hidden_size + self.enable_patch_layer_norm = enable_patch_layer_norm + self.drop_path_rate = drop_path_rate + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.qkv_bias = qkv_bias + self.mlp_ratio = mlp_ratio + self.patch_embed_input_channels = patch_embed_input_channels + self.aff_block_r = aff_block_r + self.layer_norm_eps = layer_norm_eps + self.initializer_factor = initializer_factor + self.projection_hidden_act = projection_hidden_act + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the audio config dict if we are loading from ClapConfig + if config_dict.get("model_type") == "clap": + config_dict = config_dict["audio_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and
config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class ClapConfig(PretrainedConfig): + r""" + [`ClapConfig`] is the configuration class to store the configuration of a [`ClapModel`]. It is used to instantiate + a CLAP model according to the specified arguments, defining the text model and audio model configs. Instantiating a + configuration with the defaults will yield a similar configuration to that of the CLAP + [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + text_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`ClapTextConfig`]. + audio_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`ClapAudioConfig`]. + logit_scale_init_value (`float`, *optional*, defaults to 14.29): + The initial value of the *logit_scale* parameter. Default is used as per the original CLAP implementation. + projection_dim (`int`, *optional*, defaults to 512): + Dimensionality of text and audio projection layers. + projection_hidden_act (`str`, *optional*, defaults to `"relu"`): + Activation function for the projection layers. + initializer_factor (`float`, *optional*, defaults to 1.0): + Factor to scale the initialization of the model weights. + kwargs (*optional*): + Dictionary of keyword arguments. + + Example: + + ```python + >>> from transformers import ClapConfig, ClapModel + + >>> # Initializing a ClapConfig with laion-ai/base style configuration + >>> configuration = ClapConfig() + + >>> # Initializing a ClapModel (with random weights) from the laion-ai/base style configuration + >>> model = ClapModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + + >>> # We can also initialize a ClapConfig from a ClapTextConfig and a ClapAudioConfig + >>> from transformers import ClapTextConfig, ClapAudioConfig + + >>> # Initializing a ClapText and ClapAudioConfig configuration + >>> config_text = ClapTextConfig() + >>> config_audio = ClapAudioConfig() + + >>> config = ClapConfig.from_text_audio_configs(config_text, config_audio) + ```""" + + model_type = "clap" + + def __init__( + self, + text_config=None, + audio_config=None, + logit_scale_init_value=(1 / 0.07), + projection_dim=512, + projection_hidden_act="relu", + initializer_factor=1.0, + **kwargs, + ): + super().__init__(**kwargs) + + if text_config is None: + text_config = {} + logger.info("text_config is None. Initializing the ClapTextConfig with default values.") + + if audio_config is None: + audio_config = {} + logger.info("audio_config is None.
initializing the ClapAudioConfig with default values.") + + self.text_config = ClapTextConfig(**text_config) + self.audio_config = ClapAudioConfig(**audio_config) + self.text_config.projection_dim = projection_dim + self.audio_config.projection_dim = projection_dim + + self.text_config.projection_hidden_act = projection_hidden_act + self.audio_config.projection_hidden_act = projection_hidden_act + + self.projection_dim = projection_dim + self.projection_hidden_act = projection_hidden_act + self.hidden_size = self.text_config.hidden_size + + self.logit_scale_init_value = logit_scale_init_value + self.initializer_factor = initializer_factor + self.num_hidden_layers = self.text_config.num_hidden_layers + len(self.audio_config.depths) + + @classmethod + def from_text_audio_configs(cls, text_config: ClapTextConfig, audio_config: ClapAudioConfig, **kwargs): + r""" + Instantiate a [`ClapConfig`] (or a derived class) from clap text model configuration and clap audio model + configuration. + + Returns: + [`ClapConfig`]: An instance of a configuration object + """ + + return cls(text_config=text_config.to_dict(), audio_config=audio_config.to_dict(), **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/clap/convert_clap_original_pytorch_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/clap/convert_clap_original_pytorch_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..d422bc45ab3de00cd6df4de21ff6c7012ebb6559 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clap/convert_clap_original_pytorch_to_hf.py @@ -0,0 +1,133 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
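The `ClapConfig` just defined copies `projection_dim` and `projection_hidden_act` into both sub-configs and derives `num_hidden_layers` from them. A minimal sketch of that wiring, assuming nothing beyond the `transformers` classes shown in this diff:

```python
from transformers import ClapAudioConfig, ClapConfig, ClapTextConfig

# Build the composite config from explicit sub-configs via the
# `from_text_audio_configs` classmethod defined above.
text_config = ClapTextConfig()
audio_config = ClapAudioConfig()
config = ClapConfig.from_text_audio_configs(text_config, audio_config, projection_dim=256)

# `ClapConfig.__init__` propagates `projection_dim` into both sub-configs...
assert config.text_config.projection_dim == 256
assert config.audio_config.projection_dim == 256

# ...and counts layers as the text depth plus the number of audio Swin stages.
assert config.num_hidden_layers == config.text_config.num_hidden_layers + len(config.audio_config.depths)
```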
+ +import argparse +import re + +from laion_clap import CLAP_Module + +from transformers import AutoFeatureExtractor, ClapConfig, ClapModel + + +KEYS_TO_MODIFY_MAPPING = { + "text_branch": "text_model", + "audio_branch": "audio_model.audio_encoder", + "attn": "attention.self", + "self.proj": "output.dense", + "attention.self_mask": "attn_mask", + "mlp.fc1": "intermediate.dense", + "mlp.fc2": "output.dense", + "norm1": "layernorm_before", + "norm2": "layernorm_after", + "bn0": "batch_norm", +} + +processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc") + + +def init_clap(checkpoint_path, model_type, enable_fusion=False): + model = CLAP_Module( + amodel=model_type, + enable_fusion=enable_fusion, + ) + model.load_ckpt(checkpoint_path) + return model + + +def get_config_from_original(clap_model): + audio_config = { + "patch_embeds_hidden_size": clap_model.model.audio_branch.embed_dim, + "depths": clap_model.model.audio_branch.depths, + "hidden_size": clap_model.model.audio_projection[0].in_features, + } + + text_config = {"hidden_size": clap_model.model.text_branch.pooler.dense.in_features} + + return ClapConfig(audio_config=audio_config, text_config=text_config) + + +def rename_state_dict(state_dict): + model_state_dict = {} + + sequential_layers_pattern = r".*sequential.(\d+).*" + text_projection_pattern = r".*_projection.(\d+).*" + + for key, value in state_dict.items(): + # check if any key needs to be modified + for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): + if key_to_modify in key: + key = key.replace(key_to_modify, new_key) + + if re.match(sequential_layers_pattern, key): + # replace sequential layers with list + sequential_layer = re.match(sequential_layers_pattern, key).group(1) + + key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.") + elif re.match(text_projection_pattern, key): + projection_layer = int(re.match(text_projection_pattern, key).group(1)) + + # Because in CLAP they use `nn.Sequential`...
+ transformers_projection_layer = 1 if projection_layer == 0 else 2 + + key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.") + + if "audio" in key and "qkv" in key: + # split qkv into query, key and value + mixed_qkv = value + qkv_dim = mixed_qkv.size(0) // 3 + + query_layer = mixed_qkv[:qkv_dim] + key_layer = mixed_qkv[qkv_dim : qkv_dim * 2] + value_layer = mixed_qkv[qkv_dim * 2 :] + + model_state_dict[key.replace("qkv", "query")] = query_layer + model_state_dict[key.replace("qkv", "key")] = key_layer + model_state_dict[key.replace("qkv", "value")] = value_layer + else: + model_state_dict[key] = value + + return model_state_dict + + +def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, model_type, enable_fusion=False): + clap_model = init_clap(checkpoint_path, model_type, enable_fusion=enable_fusion) + + clap_model.eval() + state_dict = clap_model.model.state_dict() + state_dict = rename_state_dict(state_dict) + + transformers_config = get_config_from_original(clap_model) + transformers_config.audio_config.enable_fusion = enable_fusion + model = ClapModel(transformers_config) + + # ignore the spectrogram embedding layer + model.load_state_dict(state_dict, strict=False) + + model.save_pretrained(pytorch_dump_folder_path) + transformers_config.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") + parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the original CLAP checkpoint") + parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") + parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not") + parser.add_argument("--model_type", default="HTSAT-tiny", type=str, help="Type of the audio model to load, e.g. HTSAT-tiny") + args = parser.parse_args() + + convert_clap_checkpoint( + args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.model_type, args.enable_fusion + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py b/venv/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py new file mode 100644 index 0000000000000000000000000000000000000000..ce18fedd19b109ee9af3b6c7de964e6a217abeef --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clap/feature_extraction_clap.py @@ -0,0 +1,363 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
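The qkv branch of `rename_state_dict` above slices a fused attention projection into three equal blocks along the first dimension. A self-contained sketch of just that split, with an illustrative `dim` that is not taken from any real checkpoint:

```python
import torch

# A fused qkv weight has shape (3 * dim, dim); the converter cuts it into
# equal query/key/value blocks along dim 0.
dim = 8
fused_qkv = torch.randn(3 * dim, dim)

qkv_dim = fused_qkv.size(0) // 3
query = fused_qkv[:qkv_dim]
key = fused_qkv[qkv_dim : 2 * qkv_dim]
value = fused_qkv[2 * qkv_dim :]

assert query.shape == key.shape == value.shape == (dim, dim)
# Recombining the blocks reproduces the original fused tensor.
assert torch.equal(torch.cat([query, key, value], dim=0), fused_qkv)
```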
+"""Feature extractor class for CLAP.""" + + +import copy +from typing import Any, Dict, List, Optional, Union + +import numpy as np +import torch + +from ...audio_utils import mel_filter_bank, spectrogram, window_function +from ...feature_extraction_sequence_utils import SequenceFeatureExtractor +from ...feature_extraction_utils import BatchFeature +from ...utils import TensorType, logging + + +logger = logging.get_logger(__name__) + + +class ClapFeatureExtractor(SequenceFeatureExtractor): + r""" + Constructs a CLAP feature extractor. + + This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains + most of the main methods. Users should refer to this superclass for more information regarding those methods. + + This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the *Short Time + Fourier Transform* (STFT) which should match pytorch's `torch.stft` equivalent. + + Args: + feature_size (`int`, *optional*, defaults to 64): + The feature dimension of the extracted Mel spectrograms. This corresponds to the number of mel filters + (`n_mels`). + sampling_rate (`int`, *optional*, defaults to 48000): + The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). This only serves + to warn users if the audio fed to the feature extractor does not have the same sampling rate. + hop_length (`int`,*optional*, defaults to 480): + Length of the overlaping windows for the STFT used to obtain the Mel Spectrogram. The audio will be split + in smaller `frames` with a step of `hop_length` between each frame. + max_length_s (`int`, *optional*, defaults to 10): + The maximum input length of the model in seconds. This is used to pad the audio. + fft_window_size (`int`, *optional*, defaults to 1024): + Size of the window (in samples) on which the Fourier transform is applied. This controls the frequency + resolution of the spectrogram. 400 means that the fourrier transform is computed on windows of 400 samples. + padding_value (`float`, *optional*, defaults to 0.0): + Padding value used to pad the audio. Should correspond to silences. + return_attention_mask (`bool`, *optional*, defaults to `False`): + Whether or not the model should return the attention masks coresponding to the input. + frequency_min (`float`, *optional*, defaults to 0): + The lowest frequency of interest. The STFT will not be computed for values below this. + frequency_max (`float`, *optional*, defaults to 14000): + The highest frequency of interest. The STFT will not be computed for values above this. + top_db (`float`, *optional*): + The highest decibel value used to convert the mel spectrogram to the log scale. For more details see the + `audio_utils.power_to_db` function + truncation (`str`, *optional*, defaults to `"fusion"`): + Truncation pattern for long audio inputs. Two patterns are available: + - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a + downsampled version of the entire mel spectrogram. + If `config.fusion` is set to True, shorter audios also need to to return 4 mels, which will just be a copy + of the original mel obtained from the padded audio. + - `rand_trunc` will select a random crop of the mel spectrogram. + padding (`str`, *optional*, defaults to `"repeatpad"`): + Padding pattern for shorter audio inputs. Three patterns were originally implemented: + - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`. 
+ - `repeat`: the audio is repeated and then cut to fit the `max_length` + - `pad`: the audio is padded. + """ + + model_input_names = ["input_features", "is_longer"] + + def __init__( + self, + feature_size=64, + sampling_rate=48_000, + hop_length=480, + max_length_s=10, + fft_window_size=1024, + padding_value=0.0, + return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask + frequency_min: float = 0, + frequency_max: float = 14_000, + top_db: int = None, + truncation: str = "fusion", + padding: str = "repeatpad", + **kwargs, + ): + super().__init__( + feature_size=feature_size, + sampling_rate=sampling_rate, + padding_value=padding_value, + return_attention_mask=return_attention_mask, + **kwargs, + ) + self.top_db = top_db + self.truncation = truncation + self.padding = padding + self.fft_window_size = fft_window_size + self.nb_frequency_bins = (fft_window_size >> 1) + 1 + self.hop_length = hop_length + self.max_length_s = max_length_s + self.nb_max_samples = max_length_s * sampling_rate + self.sampling_rate = sampling_rate + self.frequency_min = frequency_min + self.frequency_max = frequency_max + self.mel_filters = mel_filter_bank( + num_frequency_bins=self.nb_frequency_bins, + num_mel_filters=feature_size, + min_frequency=frequency_min, + max_frequency=frequency_max, + sampling_rate=sampling_rate, + norm=None, + mel_scale="htk", + ) + self.mel_filters_slaney = mel_filter_bank( + num_frequency_bins=self.nb_frequency_bins, + num_mel_filters=feature_size, + min_frequency=frequency_min, + max_frequency=frequency_max, + sampling_rate=sampling_rate, + norm="slaney", + mel_scale="slaney", + ) + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. + + Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, except for the + mel filter banks, which do not need to be saved or printed as they are too long. + """ + output = copy.deepcopy(self.__dict__) + output["feature_extractor_type"] = self.__class__.__name__ + if "mel_filters" in output: + del output["mel_filters"] + if "mel_filters_slaney" in output: + del output["mel_filters_slaney"] + return output + + def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray: + """ + Compute the log-mel spectrogram of the provided `waveform` using the Hann window. In CLAP, two different filter + banks are used depending on the truncation pattern: + - `self.mel_filters`: they correspond to the default parameters of `torchaudio` which can be obtained from + calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation` + is set to `"fusion"`. + - `self.mel_filters_slaney`: they correspond to the default parameters of `librosa` which use + `librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original + implementation when the truncation mode is not `"fusion"`.
+ """ + log_mel_spectrogram = spectrogram( + waveform, + window_function(self.fft_window_size, "hann"), + frame_length=self.fft_window_size, + hop_length=self.hop_length, + power=2.0, + mel_filters=mel_filters, + log_mel="dB", + ) + return log_mel_spectrogram.T + + def _random_mel_fusion(self, mel, total_frames, chunk_frames): + ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3) + if len(ranges[1]) == 0: + # if the audio is too short, we just use the first chunk + ranges[1] = [0] + if len(ranges[2]) == 0: + # if the audio is too short, we just use the first chunk + ranges[2] = [0] + # randomly choose index for each part + idx_front = np.random.choice(ranges[0]) + idx_middle = np.random.choice(ranges[1]) + idx_back = np.random.choice(ranges[2]) + + mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :] + mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :] + mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :] + + mel = torch.tensor(mel[None, None, :]) + mel_shrink = torch.nn.functional.interpolate( + mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False + ) + mel_shrink = mel_shrink[0][0].numpy() + mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0) + return mel_fusion + + def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array: + """ + Extracts the mel spectrogram and prepares it for the mode based on the `truncation` and `padding` arguments. + Four different path are possible: + - `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram + will be computed on the entire audio. 3 random crops and a dowsampled version of the full mel spectrogram + are then stacked together. They will later be used for `feature_fusion`. + - `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is + padded based on `padding`. + - `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded + based on `padding`, and is repeated `4` times. + - `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel + spectrogram will be computed on a random crop of the waveform. + + """ + if waveform.shape[0] > max_length: + if truncation == "rand_trunc": + longer = True + # random crop to max_length (for compatibility) -> this should be handled by self.pad + overflow = len(waveform) - max_length + idx = np.random.randint(0, overflow + 1) + waveform = waveform[idx : idx + max_length] + input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :] + elif truncation == "fusion": + mel = self._np_extract_fbank_features(waveform, self.mel_filters) + chunk_frames = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed + total_frames = mel.shape[0] + if chunk_frames == total_frames: + # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. + # In this case, we just use the whole audio. + input_mel = np.stack([mel, mel, mel, mel], axis=0) + longer = False + else: + input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames) + longer = True + else: + raise NotImplementedError(f"data_truncating {truncation} not implemented") + + else: + longer = False + # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding + if waveform.shape[0] < max_length: + if padding == "repeat": + n_repeat = int(max_length / len(waveform)) + waveform = np.tile(waveform, n_repeat + 1)[:max_length] + if padding == "repeatpad": + n_repeat = int(max_length / len(waveform)) + waveform = np.tile(waveform, n_repeat) + waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0) + + if truncation == "fusion": + input_mel = self._np_extract_fbank_features(waveform, self.mel_filters) + input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0) + else: + input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :] + + return input_mel, longer + + def __call__( + self, + raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], + truncation: str = None, + padding: Optional[str] = None, + max_length: Optional[int] = None, + sampling_rate: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs, + ) -> BatchFeature: + """ + Main method to featurize and prepare for the model one or several sequence(s). + + Args: + raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): + The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float + values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not + stereo, i.e. single float per timestep. + truncation (`str`, *optional*): + Truncation pattern for long audio inputs. Two patterns are available: + - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and + a downsampled version of the entire mel spectrogram. + If `config.fusion` is set to True, shorter audios also need to return 4 mels, which will just be a + copy of the original mel obtained from the padded audio. + - `rand_trunc` will select a random crop of the mel spectrogram. + padding (`str`, *optional*): + Padding pattern for shorter audio inputs. Three patterns were originally implemented: + - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`. + - `repeat`: the audio is repeated and then cut to fit the `max_length` + - `pad`: the audio is padded. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of lists of Python numbers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return Numpy `np.ndarray` objects. + sampling_rate (`int`, *optional*): + The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass + `sampling_rate` at the forward call to prevent silent errors and to allow the automatic speech + recognition pipeline to work correctly. + """ + truncation = truncation if truncation is not None else self.truncation + padding = padding if padding else self.padding + + if sampling_rate is not None: + if sampling_rate != self.sampling_rate: + raise ValueError( + f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" + f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" + f" was sampled with {self.sampling_rate} and not {sampling_rate}." + ) + else: + logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function.
" + "Failing to do so can result in silent errors that might be hard to debug." + ) + + is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 + if is_batched_numpy and len(raw_speech.shape) > 2: + raise ValueError(f"Only mono-channel audio is supported for input to {self}") + is_batched = is_batched_numpy or ( + isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) + ) + + if is_batched: + raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech] + elif not is_batched and not isinstance(raw_speech, np.ndarray): + raw_speech = np.asarray(raw_speech, dtype=np.float64) + elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): + raw_speech = raw_speech.astype(np.float64) + + # always return batch + if not is_batched: + raw_speech = [np.asarray(raw_speech)] + + # convert to mel spectrogram, truncate and pad if needed. + padded_inputs = [ + self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding) + for waveform in raw_speech + ] + + input_mel = [] + is_longer = [] + for mel, longer in padded_inputs: + input_mel.append(mel) + is_longer.append(longer) + + if truncation == "fusion" and sum(is_longer) == 0: + # if no audio is longer than 10s, then randomly select one audio to be longer + rand_idx = np.random.randint(0, len(input_mel)) + is_longer[rand_idx] = True + + if isinstance(input_mel[0], List): + input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel] + + # is_longer is a list of bool + is_longer = [[longer] for longer in is_longer] + + input_features = {"input_features": input_mel, "is_longer": is_longer} + input_features = BatchFeature(input_features) + + if return_tensors is not None: + input_features = input_features.convert_to_tensors(return_tensors) + + return input_features diff --git a/venv/lib/python3.10/site-packages/transformers/models/mra/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/mra/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d27ee2f1719321f2c82d49bc4a794a96a3558c4a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mra/__init__.py @@ -0,0 +1,68 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +# rely on isort to merge the imports +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available + + +_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_mra"] = [ + "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", + "MraForMaskedLM", + "MraForMultipleChoice", + "MraForQuestionAnswering", + "MraForSequenceClassification", + "MraForTokenClassification", + "MraLayer", + "MraModel", + "MraPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_mra import ( + MRA_PRETRAINED_MODEL_ARCHIVE_LIST, + MraForMaskedLM, + MraForMultipleChoice, + MraForQuestionAnswering, + MraForSequenceClassification, + MraForTokenClassification, + MraLayer, + MraModel, + MraPreTrainedModel, + ) +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42fc8bf591698a1cf621d7f70bc0c86cefea8bdd Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/configuration_mra.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/configuration_mra.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f96f1f7ce15b186467a597b39d78e17cfe1f4b25 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/configuration_mra.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/convert_mra_pytorch_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/convert_mra_pytorch_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffa35df4c3a751b772eb4ddc752c377f037e1a9b Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/convert_mra_pytorch_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/modeling_mra.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/modeling_mra.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95965c3dc406a804d1ce1c979247d25ed8ff7dab Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mra/__pycache__/modeling_mra.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mra/configuration_mra.py b/venv/lib/python3.10/site-packages/transformers/models/mra/configuration_mra.py new file mode 100644 index 0000000000000000000000000000000000000000..2b3bec041633eacb1718a8521edfe19e1997f655 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/transformers/models/mra/configuration_mra.py @@ -0,0 +1,137 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" MRA model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class MraConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`MraModel`]. It is used to instantiate an MRA + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Mra + [uw-madison/mra-base-512-4](https://huggingface.co/uw-madison/mra-base-512-4) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 50265): + Vocabulary size of the Mra model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`MraModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimension of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 1): + The vocabulary size of the `token_type_ids` passed when calling [`MraModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. 
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. + block_per_row (`int`, *optional*, defaults to 4): + Used to set the budget for the high resolution scale. + approx_mode (`str`, *optional*, defaults to `"full"`): + Controls whether both low and high resolution approximations are used. Set to `"full"` for both low and + high resolution and `"sparse"` for only low resolution. + initial_prior_first_n_blocks (`int`, *optional*, defaults to 0): + The initial number of blocks for which high resolution is used. + initial_prior_diagonal_n_blocks (`int`, *optional*, defaults to 0): + The number of diagonal blocks for which high resolution is used. + + Example: + + ```python + >>> from transformers import MraConfig, MraModel + + >>> # Initializing a Mra uw-madison/mra-base-512-4 style configuration + >>> configuration = MraConfig() + + >>> # Initializing a model (with random weights) from the uw-madison/mra-base-512-4 style configuration + >>> model = MraModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "mra" + + def __init__( + self, + vocab_size=50265, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=1, + initializer_range=0.02, + layer_norm_eps=1e-5, + position_embedding_type="absolute", + block_per_row=4, + approx_mode="full", + initial_prior_first_n_blocks=0, + initial_prior_diagonal_n_blocks=0, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.type_vocab_size = type_vocab_size + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.block_per_row = block_per_row + self.approx_mode = approx_mode + self.initial_prior_first_n_blocks = initial_prior_first_n_blocks + self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks diff --git a/venv/lib/python3.10/site-packages/transformers/models/mra/convert_mra_pytorch_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/mra/convert_mra_pytorch_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..f558f7c7bce3699b867702c56800f5bfe25cb89b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mra/convert_mra_pytorch_to_pytorch.py @@ -0,0 +1,110 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert MRA checkpoints from the original repository. URL: https://github.com/mlpen/mra-attention"""
+
+import argparse
+
+import torch
+
+from transformers import MraConfig, MraForMaskedLM
+
+
+def rename_key(orig_key):
+    if "model" in orig_key:
+        orig_key = orig_key.replace("model.", "")
+    if "norm1" in orig_key:
+        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
+    if "norm2" in orig_key:
+        orig_key = orig_key.replace("norm2", "output.LayerNorm")
+    if "norm" in orig_key:
+        orig_key = orig_key.replace("norm", "LayerNorm")
+    if "transformer" in orig_key:
+        layer_num = orig_key.split(".")[0].split("_")[-1]
+        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
+    if "mha.attn" in orig_key:
+        orig_key = orig_key.replace("mha.attn", "attention.self")
+    if "mha" in orig_key:
+        orig_key = orig_key.replace("mha", "attention")
+    if "W_q" in orig_key:
+        orig_key = orig_key.replace("W_q", "self.query")
+    if "W_k" in orig_key:
+        orig_key = orig_key.replace("W_k", "self.key")
+    if "W_v" in orig_key:
+        orig_key = orig_key.replace("W_v", "self.value")
+    if "ff.0" in orig_key:
+        orig_key = orig_key.replace("ff.0", "intermediate.dense")
+    if "ff.2" in orig_key:
+        orig_key = orig_key.replace("ff.2", "output.dense")
+    if "ff" in orig_key:
+        orig_key = orig_key.replace("ff", "output.dense")
+    if "mlm_class" in orig_key:
+        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
+    if "mlm" in orig_key:
+        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
+    if "backbone.backbone.encoders" in orig_key:
+        orig_key = orig_key.replace("backbone.backbone.encoders", "encoder.layer")
+    if "cls" not in orig_key:
+        orig_key = "mra." + orig_key
+
+    return orig_key
+
+
+def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
+    for key in orig_state_dict.copy().keys():
+        val = orig_state_dict.pop(key)
+
+        if ("pooler" in key) or ("sen_class" in key):
+            continue
+        else:
+            orig_state_dict[rename_key(key)] = val
+
+    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
+    orig_state_dict["mra.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
+
+    return orig_state_dict
+
+
+def convert_mra_checkpoint(checkpoint_path, mra_config_file, pytorch_dump_path):
+    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
+    config = MraConfig.from_json_file(mra_config_file)
+    model = MraForMaskedLM(config)
+
+    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
+
+    print(model.load_state_dict(new_state_dict))
+    model.eval()
+    model.save_pretrained(pytorch_dump_path)
+
+    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    # Required parameters
+    parser.add_argument(
+        "--pytorch_model_path", default=None, type=str, required=True, help="Path to Mra pytorch checkpoint."
+ ) + parser.add_argument( + "--config_file", + default=None, + type=str, + required=True, + help="The json file for Mra model config.", + ) + parser.add_argument( + "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." + ) + args = parser.parse_args() + convert_mra_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mra/modeling_mra.py b/venv/lib/python3.10/site-packages/transformers/models/mra/modeling_mra.py new file mode 100644 index 0000000000000000000000000000000000000000..846578997c4a845c99af8abd74b8ebcd70b130b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mra/modeling_mra.py @@ -0,0 +1,1480 @@ +# coding=utf-8 +# Copyright 2023 University of Wisconsin-Madison and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch MRA model.""" + + +import math +from pathlib import Path +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from torch.utils.cpp_extension import load + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_ninja_available, + is_torch_cuda_available, + logging, +) +from .configuration_mra import MraConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "uw-madison/mra-base-512-4" +_CONFIG_FOR_DOC = "MraConfig" +_TOKENIZER_FOR_DOC = "AutoTokenizer" + + +from ..deprecated._archive_maps import MRA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +mra_cuda_kernel = None + + +def load_cuda_kernels(): + global mra_cuda_kernel + src_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "mra" + + def append_root(files): + return [src_folder / file for file in files] + + src_files = append_root(["cuda_kernel.cu", "cuda_launch.cu", "torch_extension.cpp"]) + + mra_cuda_kernel = load("cuda_kernel", src_files, verbose=True) + + +def sparse_max(sparse_qk_prod, indices, query_num_block, key_num_block): + """ + Computes maximum values for softmax stability. 
+ """ + if len(sparse_qk_prod.size()) != 4: + raise ValueError("sparse_qk_prod must be a 4-dimensional tensor.") + + if len(indices.size()) != 2: + raise ValueError("indices must be a 2-dimensional tensor.") + + if sparse_qk_prod.size(2) != 32: + raise ValueError("The size of the second dimension of sparse_qk_prod must be 32.") + + if sparse_qk_prod.size(3) != 32: + raise ValueError("The size of the third dimension of sparse_qk_prod must be 32.") + + index_vals = sparse_qk_prod.max(dim=-2).values.transpose(-1, -2) + index_vals = index_vals.contiguous() + + indices = indices.int() + indices = indices.contiguous() + + max_vals, max_vals_scatter = mra_cuda_kernel.index_max(index_vals, indices, query_num_block, key_num_block) + max_vals_scatter = max_vals_scatter.transpose(-1, -2)[:, :, None, :] + + return max_vals, max_vals_scatter + + +def sparse_mask(mask, indices, block_size=32): + """ + Converts attention mask to a sparse mask for high resolution logits. + """ + if len(mask.size()) != 2: + raise ValueError("mask must be a 2-dimensional tensor.") + + if len(indices.size()) != 2: + raise ValueError("indices must be a 2-dimensional tensor.") + + if mask.shape[0] != indices.shape[0]: + raise ValueError("mask and indices must have the same size in the zero-th dimension.") + + batch_size, seq_len = mask.shape + num_block = seq_len // block_size + + batch_idx = torch.arange(indices.size(0), dtype=torch.long, device=indices.device) + mask = mask.reshape(batch_size, num_block, block_size) + mask = mask[batch_idx[:, None], (indices % num_block).long(), :] + + return mask + + +def mm_to_sparse(dense_query, dense_key, indices, block_size=32): + """ + Performs Sampled Dense Matrix Multiplication. + """ + batch_size, query_size, dim = dense_query.size() + _, key_size, dim = dense_key.size() + + if query_size % block_size != 0: + raise ValueError("query_size (size of first dimension of dense_query) must be divisible by block_size.") + + if key_size % block_size != 0: + raise ValueError("key_size (size of first dimension of dense_key) must be divisible by block_size.") + + dense_query = dense_query.reshape(batch_size, query_size // block_size, block_size, dim).transpose(-1, -2) + dense_key = dense_key.reshape(batch_size, key_size // block_size, block_size, dim).transpose(-1, -2) + + if len(dense_query.size()) != 4: + raise ValueError("dense_query must be a 4-dimensional tensor.") + + if len(dense_key.size()) != 4: + raise ValueError("dense_key must be a 4-dimensional tensor.") + + if len(indices.size()) != 2: + raise ValueError("indices must be a 2-dimensional tensor.") + + if dense_query.size(3) != 32: + raise ValueError("The third dimension of dense_query must be 32.") + + if dense_key.size(3) != 32: + raise ValueError("The third dimension of dense_key must be 32.") + + dense_query = dense_query.contiguous() + dense_key = dense_key.contiguous() + + indices = indices.int() + indices = indices.contiguous() + + return mra_cuda_kernel.mm_to_sparse(dense_query, dense_key, indices.int()) + + +def sparse_dense_mm(sparse_query, indices, dense_key, query_num_block, block_size=32): + """ + Performs matrix multiplication of a sparse matrix with a dense matrix. 
+ """ + batch_size, key_size, dim = dense_key.size() + + if key_size % block_size != 0: + raise ValueError("key_size (size of first dimension of dense_key) must be divisible by block_size.") + + if sparse_query.size(2) != block_size: + raise ValueError("The size of the second dimension of sparse_query must be equal to the block_size.") + + if sparse_query.size(3) != block_size: + raise ValueError("The size of the third dimension of sparse_query must be equal to the block_size.") + + dense_key = dense_key.reshape(batch_size, key_size // block_size, block_size, dim).transpose(-1, -2) + + if len(sparse_query.size()) != 4: + raise ValueError("sparse_query must be a 4-dimensional tensor.") + + if len(dense_key.size()) != 4: + raise ValueError("dense_key must be a 4-dimensional tensor.") + + if len(indices.size()) != 2: + raise ValueError("indices must be a 2-dimensional tensor.") + + if dense_key.size(3) != 32: + raise ValueError("The size of the third dimension of dense_key must be 32.") + + sparse_query = sparse_query.contiguous() + + indices = indices.int() + indices = indices.contiguous() + dense_key = dense_key.contiguous() + + dense_qk_prod = mra_cuda_kernel.sparse_dense_mm(sparse_query, indices, dense_key, query_num_block) + dense_qk_prod = dense_qk_prod.transpose(-1, -2).reshape(batch_size, query_num_block * block_size, dim) + return dense_qk_prod + + +def transpose_indices(indices, dim_1_block, dim_2_block): + return ((indices % dim_2_block) * dim_1_block + torch.div(indices, dim_2_block, rounding_mode="floor")).long() + + +class MraSampledDenseMatMul(torch.autograd.Function): + @staticmethod + def forward(ctx, dense_query, dense_key, indices, block_size): + sparse_qk_prod = mm_to_sparse(dense_query, dense_key, indices, block_size) + ctx.save_for_backward(dense_query, dense_key, indices) + ctx.block_size = block_size + return sparse_qk_prod + + @staticmethod + def backward(ctx, grad): + dense_query, dense_key, indices = ctx.saved_tensors + block_size = ctx.block_size + query_num_block = dense_query.size(1) // block_size + key_num_block = dense_key.size(1) // block_size + indices_T = transpose_indices(indices, query_num_block, key_num_block) + grad_key = sparse_dense_mm(grad.transpose(-1, -2), indices_T, dense_query, key_num_block) + grad_query = sparse_dense_mm(grad, indices, dense_key, query_num_block) + return grad_query, grad_key, None, None + + @staticmethod + def operator_call(dense_query, dense_key, indices, block_size=32): + return MraSampledDenseMatMul.apply(dense_query, dense_key, indices, block_size) + + +class MraSparseDenseMatMul(torch.autograd.Function): + @staticmethod + def forward(ctx, sparse_query, indices, dense_key, query_num_block): + sparse_qk_prod = sparse_dense_mm(sparse_query, indices, dense_key, query_num_block) + ctx.save_for_backward(sparse_query, indices, dense_key) + ctx.query_num_block = query_num_block + return sparse_qk_prod + + @staticmethod + def backward(ctx, grad): + sparse_query, indices, dense_key = ctx.saved_tensors + query_num_block = ctx.query_num_block + key_num_block = dense_key.size(1) // sparse_query.size(-1) + indices_T = transpose_indices(indices, query_num_block, key_num_block) + grad_key = sparse_dense_mm(sparse_query.transpose(-1, -2), indices_T, grad, key_num_block) + grad_query = mm_to_sparse(grad, dense_key, indices) + return grad_query, None, grad_key, None + + @staticmethod + def operator_call(sparse_query, indices, dense_key, query_num_block): + return MraSparseDenseMatMul.apply(sparse_query, indices, dense_key, query_num_block) + 
+
+class MraReduceSum:
+    @staticmethod
+    def operator_call(sparse_query, indices, query_num_block, key_num_block):
+        batch_size, num_block, block_size, _ = sparse_query.size()
+
+        if len(sparse_query.size()) != 4:
+            raise ValueError("sparse_query must be a 4-dimensional tensor.")
+
+        if len(indices.size()) != 2:
+            raise ValueError("indices must be a 2-dimensional tensor.")
+
+        sparse_query = sparse_query.sum(dim=2).reshape(batch_size * num_block, block_size)
+
+        batch_idx = torch.arange(indices.size(0), dtype=torch.long, device=indices.device)
+        global_idxes = (
+            torch.div(indices, key_num_block, rounding_mode="floor").long() + batch_idx[:, None] * query_num_block
+        ).reshape(batch_size * num_block)
+        temp = torch.zeros(
+            (batch_size * query_num_block, block_size), dtype=sparse_query.dtype, device=sparse_query.device
+        )
+        output = temp.index_add(0, global_idxes, sparse_query).reshape(batch_size, query_num_block, block_size)
+
+        output = output.reshape(batch_size, query_num_block * block_size)
+        return output
+
+
+def get_low_resolution_logit(query, key, block_size, mask=None, value=None):
+    """
+    Compute low resolution approximation.
+    """
+    batch_size, seq_len, head_dim = query.size()
+
+    num_block_per_row = seq_len // block_size
+
+    value_hat = None
+    if mask is not None:
+        token_count = mask.reshape(batch_size, num_block_per_row, block_size).sum(dim=-1)
+        query_hat = query.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / (
+            token_count[:, :, None] + 1e-6
+        )
+        key_hat = key.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / (
+            token_count[:, :, None] + 1e-6
+        )
+        if value is not None:
+            value_hat = value.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / (
+                token_count[:, :, None] + 1e-6
+            )
+    else:
+        token_count = block_size * torch.ones(batch_size, num_block_per_row, dtype=torch.float, device=query.device)
+        query_hat = query.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2)
+        key_hat = key.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2)
+        if value is not None:
+            value_hat = value.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2)
+
+    low_resolution_logit = torch.matmul(query_hat, key_hat.transpose(-1, -2)) / math.sqrt(head_dim)
+
+    low_resolution_logit_row_max = low_resolution_logit.max(dim=-1, keepdims=True).values
+
+    if mask is not None:
+        low_resolution_logit = (
+            low_resolution_logit - 1e4 * ((token_count[:, None, :] * token_count[:, :, None]) < 0.5).float()
+        )
+
+    return low_resolution_logit, token_count, low_resolution_logit_row_max, value_hat
+
+
+def get_block_idxes(
+    low_resolution_logit, num_blocks, approx_mode, initial_prior_first_n_blocks, initial_prior_diagonal_n_blocks
+):
+    """
+    Compute the indices of the subset of components to be used in the approximation.
+ """ + batch_size, total_blocks_per_row, _ = low_resolution_logit.shape + + if initial_prior_diagonal_n_blocks > 0: + offset = initial_prior_diagonal_n_blocks // 2 + temp_mask = torch.ones(total_blocks_per_row, total_blocks_per_row, device=low_resolution_logit.device) + diagonal_mask = torch.tril(torch.triu(temp_mask, diagonal=-offset), diagonal=offset) + low_resolution_logit = low_resolution_logit + diagonal_mask[None, :, :] * 5e3 + + if initial_prior_first_n_blocks > 0: + low_resolution_logit[:, :initial_prior_first_n_blocks, :] = ( + low_resolution_logit[:, :initial_prior_first_n_blocks, :] + 5e3 + ) + low_resolution_logit[:, :, :initial_prior_first_n_blocks] = ( + low_resolution_logit[:, :, :initial_prior_first_n_blocks] + 5e3 + ) + + top_k_vals = torch.topk( + low_resolution_logit.reshape(batch_size, -1), num_blocks, dim=-1, largest=True, sorted=False + ) + indices = top_k_vals.indices + + if approx_mode == "full": + threshold = top_k_vals.values.min(dim=-1).values + high_resolution_mask = (low_resolution_logit >= threshold[:, None, None]).float() + elif approx_mode == "sparse": + high_resolution_mask = None + else: + raise ValueError(f"{approx_mode} is not a valid approx_model value.") + + return indices, high_resolution_mask + + +def mra2_attention( + query, + key, + value, + mask, + num_blocks, + approx_mode, + block_size=32, + initial_prior_first_n_blocks=0, + initial_prior_diagonal_n_blocks=0, +): + """ + Use Mra to approximate self-attention. + """ + if mra_cuda_kernel is None: + return torch.zeros_like(query).requires_grad_() + + batch_size, num_head, seq_len, head_dim = query.size() + meta_batch = batch_size * num_head + + if seq_len % block_size != 0: + raise ValueError("sequence length must be divisible by the block_size.") + + num_block_per_row = seq_len // block_size + + query = query.reshape(meta_batch, seq_len, head_dim) + key = key.reshape(meta_batch, seq_len, head_dim) + value = value.reshape(meta_batch, seq_len, head_dim) + + if mask is not None: + query = query * mask[:, :, None] + key = key * mask[:, :, None] + value = value * mask[:, :, None] + + if approx_mode == "full": + low_resolution_logit, token_count, low_resolution_logit_row_max, value_hat = get_low_resolution_logit( + query, key, block_size, mask, value + ) + elif approx_mode == "sparse": + with torch.no_grad(): + low_resolution_logit, token_count, low_resolution_logit_row_max, _ = get_low_resolution_logit( + query, key, block_size, mask + ) + else: + raise Exception('approx_mode must be "full" or "sparse"') + + with torch.no_grad(): + low_resolution_logit_normalized = low_resolution_logit - low_resolution_logit_row_max + indices, high_resolution_mask = get_block_idxes( + low_resolution_logit_normalized, + num_blocks, + approx_mode, + initial_prior_first_n_blocks, + initial_prior_diagonal_n_blocks, + ) + + high_resolution_logit = MraSampledDenseMatMul.operator_call( + query, key, indices, block_size=block_size + ) / math.sqrt(head_dim) + max_vals, max_vals_scatter = sparse_max(high_resolution_logit, indices, num_block_per_row, num_block_per_row) + high_resolution_logit = high_resolution_logit - max_vals_scatter + if mask is not None: + high_resolution_logit = high_resolution_logit - 1e4 * (1 - sparse_mask(mask, indices)[:, :, :, None]) + high_resolution_attn = torch.exp(high_resolution_logit) + high_resolution_attn_out = MraSparseDenseMatMul.operator_call( + high_resolution_attn, indices, value, num_block_per_row + ) + high_resolution_normalizer = MraReduceSum.operator_call( + high_resolution_attn, 
+
+    if approx_mode == "full":
+        low_resolution_attn = (
+            torch.exp(low_resolution_logit - low_resolution_logit_row_max - 1e4 * high_resolution_mask)
+            * token_count[:, None, :]
+        )
+
+        low_resolution_attn_out = (
+            torch.matmul(low_resolution_attn, value_hat)[:, :, None, :]
+            .repeat(1, 1, block_size, 1)
+            .reshape(meta_batch, seq_len, head_dim)
+        )
+        low_resolution_normalizer = (
+            low_resolution_attn.sum(dim=-1)[:, :, None].repeat(1, 1, block_size).reshape(meta_batch, seq_len)
+        )
+
+        log_correction = low_resolution_logit_row_max.repeat(1, 1, block_size).reshape(meta_batch, seq_len) - max_vals
+        if mask is not None:
+            log_correction = log_correction * mask
+
+        low_resolution_corr = torch.exp(log_correction * (log_correction <= 0).float())
+        low_resolution_attn_out = low_resolution_attn_out * low_resolution_corr[:, :, None]
+        low_resolution_normalizer = low_resolution_normalizer * low_resolution_corr
+
+        high_resolution_corr = torch.exp(-log_correction * (log_correction > 0).float())
+        high_resolution_attn_out = high_resolution_attn_out * high_resolution_corr[:, :, None]
+        high_resolution_normalizer = high_resolution_normalizer * high_resolution_corr
+
+        context_layer = (high_resolution_attn_out + low_resolution_attn_out) / (
+            high_resolution_normalizer[:, :, None] + low_resolution_normalizer[:, :, None] + 1e-6
+        )
+
+    elif approx_mode == "sparse":
+        context_layer = high_resolution_attn_out / (high_resolution_normalizer[:, :, None] + 1e-6)
+    else:
+        raise Exception('config.approx_mode must be "full" or "sparse"')
+
+    if mask is not None:
+        context_layer = context_layer * mask[:, :, None]
+
+    context_layer = context_layer.reshape(batch_size, num_head, seq_len, head_dim)
+
+    return context_layer
+
+
+class MraEmbeddings(nn.Module):
+    """Construct the embeddings from word, position and token_type embeddings."""
+
+    def __init__(self, config):
+        super().__init__()
+        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+        self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size)
+        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+        # any TensorFlow checkpoint file
+        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2)
+        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+        self.register_buffer(
+            "token_type_ids",
+            torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
+            persistent=False,
+        )
+
+    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
+        if input_ids is not None:
+            input_shape = input_ids.size()
+        else:
+            input_shape = inputs_embeds.size()[:-1]
+
+        seq_length = input_shape[1]
+
+        if position_ids is None:
+            position_ids = self.position_ids[:, :seq_length]
+
+        # Set token_type_ids to the registered buffer (all zeros) when it is not passed; this usually occurs
+        # when it is auto-generated. The registered buffer helps users trace the model without passing
+        # token_type_ids, and solves issue #5664.
+        if token_type_ids is None:
+            if hasattr(self, "token_type_ids"):
+                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+                token_type_ids = buffered_token_type_ids_expanded
+            else:
+                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+        if inputs_embeds is None:
+            inputs_embeds = self.word_embeddings(input_ids)
+        token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+        embeddings = inputs_embeds + token_type_embeddings
+        if self.position_embedding_type == "absolute":
+            position_embeddings = self.position_embeddings(position_ids)
+            embeddings += position_embeddings
+        embeddings = self.LayerNorm(embeddings)
+        embeddings = self.dropout(embeddings)
+        return embeddings
+
+
+class MraSelfAttention(nn.Module):
+    def __init__(self, config, position_embedding_type=None):
+        super().__init__()
+        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+            raise ValueError(
+                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+                f"heads ({config.num_attention_heads})"
+            )
+
+        kernel_loaded = mra_cuda_kernel is not None
+        if is_torch_cuda_available() and is_ninja_available() and not kernel_loaded:
+            try:
+                load_cuda_kernels()
+            except Exception as e:
+                logger.warning(f"Could not load the custom CUDA kernel for MRA attention: {e}")
+
+        self.num_attention_heads = config.num_attention_heads
+        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+        self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+        self.query = nn.Linear(config.hidden_size, self.all_head_size)
+        self.key = nn.Linear(config.hidden_size, self.all_head_size)
+        self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+        self.position_embedding_type = (
+            position_embedding_type if position_embedding_type is not None else config.position_embedding_type
+        )
+
+        self.num_block = (config.max_position_embeddings // 32) * config.block_per_row
+        self.num_block = min(self.num_block, int((config.max_position_embeddings // 32) ** 2))
+
+        self.approx_mode = config.approx_mode
+        self.initial_prior_first_n_blocks = config.initial_prior_first_n_blocks
+        self.initial_prior_diagonal_n_blocks = config.initial_prior_diagonal_n_blocks
+
+    def transpose_for_scores(self, layer):
+        new_layer_shape = layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+        layer = layer.view(*new_layer_shape)
+        return layer.permute(0, 2, 1, 3)
+
+    def forward(self, hidden_states, attention_mask=None):
+        mixed_query_layer = self.query(hidden_states)
+
+        key_layer = self.transpose_for_scores(self.key(hidden_states))
+        value_layer = self.transpose_for_scores(self.value(hidden_states))
+        query_layer = self.transpose_for_scores(mixed_query_layer)
+
+        batch_size, num_heads, seq_len, head_dim = query_layer.size()
+
+        # revert changes made by get_extended_attention_mask
+        attention_mask = 1.0 + attention_mask / 10000.0
+        attention_mask = (
+            attention_mask.squeeze().repeat(1, num_heads, 1).reshape(batch_size * num_heads, seq_len).int()
+        )
+
+        # The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs
+        # smaller than this are padded with zeros.
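+        # Editorial illustration (not part of the upstream file): for mra-base-512-4, hidden_size=768
+        # with 12 heads gives head_dim=64, so nothing is padded; a hypothetical head_dim of 16 would be
+        # zero-padded to 32 below and the extra channels sliced off again after the kernel call.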
+ gpu_warp_size = 32 + + if head_dim < gpu_warp_size: + pad_size = batch_size, num_heads, seq_len, gpu_warp_size - head_dim + + query_layer = torch.cat([query_layer, torch.zeros(pad_size, device=query_layer.device)], dim=-1) + key_layer = torch.cat([key_layer, torch.zeros(pad_size, device=key_layer.device)], dim=-1) + value_layer = torch.cat([value_layer, torch.zeros(pad_size, device=value_layer.device)], dim=-1) + + context_layer = mra2_attention( + query_layer.float(), + key_layer.float(), + value_layer.float(), + attention_mask.float(), + self.num_block, + approx_mode=self.approx_mode, + initial_prior_first_n_blocks=self.initial_prior_first_n_blocks, + initial_prior_diagonal_n_blocks=self.initial_prior_diagonal_n_blocks, + ) + + if head_dim < gpu_warp_size: + context_layer = context_layer[:, :, :, :head_dim] + + context_layer = context_layer.reshape(batch_size, num_heads, seq_len, head_dim) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer,) + + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput +class MraSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class MraAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = MraSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = MraSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward(self, hidden_states, attention_mask=None): + self_outputs = self.self(hidden_states, attention_mask) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate +class MraIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> 
torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput +class MraOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class MraLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = MraAttention(config) + self.add_cross_attention = config.add_cross_attention + self.intermediate = MraIntermediate(config) + self.output = MraOutput(config) + + def forward(self, hidden_states, attention_mask=None): + self_attention_outputs = self.attention(hidden_states, attention_mask) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class MraEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([MraLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + output_hidden_states=False, + return_dict=True, + ): + all_hidden_states = () if output_hidden_states else None + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + ) + else: + layer_outputs = layer_module(hidden_states, attention_mask) + + hidden_states = layer_outputs[0] + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) + return BaseModelOutputWithCrossAttentions( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform +class MraPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = 
self.transform_act_fn(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states)
+        return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Mra
+class MraLMPredictionHead(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.transform = MraPredictionHeadTransform(config)
+
+        # The output weights are the same as the input embeddings, but there is
+        # an output-only bias for each token.
+        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+        self.decoder.bias = self.bias
+
+    def forward(self, hidden_states):
+        hidden_states = self.transform(hidden_states)
+        hidden_states = self.decoder(hidden_states)
+        return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Mra
+class MraOnlyMLMHead(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.predictions = MraLMPredictionHead(config)
+
+    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
+        prediction_scores = self.predictions(sequence_output)
+        return prediction_scores
+
+
+# Copied from transformers.models.yoso.modeling_yoso.YosoPreTrainedModel with Yoso->Mra,yoso->mra
+class MraPreTrainedModel(PreTrainedModel):
+    """
+    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+    models.
+    """
+
+    config_class = MraConfig
+    base_model_prefix = "mra"
+    supports_gradient_checkpointing = True
+
+    def _init_weights(self, module):
+        """Initialize the weights"""
+        if isinstance(module, nn.Linear):
+            # Slightly different from the TF version which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+
+
+MRA_START_DOCSTRING = r"""
+    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+    behavior.
+
+    Parameters:
+        config ([`MraConfig`]): Model configuration class with all the parameters of the model.
+            Initializing with a config file does not load the weights associated with the model, only the
+            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MRA_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `({0})`):
+            Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+ + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare MRA Model transformer outputting raw hidden-states without any specific head on top.", + MRA_START_DOCSTRING, +) +class MraModel(MraPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + + self.embeddings = MraEmbeddings(config) + self.encoder = MraEncoder(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
+        class PreTrainedModel
+        """
+        for layer, heads in heads_to_prune.items():
+            self.encoder.layer[layer].attention.prune_heads(heads)
+
+    @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=BaseModelOutputWithCrossAttentions,
+        config_class=_CONFIG_FOR_DOC,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        token_type_ids: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.Tensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+        elif input_ids is not None:
+            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+            input_shape = input_ids.size()
+        elif inputs_embeds is not None:
+            input_shape = inputs_embeds.size()[:-1]
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        batch_size, seq_length = input_shape
+        device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+        if attention_mask is None:
+            attention_mask = torch.ones((batch_size, seq_length), device=device)
+
+        if token_type_ids is None:
+            if hasattr(self.embeddings, "token_type_ids"):
+                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+                token_type_ids = buffered_token_type_ids_expanded
+            else:
+                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+        # ourselves in which case we just need to make it broadcastable to all heads.
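+        # Editorial illustration (not part of the upstream file): get_extended_attention_mask turns a
+        # [batch_size, seq_length] mask of 1s and 0s into a [batch_size, 1, 1, seq_length] tensor with
+        # 0.0 at kept positions and a large negative value at masked ones; MraSelfAttention later
+        # inverts that transform (assuming the -10000.0 convention) to recover a {0, 1} mask.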
+        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+        # Prepare head mask if needed
+        # 1.0 in head_mask indicates we keep the head
+        # attention_probs has shape bsz x n_heads x N x N
+        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+        embedding_output = self.embeddings(
+            input_ids=input_ids,
+            position_ids=position_ids,
+            token_type_ids=token_type_ids,
+            inputs_embeds=inputs_embeds,
+        )
+        encoder_outputs = self.encoder(
+            embedding_output,
+            attention_mask=extended_attention_mask,
+            head_mask=head_mask,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        sequence_output = encoder_outputs[0]
+
+        if not return_dict:
+            return (sequence_output,) + encoder_outputs[1:]
+
+        return BaseModelOutputWithCrossAttentions(
+            last_hidden_state=sequence_output,
+            hidden_states=encoder_outputs.hidden_states,
+            attentions=encoder_outputs.attentions,
+            cross_attentions=encoder_outputs.cross_attentions,
+        )
+
+
+@add_start_docstrings("""MRA Model with a `language modeling` head on top.""", MRA_START_DOCSTRING)
+class MraForMaskedLM(MraPreTrainedModel):
+    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
+
+    def __init__(self, config):
+        super().__init__(config)
+
+        self.mra = MraModel(config)
+        self.cls = MraOnlyMLMHead(config)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_output_embeddings(self):
+        return self.cls.predictions.decoder
+
+    def set_output_embeddings(self, new_embeddings):
+        self.cls.predictions.decoder = new_embeddings
+
+    @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=MaskedLMOutput,
+        config_class=_CONFIG_FOR_DOC,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        token_type_ids: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.Tensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        labels: Optional[torch.Tensor] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, MaskedLMOutput]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mra( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +# Copied from transformers.models.yoso.modeling_yoso.YosoClassificationHead with Yoso->Mra +class MraClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.out_proj = nn.Linear(config.hidden_size, config.num_labels) + + self.config = config + + def forward(self, features, **kwargs): + x = features[:, 0, :] # take token (equiv. to [CLS]) + x = self.dropout(x) + x = self.dense(x) + x = ACT2FN[self.config.hidden_act](x) + x = self.dropout(x) + x = self.out_proj(x) + return x + + +@add_start_docstrings( + """MRA Model transformer with a sequence classification/regression head on top (a linear layer on top of + the pooled output) e.g. for GLUE tasks.""", + MRA_START_DOCSTRING, +) +class MraForSequenceClassification(MraPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.mra = MraModel(config) + self.classifier = MraClassificationHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mra( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """MRA Model with a multiple choice classification head on top (a linear layer on top of + the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""", + MRA_START_DOCSTRING, +) +class MraForMultipleChoice(MraPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.mra = MraModel(config) + self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See + `input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.mra( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_state = outputs[0] # (bs * num_choices, seq_len, dim) + pooled_output = hidden_state[:, 0] # (bs * num_choices, dim) + pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim) + pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim) + logits = self.classifier(pooled_output) + + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """MRA Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""", + MRA_START_DOCSTRING, +) +class MraForTokenClassification(MraPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.mra = MraModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mra( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels) + active_labels = torch.where( + active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) + ) + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """MRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""", + MRA_START_DOCSTRING, +) +class MraForQuestionAnswering(MraPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + config.num_labels = 2 + self.num_labels = config.num_labels + + self.mra = MraModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + start_positions: Optional[torch.Tensor] = None, + end_positions: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mra( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[1:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/nougat/tokenization_nougat_fast.py b/venv/lib/python3.10/site-packages/transformers/models/nougat/tokenization_nougat_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..ef6b613bba3888defc2555f697e878c6bd815156 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/nougat/tokenization_nougat_fast.py @@ -0,0 +1,625 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fast tokenizer class for Nougat. +""" +import re +from functools import partial +from multiprocessing import Pool +from typing import List, Union + +import numpy as np + +from transformers.tokenization_utils_base import INIT_TOKENIZER_DOCSTRING +from transformers.tokenization_utils_fast import PreTrainedTokenizerFast +from transformers.utils import add_end_docstrings + +from ...utils import is_levenshtein_available, is_nltk_available, logging, requires_backends + + +if is_levenshtein_available(): + from Levenshtein import ratio + +if is_nltk_available(): + import nltk + + +logger = logging.get_logger(__name__) + + +INIT_TOKENIZER_DOCSTRING += """ + tokenizer_object ([`tokenizers.Tokenizer`]): + A [`tokenizers.Tokenizer`] object from 🤗 tokenizers to instantiate from. 
See [Using tokenizers from 🤗 + tokenizers](../fast_tokenizers) for more information. + tokenizer_file ([`str`]): + A path to a local JSON file representing a previously serialized [`tokenizers.Tokenizer`] object from 🤗 + tokenizers. +""" + + +VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"} + + +def markdown_compatible(text: str) -> str: + """ + Make text compatible with Markdown formatting. + + This function makes various text formatting adjustments to make it compatible with Markdown. + + Args: + text (`str`): + The input text to be made Markdown-compatible. + + Returns: + `str`: The Markdown-compatible text. + """ + # equation tag + # Replace lines that start with a pattern like (decimal) \[some text\] with \[[some text] \tag{decimal}\]. + text = re.sub(r"^\(([\d.]+[a-zA-Z]?)\) \\\[(.+?)\\\]$", r"\[\2 \\tag{\1}\]", text, flags=re.M) + # Replace lines that start with a pattern like \[some text\] (decimal) with \[[some text] \tag{decimal}\]. + text = re.sub(r"^\\\[(.+?)\\\] \(([\d.]+[a-zA-Z]?)\)$", r"\[\1 \\tag{\2}\]", text, flags=re.M) + # Replace lines that start with a pattern like \[some text\] (digits) \[another text\] with \[[some text] \tag{digits}\] [another text]. + text = re.sub( + r"^\\\[(.+?)\\\] \(([\d.]+[a-zA-Z]?)\) (\\\[.+?\\\])$", + r"\[\1 \\tag{\2}\] \3", + text, + flags=re.M, + ) + # multi line + text = text.replace(r"\. ", ". ") + # bold formatting + text = text.replace(r"\bm{", r"\mathbf{").replace(r"{\\bm ", r"\mathbf{") + text = re.sub(r"\\mbox{ ?\\boldmath\$(.*?)\$}", r"\\mathbf{\1}", text) + # Reformat urls (http, ftp and https only) to markdown [url](url) clickable format + text = re.sub( + r"((?:http|ftp|https):\/\/(?:[\w_-]+(?:(?:\.[\w_-]+)+))(?:[\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-]))", + r"[\1](\1)", + text, + ) + # algorithms + text = re.sub(r"```\s*(.+?)\s*```", r"```\n\1\n```", text, flags=re.S) + + return text + + +def normalize_list_like_lines(generation): + """ + Normalize lines in the given text that resemble list items. The function looks for lines that start optionally with + '-' or '*', possibly followed by Roman numerals or digits indicating nesting levels. The function reformats such + lines to make them more structured. + + Args: + generation (str): The input text containing lines that need to be normalized. + + Returns: + str: The input text with the list-like lines normalized. + + Note: + The function uses regular expressions to identify and reformat the list-like lines. The patterns capture + optional bullet points, nesting levels indicated by numerals, and the actual list item content. The + normalization adjusts the bullet point style and nesting levels based on the captured patterns. + """ + + # This matches lines starting with - or *, not followed by - or * (lists) + # that are then numbered by digits \d or roman numerals (one or more) + # and then, optional additional numbering of this line is captured + # this is then fed to re.finditer. + pattern = r"(?:^)(-|\*)?(?!-|\*) ?((?:\d|[ixv])+ )?.+? 
(-|\*) (((?:\d|[ixv])+)\.(\d|[ixv]) )?.*(?:$)" + + for match in reversed(list(re.finditer(pattern, generation, flags=re.I | re.M))): + start, stop = match.span() + delim = match.group(3) + " " + splits = match.group(0).split(delim) + replacement = "" + + if match.group(1) is not None: + splits = splits[1:] + delim1 = match.group(1) + " " + else: + delim1 = "" + continue # Skip false positives + + pre, post = generation[:start], generation[stop:] + + for i, item in enumerate(splits): + level = 0 + potential_numeral, _, rest = item.strip().partition(" ") + if not rest: + continue + # Infer current nesting level based on detected numbering + if re.match(r"^[\dixv]+((?:\.[\dixv])?)+$", potential_numeral, flags=re.I | re.M): + level = potential_numeral.count(".") + + replacement += ( + ("\n" if i > 0 else "") + ("\t" * level) + (delim if i > 0 or start == 0 else delim1) + item.strip() + ) + + if post == "": + post = "\n" + + generation = pre + replacement + post + + return generation + + +def find_next_punctuation(text: str, start_idx=0): + """ + Find the index of the next punctuation mark. + + Args: + text (`str`): + String to examine + start_idx (`int`, *optional*) + Index where to start + """ + + for i in range(start_idx, len(text)): + if text[i] in [".", "?", "!", "\n"]: + return i + + return None + + +def truncate_repetitions(text: str, min_len: int = 30) -> str: + """ + Attempt to truncate repeating segments in the input string. + + This function looks for the longest repeating substring at the end of the input string and truncates it to appear + only once. To be considered for removal, repetitions need to be continuous. + + Args: + text (`str`): + The input raw prediction to be truncated. + min_len (int): + The minimum length of the repeating segment. + + Returns: + `str`: The input string with repeated segments truncated. 
+ """ + text_lower = text.lower() + text_length = len(text_lower) + + if text_length < 2 * min_len: + return text + + # try to find a length at which the tail is repeating + max_repetition_length = None + for repetition_length in range(min_len, int(text_length / 2)): + # check if there is a repetition at the end + same = True + for i in range(0, repetition_length): + if text_lower[text_length - repetition_length - i - 1] != text_lower[text_length - i - 1]: + same = False + break + + if same: + max_repetition_length = repetition_length + + if max_repetition_length is None: + return text + + lcs = text_lower[-max_repetition_length:] + + # remove all but the last repetition + substituted_text = text + substituted_text_lower = text_lower + while substituted_text_lower.endswith(lcs): + substituted_text = substituted_text[:-max_repetition_length] + substituted_text_lower = substituted_text_lower[:-max_repetition_length] + + # this is the tail with the repetitions + repeating_tail = text_lower[len(substituted_text_lower) :] + + # add until next punctuation and make sure last sentence is not repeating + substituted_text_lower_out = substituted_text_lower + while True: + sentence_end = find_next_punctuation(text_lower, len(substituted_text_lower_out)) + sentence_start = find_next_punctuation(text_lower[::-1], len(substituted_text_lower_out)) + if sentence_end and sentence_start: + sentence = text_lower[sentence_start:sentence_end] + substituted_text_lower_out = text_lower[: sentence_end + 1] + if sentence in repeating_tail: + break + else: + break + + text_out = text[: len(substituted_text_lower_out)] + + return text_out + + +def remove_numbers(lines): + def _clean(s): + return re.sub(r"(?:[\d_]|\*\*)", "", s).strip() + + if isinstance(lines, str): + return _clean(lines) + out = [] + for l in lines: + out.append(_clean(l)) + return out + + +def get_slices(lines, clean_lines): + """ + Get slices of text based on specific criteria within the lines. + + This function identifies and returns slices of text from the input lines based on certain conditions. + + These conditions were chosen by the Nougat authors: + - The slice is less than 200 characters long. + - The slice is more than 3 characters long. + - The slice does not start with "[MISSING_PAGE". + - The slice is either the same as the next slice or the ratio of the two in terms of Levensthein distance is + greater than 0.9. + + Args: + lines (`List[str]`): + The list of lines containing the text. + clean_lines (`List[str]`): + A cleaned version of the text (without numbers). + + Returns: + `List[tuple]`: A list of tuples representing the start and end indices of text slices. + """ + indices = np.zeros(len(lines)) + for i in range(len(lines) - 1): + j = i + 1 + while not clean_lines[j] and j < len(lines) - 1: + j += 1 + if ( + len(clean_lines[i]) < 200 + and len(clean_lines[i]) > 3 + and len(clean_lines[j]) < 200 + and len(clean_lines[j]) > 3 + and not clean_lines[i].startswith("[MISSING_PAGE") + and (clean_lines[i] == clean_lines[j] or ratio(clean_lines[i], clean_lines[j]) > 0.9) + ): + indices[i:j] = 1 + ids = np.where(indices)[0] + slices = [] + if len(ids) == 0: + return slices + j0 = 0 + for j, x in enumerate(np.diff(ids) > 3): + if x: + slices.append((ids[j0], ids[j] + 2)) + j0 = j + 1 + slices.append((ids[j0], ids[-1] + 2)) + return [sli for sli in slices if sli[1] - sli[0] > 15] + + +def remove_slice_from_lines(lines, clean_text, slice) -> str: + """ + Remove a slice of text from the lines based on specific criteria. 
+
+    This function identifies a slice of text within the lines and removes it based on certain conditions.
+
+    Args:
+        lines (list of str): The list of lines containing the text.
+        clean_text (list of str): A cleaned version of the text (without numbers).
+        slice (tuple): A tuple representing the start and end indices of the slice to be removed.
+
+    Returns:
+        str: The removed slice of text as a single string.
+    """
+    base = clean_text[slice[0]]
+    section = list(slice)
+    check_start_flag = False
+    # backwards pass, at most 5 lines
+    for line_idx in range(max(0, slice[0] - 1), max(0, slice[0] - 5), -1):
+        if not lines[line_idx]:
+            continue
+        if lines[line_idx] == "## References":
+            section[0] = line_idx
+            break
+        elif ratio(base, remove_numbers(lines[line_idx])) < 0.9:
+            section[0] = line_idx + 1
+            potential_ref = remove_numbers(lines[max(0, line_idx - 1)].partition("* [")[-1])
+            if len(potential_ref) >= 0.75 * len(base) and ratio(base, potential_ref) < 0.9:
+                section[0] = line_idx
+                check_start_flag = True
+            break
+    # forward pass, at most 5 lines
+    for line_idx in range(min(len(lines), slice[1]), min(len(lines), slice[1] + 5)):
+        if ratio(base, remove_numbers(lines[line_idx])) < 0.9:
+            section[1] = line_idx
+            break
+    if len(lines) <= section[1]:
+        section[1] = len(lines) - 1
+    to_delete = "\n".join(lines[section[0] : section[1] + 1])
+    # cut off next page content
+    itera, iterb = enumerate(lines[section[1] - 1]), enumerate(lines[section[1]])
+    while True:
+        try:
+            (ia, a) = next(itera)
+            while a.isnumeric():
+                (ia, a) = next(itera)
+            (ib, b) = next(iterb)
+            while b.isnumeric():
+                (ib, b) = next(iterb)
+            if a != b:
+                break
+        except StopIteration:
+            break
+    if check_start_flag and "* [" in to_delete:
+        to_delete = "* [" + to_delete.partition("* [")[-1]
+    try:
+        delta = len(lines[section[1]]) - ib - 1
+        if delta > 0:
+            to_delete = to_delete[:-delta]
+    except UnboundLocalError:
+        pass
+
+    return to_delete.strip()
+
+
+@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
+class NougatTokenizerFast(PreTrainedTokenizerFast):
+    """
+    Fast tokenizer for Nougat (backed by HuggingFace tokenizers library).
+
+    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+    refer to this superclass for more information regarding those methods. This class mainly adds Nougat-specific
+    methods for postprocessing the generated text.
+
+    Args:
+        vocab_file (`str`, *optional*):
+            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
+            contains the vocabulary necessary to instantiate a tokenizer.
+        tokenizer_file (`str`, *optional*):
+            [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
+            contains everything needed to load the tokenizer.
+
+        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+            Whether to clean up spaces after decoding; cleanup consists of removing potential artifacts like extra
+            spaces.
+
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
+            token.
+
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    model_input_names = ["input_ids", "attention_mask"]
+    slow_tokenizer_class = None
+
+    def __init__(
+        self,
+        vocab_file=None,
+        tokenizer_file=None,
+        clean_up_tokenization_spaces=False,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token="<pad>",
+        **kwargs,
+    ):
+        super().__init__(
+            vocab_file=vocab_file,
+            tokenizer_file=tokenizer_file,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            unk_token=unk_token,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            pad_token=pad_token,
+            **kwargs,
+        )
+        self.vocab_file = vocab_file
+
+    def remove_hallucinated_references(self, text: str) -> str:
+        """
+        Remove hallucinated or missing references from the text.
+
+        This function identifies and removes references that are marked as missing or hallucinated from the input
+        text.
+
+        Args:
+            text (`str`):
+                The input text containing references.
+
+        Returns:
+            `str`: The text with hallucinated references removed.
+        """
+        lines = text.split("\n")
+        if len(lines) == 0:
+            return ""
+        clean_lines = remove_numbers(lines)
+        slices = get_slices(lines, clean_lines)
+        slices_to_delete = []
+        for slice in slices:
+            slices_to_delete.append(remove_slice_from_lines(lines, clean_lines, slice))
+        for to_delete in reversed(slices_to_delete):
+            text = text.replace(to_delete, "\n\n[MISSING_PAGE_POST]\n\n")
+        text = re.sub(
+            r"## References\n+\[MISSING_PAGE_POST(:\d+)?\]",
+            "\n\n[MISSING_PAGE_POST\\1]",
+            text,
+        )
+        return text
+
+    def correct_tables(self, generation: str) -> str:
+        """
+        Takes a generated string and fixes tables/tabulars to make them match the markdown format needed.
+
+        Args:
+            generation (str): The generated text to be postprocessed.
+
+        Returns:
+            str: The postprocessed text.
+
+        Example:
+
+        ```python
+        correct_tables("\\begin{table} \\begin{tabular}{l l} & \\ \\end{tabular} \\end{table}")
+        "\\begin{table}\n\\begin{tabular}{l l} & \\ \\end{tabular}\n\\end{table}"
+        ```
+        """
+        # remove obviously wrong tables
+        for line in generation.split("\n"):
+            if line.count("\\begin{tabular}") > 15 or line.count("\\multicolumn") > 60 or line.count("&") > 400:
+                generation = generation.replace(line, "")
+        # whitespace corrections
+        generation = generation.replace("\\begin{table} \\begin{tabular}", "\\begin{table}\n\\begin{tabular}")
+        generation = generation.replace("\\end{tabular} \\end{table}", "\\end{tabular}\n\\end{table}")
+        generation = generation.replace("\\end{table} Tab", "\\end{table}\nTab")
+
+        generation = re.sub(r"(^.+)\\begin{tab", r"\1\n\\begin{tab", generation, flags=re.M)
+
+        # Remove left-aligned empty LaTeX tabular blocks.
+        generation = generation.replace(r"\begin{tabular}{l l} & \\ \end{tabular}", "")
+        # Remove tabulars with just 2 newline characters.
+        generation = generation.replace("\\begin{tabular}{}\n\n\\end{tabular}", "")
+        return generation
+
+    def post_process_single(self, generation: str, fix_markdown: bool = True) -> str:
+        """
+        Postprocess a single generated text. Regular expressions used here are taken directly from the Nougat article
+        authors. These expressions are commented for clarity and tested end-to-end in most cases.
+
+        Args:
+            generation (str): The generated text to be postprocessed.
+            fix_markdown (bool, optional): Whether to perform Markdown formatting fixes. Default is True.
+
+        Returns:
+            str: The postprocessed text.
+        """
+        generation = re.sub(
+            r"(?:\n|^)#+ \d*\W? ?(.{100,})", r"\n\1", generation
+        )  # too-long section titles are probably not real section titles
+        generation = generation.strip()
+        # Remove LaTeX left margin tag
+        generation = generation.replace("\n* [leftmargin=*]\n", "\n")
+        # Remove lines with markdown headings starting with #, with numerals,
+        # and possibly roman numerals with trailing spaces and newlines
+        generation = re.sub(r"^#+ (?:\.?(?:\d|[ixv])+)*\s*(?:$|\n\s*)", "", generation, flags=re.M)
+        # most likely hallucinated titles
+        lines = generation.split("\n")
+        if lines[-1].startswith("#") and lines[-1].lstrip("#").startswith(" ") and len(lines) > 1:
+            logger.info("Likely hallucinated title at the end of the page: " + lines[-1])
+            generation = "\n".join(lines[:-1])
+        # obvious repetition detection
+        generation = truncate_repetitions(generation)
+        # Reference corrections
+        generation = self.remove_hallucinated_references(generation)
+        # Remove lines starting with asterisks and numbers like "*[1]" and followed by capital letters and periods
+        # (i.e. too-long references)
+        generation = re.sub(r"^\* \[\d+\](\s?[A-W]\.+\s?){10,}.*$", "", generation, flags=re.M)
+        # Remove empty brackets after a reference number in brackets. *[12][]ABC will become *[12]ABC
+        generation = re.sub(r"^(\* \[\d+\])\[\](.*)$", r"\1\2", generation, flags=re.M)
+        # Remove single characters before or after 2 new lines
+        generation = re.sub(r"(^\w\n\n|\n\n\w$)", "", generation)
+        # PMC math artifact correction
+        generation = re.sub(
+            r"([\s.,()])_([a-zA-Z0-9])__([a-zA-Z0-9]){1,3}_([\s.,:()])",
+            r"\1\(\2_{\3}\)\4",
+            generation,
+        )
+        generation = re.sub(r"([\s.,\d])_([a-zA-Z0-9])_([\s.,\d;])", r"\1\(\2\)\3", generation)
+        # footnote mistakes
+        generation = re.sub(
+            r"(\nFootnote .*?:) (?:footnotetext|thanks):\W*(.*(?:\n\n|$))",
+            r"\1 \2",
+            generation,
+        )
+        # TODO Come up with footnote formatting inside a table
+        generation = re.sub(r"\[FOOTNOTE:.+?\](.*?)\[ENDFOOTNOTE\]", "", generation)
+        # itemize post processing
+        generation = normalize_list_like_lines(generation)
+
+        if generation.endswith((".", "}")):
+            generation += "\n\n"
+        if re.match(r"[A-Z0-9,;:]$", generation):
+            # add a space in case there is a comma or word ending
+            generation += " "
+        elif generation.startswith(("#", "**", "\\begin")):
+            generation = "\n\n" + generation
+        elif generation.split("\n")[-1].startswith(("#", "Figure", "Table")):
+            generation = generation + "\n\n"
+        else:
+            try:
+                last_word = generation.split(" ")[-1]
+                if last_word in nltk.corpus.words.words():
+                    generation += " "
+            except LookupError:
+                # add a space just in case. Will split words but better than concatenating them
+                generation += " "
+
+        # table corrections
+        generation = self.correct_tables(generation)
+        # Remove optional, empty square brackets after begin{array}
+        generation = generation.replace("\\begin{array}[]{", "\\begin{array}{")
+        # Remove empty or malformed LaTeX tabular blocks with 2 or more columns specified, with spaces and ampersands.
+        generation = re.sub(
+            r"\\begin{tabular}{([clr ]){2,}}\s*[& ]*\s*(\\\\)? \\end{tabular}",
+            "",
+            generation,
+        )
+        # Remove lines containing "**S. A. B.**" two or more times in a row. Was included in Nougat's code.
+        generation = re.sub(r"(\*\*S\. A\. B\.\*\*\n+){2,}", "", generation)
+        # Remove markdown-style headers that are incomplete or empty on multiple lines.
+        generation = re.sub(r"^#+( [\[\d\w])?$", "", generation, flags=re.M)
+        # Remove lines with just one period.
+ generation = re.sub(r"^\.\s*$", "", generation, flags=re.M) + # Replace instances of three or more newlines with just two newlines. + generation = re.sub(r"\n{3,}", "\n\n", generation) + if fix_markdown: + return markdown_compatible(generation) + else: + return generation + + def post_process_generation( + self, + generation: Union[str, List[str]], + fix_markdown: bool = True, + num_workers: int = None, + ) -> Union[str, List[str]]: + """ + Postprocess a generated text or a list of generated texts. + + This function can be used to perform postprocessing on generated text, such as fixing Markdown formatting. + + Postprocessing is quite slow so it is recommended to use multiprocessing to speed up the process. + + Args: + generation (Union[str, List[str]]): + The generated text or a list of generated texts. + fix_markdown (`bool`, *optional*, defaults to `True`): + Whether to perform Markdown formatting fixes. + num_workers (`int`, *optional*): + Optional number of workers to pass to leverage multiprocessing (postprocessing several texts in + parallel). + + Returns: + Union[str, List[str]]: The postprocessed text or list of postprocessed texts. + """ + requires_backends(self, ["nltk", "levenshtein"]) + + if isinstance(generation, list): + if num_workers is not None and isinstance(num_workers, int): + with Pool(num_workers) as p: + return p.map(partial(self.post_process_single, fix_markdown=fix_markdown), generation) + else: + return [self.post_process_single(s, fix_markdown=fix_markdown) for s in generation] + else: + return self.post_process_single(generation, fix_markdown=fix_markdown)
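+
+
+# Usage sketch (illustrative; assumes the standard Nougat processor/model classes and checkpoint,
+# which are not defined in this file):
+#
+#     from transformers import NougatProcessor, VisionEncoderDecoderModel
+#
+#     processor = NougatProcessor.from_pretrained("facebook/nougat-base")
+#     model = VisionEncoderDecoderModel.from_pretrained("facebook/nougat-base")
+#     # `page_image` is a PIL image of a document page (hypothetical input for this sketch)
+#     pixel_values = processor(images=page_image, return_tensors="pt").pixel_values
+#     outputs = model.generate(pixel_values, max_new_tokens=512)
+#     raw = processor.batch_decode(outputs, skip_special_tokens=True)
+#     # `post_process_generation` accepts a str or a list of str; `num_workers` parallelizes
+#     # the (slow) postprocessing across texts in the list.
+#     cleaned = processor.post_process_generation(raw, fix_markdown=True, num_workers=2)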