diff --git a/.gitattributes b/.gitattributes index 2f985e132b445288c7ecd2de42fd1a3207dc4156..600d790ba7203b3785ae789d364bb9d8b36477e6 100644 --- a/.gitattributes +++ b/.gitattributes @@ -155,3 +155,4 @@ venv/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so fi venv/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.8 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/triton/_C/libtriton.so filter=lfs diff=lfs merge=lfs -text diff --git a/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c7cf771e41aa5f50e0877730c86a8d2a59511d71 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715682676 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/output.log b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..e8976da8083890a08a6db60c35e0a69168f10109 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/output.log @@ -0,0 +1,42 @@ + +2024-05-14:10:31:17,572 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:10:31:21,977 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:10:31:21,979 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:10:31:21,979 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step20'} +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/data/cronscript/ckpts//hf_ckpt//global_step20'. Use `repo_type` argument if needed. 
+The above exception was the direct cause of the following exception: +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module> + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 462, in cached_file + raise EnvironmentError( +OSError: Incorrect path_or_model_id: '/data/cronscript/ckpts//hf_ckpt//global_step20'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 
+zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..4882a1ca8dd13cc099639cb2ae60012869094224 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T10:31:17.439211", + "startedAt": "2024-05-14T10:31:16.967237", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3392.6333815789476, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.996, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.005, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + 
}, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3335.565, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.314, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3309.009, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 76.92273330688477 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..df860d90e36599abc17cfebb5ce44178113efb4e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-05-14 10:31:16,980 INFO StreamThr :9993 [internal.py:wandb_internal():85] W&B internal server running at pid: 9993, started at: 2024-05-14 10:31:16.979292 +2024-05-14 10:31:16,983 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: status +2024-05-14 10:31:16,983 INFO WriterThread:9993 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/run-u1a30na0.wandb +2024-05-14 10:31:16,984 DEBUG SenderThread:9993 [sender.py:send():378] send: header +2024-05-14 10:31:16,996 DEBUG SenderThread:9993 [sender.py:send():378] send: run +2024-05-14 10:31:17,262 INFO SenderThread:9993 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files +2024-05-14 10:31:17,262 INFO SenderThread:9993 [sender.py:_start_run_threads():1123] run started: u1a30na0 with start time 1715682676.978644 +2024-05-14 10:31:17,269 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 10:31:17,269 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: check_version +2024-05-14 10:31:17,352 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 10:31:17,354 DEBUG HandlerThread:9993 [system_info.py:__init__():26] System info init +2024-05-14 10:31:17,354 DEBUG HandlerThread:9993 [system_info.py:__init__():41] System info init done +2024-05-14 10:31:17,354 INFO HandlerThread:9993 [system_monitor.py:start():194] Starting system monitor +2024-05-14 10:31:17,354 INFO SystemMonitor:9993 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 10:31:17,354 INFO HandlerThread:9993 [system_monitor.py:probe():214] Collecting system info +2024-05-14 10:31:17,355 INFO SystemMonitor:9993 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 10:31:17,355 INFO SystemMonitor:9993 [interfaces.py:start():188] Started disk monitoring +2024-05-14 10:31:17,356 INFO SystemMonitor:9993 [interfaces.py:start():188] Started memory monitoring +2024-05-14 10:31:17,356 INFO SystemMonitor:9993 [interfaces.py:start():188] Started network monitoring +2024-05-14 10:31:17,439 DEBUG HandlerThread:9993 [system_info.py:probe():150] Probing system +2024-05-14 10:31:17,447 DEBUG HandlerThread:9993 
[system_info.py:_probe_git():135] Probing git +2024-05-14 10:31:17,467 ERROR HandlerThread:9993 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 10:31:17,467 DEBUG HandlerThread:9993 [system_info.py:_probe_git():143] Probing git done +2024-05-14 10:31:17,467 DEBUG HandlerThread:9993 [system_info.py:probe():198] Probing system done +2024-05-14 10:31:17,467 DEBUG HandlerThread:9993 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T10:31:17.439211', 'startedAt': '2024-05-14T10:31:16.967237', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3392.6333815789476, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.996, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.005, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3335.565, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.314, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3309.009, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 76.92273330688477}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 10:31:17,467 INFO HandlerThread:9993 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 10:31:17,467 INFO HandlerThread:9993 [system_monitor.py:probe():227] Publishing system info +2024-05-14 10:31:17,469 INFO HandlerThread:9993 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 10:31:17,472 DEBUG SenderThread:9993 [sender.py:send():378] send: files +2024-05-14 10:31:17,472 INFO SenderThread:9993 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 10:31:17,568 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 10:31:17,569 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: python_packages +2024-05-14 10:31:17,569 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:31:17,569 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:31:17,686 DEBUG SenderThread:9993 [sender.py:send():378] send: telemetry +2024-05-14 10:31:18,008 INFO wandb-upload_0:9993 [upload_job.py:push():130] Uploaded file /tmp/tmpxfiju_vqwandb/btebish7-wandb-metadata.json +2024-05-14 10:31:18,263 INFO Thread-12 :9993 [dir_watcher.py:_on_file_created():271] file/dir created: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/requirements.txt +2024-05-14 10:31:18,263 INFO Thread-12 :9993 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/output.log +2024-05-14 10:31:18,263 INFO Thread-12 :9993 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-metadata.json +2024-05-14 10:31:20,263 INFO Thread-12 :9993 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/output.log +2024-05-14 10:31:22,980 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:31:23,057 DEBUG SenderThread:9993 [sender.py:send():378] send: exit +2024-05-14 10:31:23,057 INFO SenderThread:9993 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 10:31:23,057 INFO SenderThread:9993 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 10:31:23,058 INFO SenderThread:9993 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:31:23,058 INFO SenderThread:9993 [sender.py:send_exit():593] send defer +2024-05-14 10:31:23,058 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,058 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 10:31:23,059 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,059 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 10:31:23,059 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 1 +2024-05-14 10:31:23,059 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,059 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 10:31:23,059 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,059 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 10:31:23,059 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 2 +2024-05-14 10:31:23,059 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,059 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 10:31:23,059 INFO HandlerThread:9993 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 10:31:23,059 DEBUG SystemMonitor:9993 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 10:31:23,060 INFO HandlerThread:9993 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 10:31:23,060 DEBUG SystemMonitor:9993 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 10:31:23,060 INFO HandlerThread:9993 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 10:31:23,060 DEBUG SystemMonitor:9993 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 10:31:23,060 INFO HandlerThread:9993 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 10:31:23,061 INFO HandlerThread:9993 [interfaces.py:finish():200] Joined network monitor +2024-05-14 10:31:23,061 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,062 INFO SenderThread:9993 
[sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 10:31:23,062 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 3 +2024-05-14 10:31:23,062 DEBUG SenderThread:9993 [sender.py:send():378] send: stats +2024-05-14 10:31:23,062 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,062 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 10:31:23,062 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,062 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 10:31:23,062 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 4 +2024-05-14 10:31:23,062 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,062 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 10:31:23,063 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,063 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 10:31:23,063 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 5 +2024-05-14 10:31:23,063 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,063 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 10:31:23,063 DEBUG SenderThread:9993 [sender.py:send():378] send: summary +2024-05-14 10:31:23,064 INFO SenderThread:9993 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:31:23,064 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,064 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 10:31:23,064 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 6 +2024-05-14 10:31:23,064 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,064 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 10:31:23,064 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,064 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 10:31:23,066 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:31:23,265 INFO Thread-12 :9993 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-summary.json +2024-05-14 10:31:23,453 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 7 +2024-05-14 10:31:23,453 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,453 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 10:31:23,453 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,453 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 10:31:23,706 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 8 +2024-05-14 10:31:23,706 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,706 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle 
defer: 8 +2024-05-14 10:31:23,706 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,706 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 10:31:23,706 INFO SenderThread:9993 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 10:31:23,707 INFO SenderThread:9993 [job_builder.py:_get_source_type():576] no source found +2024-05-14 10:31:23,707 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 9 +2024-05-14 10:31:23,707 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,707 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 10:31:23,707 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,707 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 10:31:23,707 INFO SenderThread:9993 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 10:31:24,057 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:24,266 INFO SenderThread:9993 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/config.yaml +2024-05-14 10:31:24,266 INFO SenderThread:9993 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/output.log +2024-05-14 10:31:24,266 INFO SenderThread:9993 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files +2024-05-14 10:31:24,266 INFO SenderThread:9993 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/requirements.txt requirements.txt +2024-05-14 10:31:24,266 INFO SenderThread:9993 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/output.log output.log +2024-05-14 10:31:24,266 INFO SenderThread:9993 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-metadata.json wandb-metadata.json +2024-05-14 10:31:24,266 INFO SenderThread:9993 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-summary.json wandb-summary.json +2024-05-14 10:31:24,267 INFO SenderThread:9993 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/config.yaml config.yaml +2024-05-14 10:31:24,267 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 10 +2024-05-14 10:31:24,267 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:24,268 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:24,270 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 10:31:24,271 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:24,271 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 10:31:24,272 INFO SenderThread:9993 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:31:24,515 INFO wandb-upload_0:9993 [upload_job.py:push():130] Uploaded file 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/requirements.txt +2024-05-14 10:31:24,679 INFO wandb-upload_1:9993 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/output.log +2024-05-14 10:31:24,768 INFO wandb-upload_3:9993 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/config.yaml +2024-05-14 10:31:24,774 INFO wandb-upload_2:9993 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/files/wandb-summary.json +2024-05-14 10:31:24,974 INFO Thread-11 (_thread_body):9993 [sender.py:transition_state():613] send defer: 11 +2024-05-14 10:31:24,975 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:24,975 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 10:31:24,975 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:24,975 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 10:31:24,975 INFO SenderThread:9993 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 10:31:24,975 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 12 +2024-05-14 10:31:24,975 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:24,976 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 10:31:24,976 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:24,976 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 10:31:24,976 INFO SenderThread:9993 [file_stream.py:finish():601] file stream finish called +2024-05-14 10:31:25,057 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:25,561 INFO SenderThread:9993 [file_stream.py:finish():605] file stream finish is done +2024-05-14 10:31:25,561 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 13 +2024-05-14 10:31:25,561 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:25,561 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:25,561 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 10:31:25,561 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:25,561 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 10:31:25,561 INFO SenderThread:9993 [sender.py:transition_state():613] send defer: 14 +2024-05-14 10:31:25,561 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:25,561 DEBUG SenderThread:9993 [sender.py:send():378] send: final +2024-05-14 10:31:25,562 INFO HandlerThread:9993 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 10:31:25,562 DEBUG SenderThread:9993 [sender.py:send():378] send: footer +2024-05-14 10:31:25,562 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:25,562 INFO SenderThread:9993 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 10:31:25,562 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 
10:31:25,562 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:25,562 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:25,563 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:25,563 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 10:31:25,563 DEBUG SenderThread:9993 [sender.py:send_request():405] send_request: server_info +2024-05-14 10:31:25,564 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 10:31:25,564 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 10:31:25,564 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 10:31:25,626 INFO MainThread:9993 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 10:31:25,626 INFO MainThread:9993 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 10:31:25,626 INFO MainThread:9993 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 10:31:25,626 DEBUG HandlerThread:9993 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 10:31:25,626 INFO HandlerThread:9993 [handler.py:finish():882] shutting down handler +2024-05-14 10:31:26,563 INFO WriterThread:9993 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/run-u1a30na0.wandb +2024-05-14 10:31:26,626 INFO SenderThread:9993 [sender.py:finish():1545] shutting down sender +2024-05-14 10:31:26,626 INFO SenderThread:9993 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:31:26,626 INFO SenderThread:9993 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..3ee3e471b76d38555e72f94033603e4843d9f160 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/logs/debug.log @@ -0,0 +1,28 @@ +2024-05-14 10:31:16,975 INFO MainThread:8785 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 10:31:16,975 INFO MainThread:8785 [wandb_setup.py:_flush():76] Configure stats pid to 8785 +2024-05-14 10:31:16,975 INFO MainThread:8785 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 10:31:16,976 WARNING MainThread:8785 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_init.py:_log_setup():520] Logging user logs to 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/logs/debug.log +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/logs/debug-internal.log +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_init.py:init():560] calling init triggers +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_init.py:init():610] starting backend +2024-05-14 10:31:16,976 INFO MainThread:8785 [wandb_init.py:init():614] setting up manager +2024-05-14 10:31:16,977 INFO MainThread:8785 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 10:31:16,978 INFO MainThread:8785 [wandb_init.py:init():622] backend started and connected +2024-05-14 10:31:16,983 INFO MainThread:8785 [wandb_init.py:init():711] updated telemetry +2024-05-14 10:31:16,995 INFO MainThread:8785 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 10:31:17,268 INFO MainThread:8785 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 10:31:17,348 INFO MainThread:8785 [wandb_run.py:_on_init():2405] got version response +2024-05-14 10:31:17,348 INFO MainThread:8785 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 10:31:17,569 INFO MainThread:8785 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 10:31:17,569 INFO MainThread:8785 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 10:31:17,569 INFO MainThread:8785 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 10:31:17,569 INFO MainThread:8785 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 10:31:17,570 INFO MainThread:8785 [wandb_init.py:init():838] run started, returning control to user process diff --git a/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/run-u1a30na0.wandb b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/run-u1a30na0.wandb new file mode 100644 index 0000000000000000000000000000000000000000..c7e96de1d3d09118ce2cb2a60b61bf8686ffd0a6 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103116-u1a30na0/run-u1a30na0.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1b7a388c2e5e939342bd8719e3f3329bbe709f39 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/files/config.yaml @@ -0,0 +1,86 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715687084 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 2 + - 23 + - 62 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 +task_configs: + desc: null + value: + indiccopa-hi: + task: indiccopa-hi + group: ai4bharat/IndicCOPA + dataset_path: ai4bharat/IndicCOPA + dataset_name: translation-hi + test_split: test + doc_to_text: 'functools.partial(, connector={''cause'': + ''कारण'', ''effect'': ''परिणाम''})' + doc_to_target: label + doc_to_choice: "def doc_to_choice(doc):\n return [convert_choice(doc[\"choice1\"\ + ]), convert_choice(doc[\"choice2\"])]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 +cli_configs: + desc: null + value: + model: hf + model_args: pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100 + batch_size: auto + batch_sizes: + - 64 + device: null + use_cache: null + limit: null + bootstrap_iters: 100000 + gen_kwargs: null diff --git a/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/files/media/table/evaluation/eval_results_1_c78c57917f215d296d9d.table.json b/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/files/media/table/evaluation/eval_results_1_c78c57917f215d296d9d.table.json new file mode 100644 index 0000000000000000000000000000000000000000..fc9a92c5b80c0631baf44f28a9d55b68d269f5da --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/files/media/table/evaluation/eval_results_1_c78c57917f215d296d9d.table.json @@ -0,0 +1 @@ +{"columns": ["Tasks", "Version", "Filter", "num_fewshot", "Metric", "Value", "Stderr"], "data": [["indiccopa-hi", 1.0, "none", 0, "acc", "0.534521158129176", "0.0236"]]} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/files/output.log b/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..9a824a416e20bb922e798e55a644c7732e33a510 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/files/output.log @@ -0,0 +1,33 @@ + +2024-05-14:11:44:44,822 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:11:44:49,790 INFO [__main__.py:335] Selected 
Tasks: ['indiccopa-hi'] +2024-05-14:11:44:49,793 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:11:44:49,793 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:11:44:58,246 INFO [huggingface.py:334] Using 8 devices with data parallelism +2024-05-14:11:44:58,249 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:11:44:58,249 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +[2024-05-14 11:44:57,866] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:11:45:03,726 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:11:45:03,726 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:11:45:03,745 INFO [task.py:395] Building contexts for indiccopa-hi on rank 0... 
+100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 57/57 [00:00<00:00, 101994.59it/s] +2024-05-14:11:45:05,430 INFO [evaluator.py:379] Running loglikelihood requests +Running loglikelihood requests: 0%| | 0/114 [00:00, connector={'cause': 'कारण', 'effect': 'परिणाम'})", 'doc_to_target': 'label', 'doc_to_choice': 'def doc_to_choice(doc):\n return [convert_choice(doc["choice1"]), convert_choice(doc["choice2"])]\n', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}}, 'cli_configs': {'model': 'hf', 'model_args': 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', 'batch_size': 'auto', 'batch_sizes': [64], 'device': None, 'use_cache': None, 'limit': None, 'bootstrap_iters': 100000, 'gen_kwargs': None}} +2024-05-14 11:45:15,748 INFO MainThread:82825 [wandb_run.py:_finish():2103] finishing run smlgenai/bharatgpt/07tmzq68 +2024-05-14 11:45:15,748 INFO MainThread:82825 [wandb_run.py:_atexit_cleanup():2343] got exitcode: 0 +2024-05-14 11:45:15,749 INFO MainThread:82825 [wandb_run.py:_restore():2326] restore +2024-05-14 11:45:15,750 INFO MainThread:82825 [wandb_run.py:_restore():2332] restore done +2024-05-14 11:45:22,507 INFO MainThread:82825 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 11:45:22,508 INFO MainThread:82825 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 11:45:22,513 INFO MainThread:82825 [wandb_run.py:_footer_sync_info():3953] logging synced files diff --git a/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/run-07tmzq68.wandb b/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/run-07tmzq68.wandb new file mode 100644 index 0000000000000000000000000000000000000000..6e7b81a15abd61d22f38e302c2a274fa2cd91d76 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_114444-07tmzq68/run-07tmzq68.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f7dabc891edb6d0c65a5c4671e8f6d6510d0fbed --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715704464 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..db36fbda4c5e0b4b4d016ce34470a1c916cb6041 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:16:34:25,029 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:34:30,618 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:16:34:30,622 INFO 
[evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:34:30,622 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch integration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an efficient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +[2024-05-14 16:34:41,216] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:16:34:41,625 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:16:34:41,625 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Passed argument batch_size = auto:1. Detecting largest batch size +2024-05-14:16:34:42,849 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:16:34:42,849 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:16:34:42,871 INFO [task.py:395] Building contexts for indiccopa-hi on rank 6... +100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 94978.17it/s] +2024-05-14:16:34:44,718 INFO [evaluator.py:379] Running loglikelihood requests +Determined largest batch size: 64 +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an efficient way. 
+ warnings.warn( \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 
+yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..630e70847432e17fdc21b64be21e9a250c3e564b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:34:24.898265", + "startedAt": "2024-05-14T16:34:24.422056", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3394.211644736842, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3214.846, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3214.903, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3286.563, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { 
+ "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4841270446777 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..f54bb3d71ddd5d7fb0c66b39e940c1d03cfc7cd1 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 27}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..1381a9f3c4d4b673a81e450eb03d2becc8f88a60 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/logs/debug-internal.log @@ -0,0 +1,195 @@ +2024-05-14 16:34:24,434 INFO StreamThr :117704 [internal.py:wandb_internal():85] W&B internal server running at pid: 117704, started at: 2024-05-14 16:34:24.433551 +2024-05-14 16:34:24,436 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:34:24,437 INFO WriterThread:117704 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/run-ymrq791n.wandb +2024-05-14 16:34:24,438 DEBUG SenderThread:117704 [sender.py:send():378] send: header +2024-05-14 16:34:24,449 DEBUG SenderThread:117704 [sender.py:send():378] send: run +2024-05-14 16:34:24,723 INFO SenderThread:117704 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files +2024-05-14 16:34:24,724 INFO SenderThread:117704 [sender.py:_start_run_threads():1123] run started: ymrq791n with start time 1715704464.43321 +2024-05-14 16:34:24,729 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:34:24,730 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:34:24,830 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:34:24,832 DEBUG HandlerThread:117704 [system_info.py:__init__():26] System info init +2024-05-14 16:34:24,832 DEBUG HandlerThread:117704 [system_info.py:__init__():41] System info init done +2024-05-14 16:34:24,832 INFO HandlerThread:117704 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:34:24,832 INFO SystemMonitor:117704 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:34:24,832 INFO HandlerThread:117704 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:34:24,832 INFO SystemMonitor:117704 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:34:24,833 INFO SystemMonitor:117704 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:34:24,834 INFO SystemMonitor:117704 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:34:24,834 INFO SystemMonitor:117704 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:34:24,898 DEBUG HandlerThread:117704 [system_info.py:probe():150] Probing system +2024-05-14 
16:34:24,906 DEBUG HandlerThread:117704 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:34:24,927 ERROR HandlerThread:117704 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:34:24,927 DEBUG HandlerThread:117704 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:34:24,927 DEBUG HandlerThread:117704 [system_info.py:probe():198] Probing system done +2024-05-14 16:34:24,927 DEBUG HandlerThread:117704 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:34:24.898265', 'startedAt': '2024-05-14T16:34:24.422056', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3394.211644736842, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3214.846, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 
'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3214.903, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3286.563, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4841270446777}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:34:24,927 INFO HandlerThread:117704 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:34:24,927 INFO HandlerThread:117704 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:34:24,928 INFO HandlerThread:117704 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:34:24,932 DEBUG SenderThread:117704 [sender.py:send():378] send: files +2024-05-14 16:34:24,932 INFO SenderThread:117704 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:34:25,026 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:34:25,026 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:34:25,026 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:34:25,028 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:34:25,258 DEBUG SenderThread:117704 [sender.py:send():378] send: telemetry +2024-05-14 16:34:25,516 INFO wandb-upload_0:117704 [upload_job.py:push():130] Uploaded file /tmp/tmplstuh0ggwandb/k8z60wst-wandb-metadata.json +2024-05-14 16:34:25,725 INFO Thread-12 :117704 
[dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-metadata.json +2024-05-14 16:34:25,725 INFO Thread-12 :117704 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/requirements.txt +2024-05-14 16:34:25,725 INFO Thread-12 :117704 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log +2024-05-14 16:34:27,725 INFO Thread-12 :117704 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log +2024-05-14 16:34:30,260 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:31,727 INFO Thread-12 :117704 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log +2024-05-14 16:34:35,623 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:39,737 INFO Thread-12 :117704 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log +2024-05-14 16:34:40,027 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:34:40,028 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:34:41,174 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:41,739 INFO Thread-12 :117704 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log +2024-05-14 16:34:43,742 INFO Thread-12 :117704 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log +2024-05-14 16:34:45,743 INFO Thread-12 :117704 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log +2024-05-14 16:34:46,281 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:47,745 INFO Thread-12 :117704 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log +2024-05-14 16:34:51,281 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:52,304 DEBUG SenderThread:117704 [sender.py:send():378] send: exit +2024-05-14 16:34:52,305 INFO SenderThread:117704 [sender.py:send_exit():585] handling exit code: 0 +2024-05-14 16:34:52,305 INFO SenderThread:117704 [sender.py:send_exit():587] handling runtime: 27 +2024-05-14 16:34:52,306 INFO SenderThread:117704 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:34:52,306 INFO SenderThread:117704 [sender.py:send_exit():593] send defer +2024-05-14 16:34:52,306 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,307 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:34:52,307 DEBUG SenderThread:117704 [sender.py:send_request():405] 
send_request: defer +2024-05-14 16:34:52,307 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:34:52,307 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:34:52,307 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,307 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:34:52,307 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,307 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:34:52,307 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:34:52,308 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,308 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:34:52,308 INFO HandlerThread:117704 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:34:52,308 DEBUG SystemMonitor:117704 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:34:52,308 DEBUG SystemMonitor:117704 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:34:52,308 INFO HandlerThread:117704 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:34:52,309 DEBUG SystemMonitor:117704 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:34:52,309 INFO HandlerThread:117704 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:34:52,310 INFO HandlerThread:117704 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:34:52,310 INFO HandlerThread:117704 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:34:52,311 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,311 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:34:52,311 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:34:52,311 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,311 DEBUG SenderThread:117704 [sender.py:send():378] send: stats +2024-05-14 16:34:52,311 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:34:52,312 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,312 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:34:52,312 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:34:52,312 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,312 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:34:52,312 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,312 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:34:52,312 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:34:52,312 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,313 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:34:52,313 DEBUG 
SenderThread:117704 [sender.py:send():378] send: summary +2024-05-14 16:34:52,313 INFO SenderThread:117704 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:34:52,314 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,314 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:34:52,314 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:34:52,314 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,314 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:34:52,314 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,314 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:34:52,316 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:34:52,384 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:34:52,384 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:52,384 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:34:52,385 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:52,385 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:34:52,749 INFO Thread-12 :117704 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/config.yaml +2024-05-14 16:34:52,749 INFO Thread-12 :117704 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-summary.json +2024-05-14 16:34:53,304 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:34:55,273 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:34:55,273 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:34:55,273 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:55,273 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:34:55,273 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:55,273 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:34:55,274 INFO SenderThread:117704 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:34:55,274 INFO SenderThread:117704 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:34:55,274 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:34:55,274 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:55,274 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:34:55,274 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:55,274 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 16:34:55,274 INFO SenderThread:117704 [dir_watcher.py:finish():358] shutting down 
directory watcher +2024-05-14 16:34:55,305 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:34:55,750 INFO SenderThread:117704 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log +2024-05-14 16:34:55,751 INFO SenderThread:117704 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files +2024-05-14 16:34:55,751 INFO SenderThread:117704 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/config.yaml config.yaml +2024-05-14 16:34:55,751 INFO SenderThread:117704 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log output.log +2024-05-14 16:34:55,751 INFO SenderThread:117704 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:34:55,751 INFO SenderThread:117704 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/requirements.txt requirements.txt +2024-05-14 16:34:55,752 INFO SenderThread:117704 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-summary.json wandb-summary.json +2024-05-14 16:34:55,753 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:34:55,753 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:34:55,753 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:55,755 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:34:55,755 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:55,758 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:34:55,758 INFO SenderThread:117704 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:34:55,999 INFO wandb-upload_1:117704 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/output.log +2024-05-14 16:34:56,196 INFO wandb-upload_0:117704 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/config.yaml +2024-05-14 16:34:56,246 INFO wandb-upload_3:117704 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/wandb-summary.json +2024-05-14 16:34:56,246 INFO wandb-upload_2:117704 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/files/requirements.txt +2024-05-14 16:34:56,306 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:34:56,306 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:34:56,447 INFO Thread-11 (_thread_body):117704 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:34:56,447 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:56,447 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 11 
+2024-05-14 16:34:56,447 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:56,447 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:34:56,447 INFO SenderThread:117704 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 16:34:56,447 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:34:56,448 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:56,448 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:34:56,448 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:56,448 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:34:56,448 INFO SenderThread:117704 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:34:56,509 INFO SenderThread:117704 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:34:56,509 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:34:56,509 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:56,509 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:34:56,509 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:56,509 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:34:56,509 INFO SenderThread:117704 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:34:56,510 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:34:56,510 INFO HandlerThread:117704 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:34:56,510 DEBUG SenderThread:117704 [sender.py:send():378] send: final +2024-05-14 16:34:56,510 DEBUG SenderThread:117704 [sender.py:send():378] send: footer +2024-05-14 16:34:56,510 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: defer +2024-05-14 16:34:56,510 INFO SenderThread:117704 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:34:56,510 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:34:56,510 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:34:56,511 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:34:56,511 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:34:56,511 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:34:56,511 DEBUG SenderThread:117704 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:34:56,512 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:34:56,513 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:34:56,513 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:34:56,566 INFO MainThread:117704 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:34:56,566 INFO MainThread:117704 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 
16:34:56,566 INFO MainThread:117704 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:34:56,566 DEBUG HandlerThread:117704 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:34:56,566 INFO HandlerThread:117704 [handler.py:finish():882] shutting down handler +2024-05-14 16:34:57,511 INFO WriterThread:117704 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/run-ymrq791n.wandb +2024-05-14 16:34:57,566 INFO SenderThread:117704 [sender.py:finish():1545] shutting down sender +2024-05-14 16:34:57,566 INFO SenderThread:117704 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:34:57,566 INFO SenderThread:117704 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..e55fc97c323b1ba99d1aced96402bdd2616e09df --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_setup.py:_flush():76] Configure stats pid to 116798 +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:34:24,430 WARNING MainThread:116798 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/logs/debug.log +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/logs/debug-internal.log +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_init.py:init():610] starting backend +2024-05-14 16:34:24,430 INFO MainThread:116798 [wandb_init.py:init():614] setting up manager +2024-05-14 16:34:24,432 INFO MainThread:116798 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:34:24,433 INFO MainThread:116798 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:34:24,435 INFO MainThread:116798 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:34:24,448 INFO MainThread:116798 
[wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:34:24,729 INFO MainThread:116798 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:34:24,826 INFO MainThread:116798 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:34:24,826 INFO MainThread:116798 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:34:25,027 INFO MainThread:116798 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:34:25,027 INFO MainThread:116798 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:34:25,027 INFO MainThread:116798 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:34:25,027 INFO MainThread:116798 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 16:34:25,028 INFO MainThread:116798 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:34:57,567 WARNING MsgRouterThr:116798 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/run-ymrq791n.wandb b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/run-ymrq791n.wandb new file mode 100644 index 0000000000000000000000000000000000000000..f673c12647e868a271aa0af15f21ad1a71afc3ee Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_163424-ymrq791n/run-ymrq791n.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..ea7b2fe146fc3f591349600aa97191223d298cd5 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-22T16:45:48.393379", + "startedAt": "2024-05-22T16:45:47.835280", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.50001875, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + 
}, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.64420318603516 + } + }, + "memory": { + "total": 1007.4379997253418 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..96c5be8c1e96f9521004777e15a7f9a017a4dc07 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-22 16:45:47,857 INFO StreamThr :1526 [internal.py:wandb_internal():85] W&B internal server running at pid: 1526, started at: 2024-05-22 16:45:47.855406 +2024-05-22 16:45:47,862 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: status +2024-05-22 16:45:47,864 INFO WriterThread:1526 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/run-pxpzv850.wandb +2024-05-22 16:45:47,865 DEBUG SenderThread:1526 [sender.py:send():378] send: header +2024-05-22 16:45:47,869 DEBUG SenderThread:1526 [sender.py:send():378] send: run +2024-05-22 16:45:48,175 INFO SenderThread:1526 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files +2024-05-22 16:45:48,175 INFO 
SenderThread:1526 [sender.py:_start_run_threads():1123] run started: pxpzv850 with start time 1716396347.855857 +2024-05-22 16:45:48,178 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: check_version +2024-05-22 16:45:48,178 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: check_version +2024-05-22 16:45:48,297 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: run_start +2024-05-22 16:45:48,299 DEBUG HandlerThread:1526 [system_info.py:__init__():26] System info init +2024-05-22 16:45:48,299 DEBUG HandlerThread:1526 [system_info.py:__init__():41] System info init done +2024-05-22 16:45:48,299 INFO HandlerThread:1526 [system_monitor.py:start():194] Starting system monitor +2024-05-22 16:45:48,299 INFO SystemMonitor:1526 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-22 16:45:48,299 INFO HandlerThread:1526 [system_monitor.py:probe():214] Collecting system info +2024-05-22 16:45:48,306 INFO SystemMonitor:1526 [interfaces.py:start():188] Started cpu monitoring +2024-05-22 16:45:48,306 INFO SystemMonitor:1526 [interfaces.py:start():188] Started disk monitoring +2024-05-22 16:45:48,313 INFO SystemMonitor:1526 [interfaces.py:start():188] Started memory monitoring +2024-05-22 16:45:48,313 INFO SystemMonitor:1526 [interfaces.py:start():188] Started network monitoring +2024-05-22 16:45:48,393 DEBUG HandlerThread:1526 [system_info.py:probe():150] Probing system +2024-05-22 16:45:48,396 DEBUG HandlerThread:1526 [system_info.py:_probe_git():135] Probing git +2024-05-22 16:45:48,407 ERROR HandlerThread:1526 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-22 16:45:48,407 DEBUG HandlerThread:1526 [system_info.py:_probe_git():143] Probing git done +2024-05-22 16:45:48,407 DEBUG HandlerThread:1526 [system_info.py:probe():198] Probing system done +2024-05-22 16:45:48,407 DEBUG HandlerThread:1526 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T16:45:48.393379', 'startedAt': '2024-05-22T16:45:47.835280', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.50001875, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.64420318603516}}, 'memory': {'total': 1007.4379997253418}} +2024-05-22 16:45:48,407 INFO HandlerThread:1526 [system_monitor.py:probe():224] Finished collecting system info +2024-05-22 16:45:48,407 INFO HandlerThread:1526 [system_monitor.py:probe():227] Publishing system info +2024-05-22 16:45:48,410 INFO HandlerThread:1526 [system_monitor.py:probe():229] Finished publishing system info +2024-05-22 16:45:48,415 DEBUG SenderThread:1526 [sender.py:send():378] send: files +2024-05-22 16:45:48,415 INFO SenderThread:1526 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-22 16:45:48,588 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: python_packages +2024-05-22 16:45:48,588 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: python_packages +2024-05-22 16:45:48,589 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: stop_status +2024-05-22 16:45:48,590 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: stop_status +2024-05-22 16:45:48,740 DEBUG SenderThread:1526 [sender.py:send():378] send: telemetry +2024-05-22 16:45:48,970 INFO wandb-upload_0:1526 [upload_job.py:push():130] Uploaded file /tmp/tmpd7bxbo4_wandb/kitau2sw-wandb-metadata.json +2024-05-22 16:45:49,176 INFO Thread-12 :1526 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/output.log +2024-05-22 16:45:49,177 INFO Thread-12 :1526 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/requirements.txt +2024-05-22 16:45:49,177 INFO Thread-12 :1526 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/wandb-metadata.json +2024-05-22 16:45:51,176 INFO Thread-12 :1526 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/output.log +2024-05-22 16:45:53,742 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 16:45:59,101 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 16:45:59,184 INFO Thread-12 :1526 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/output.log +2024-05-22 16:45:59,420 DEBUG SenderThread:1526 [sender.py:send():378] send: exit +2024-05-22 16:45:59,420 INFO SenderThread:1526 [sender.py:send_exit():585] handling exit code: 1 +2024-05-22 16:45:59,420 INFO SenderThread:1526 [sender.py:send_exit():587] handling runtime: 11 +2024-05-22 16:45:59,422 INFO 
SenderThread:1526 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 16:45:59,422 INFO SenderThread:1526 [sender.py:send_exit():593] send defer +2024-05-22 16:45:59,422 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:45:59,422 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-22 16:45:59,422 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:45:59,422 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-22 16:45:59,422 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 1 +2024-05-22 16:45:59,422 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:45:59,423 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-22 16:45:59,423 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:45:59,423 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-22 16:45:59,423 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 2 +2024-05-22 16:45:59,423 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:45:59,423 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-22 16:45:59,423 INFO HandlerThread:1526 [system_monitor.py:finish():203] Stopping system monitor +2024-05-22 16:45:59,423 DEBUG SystemMonitor:1526 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-22 16:45:59,423 DEBUG SystemMonitor:1526 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-22 16:45:59,423 DEBUG SystemMonitor:1526 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-22 16:45:59,424 INFO HandlerThread:1526 [interfaces.py:finish():200] Joined cpu monitor +2024-05-22 16:45:59,424 INFO HandlerThread:1526 [interfaces.py:finish():200] Joined disk monitor +2024-05-22 16:45:59,424 INFO HandlerThread:1526 [interfaces.py:finish():200] Joined memory monitor +2024-05-22 16:45:59,424 INFO HandlerThread:1526 [interfaces.py:finish():200] Joined network monitor +2024-05-22 16:45:59,424 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:45:59,424 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-22 16:45:59,424 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 3 +2024-05-22 16:45:59,425 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:45:59,425 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-22 16:45:59,425 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:45:59,425 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-22 16:45:59,425 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 4 +2024-05-22 16:45:59,425 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:45:59,425 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-22 16:45:59,425 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:45:59,425 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-22 
16:45:59,425 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 5 +2024-05-22 16:45:59,425 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:45:59,425 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-22 16:45:59,425 DEBUG SenderThread:1526 [sender.py:send():378] send: summary +2024-05-22 16:45:59,426 INFO SenderThread:1526 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 16:45:59,426 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:45:59,426 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-22 16:45:59,426 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 6 +2024-05-22 16:45:59,427 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:45:59,427 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-22 16:45:59,427 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:45:59,427 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-22 16:45:59,432 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 16:45:59,513 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 7 +2024-05-22 16:45:59,513 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:45:59,513 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-22 16:45:59,513 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:45:59,513 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-22 16:46:00,185 INFO Thread-12 :1526 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/config.yaml +2024-05-22 16:46:00,185 INFO Thread-12 :1526 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/wandb-summary.json +2024-05-22 16:46:00,420 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 16:46:00,766 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 8 +2024-05-22 16:46:00,766 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 16:46:00,766 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:46:00,766 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-22 16:46:00,766 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:46:00,767 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-22 16:46:00,767 INFO SenderThread:1526 [job_builder.py:build():432] Attempting to build job artifact +2024-05-22 16:46:00,767 INFO SenderThread:1526 [job_builder.py:_get_source_type():576] no source found +2024-05-22 16:46:00,767 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 9 +2024-05-22 16:46:00,767 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:46:00,767 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 9 
+2024-05-22 16:46:00,767 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:46:00,767 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-22 16:46:00,768 INFO SenderThread:1526 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-22 16:46:01,186 INFO SenderThread:1526 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/output.log +2024-05-22 16:46:01,187 INFO SenderThread:1526 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files +2024-05-22 16:46:01,187 INFO SenderThread:1526 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/wandb-summary.json wandb-summary.json +2024-05-22 16:46:01,187 INFO SenderThread:1526 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/wandb-metadata.json wandb-metadata.json +2024-05-22 16:46:01,189 INFO SenderThread:1526 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/requirements.txt requirements.txt +2024-05-22 16:46:01,190 INFO SenderThread:1526 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/config.yaml config.yaml +2024-05-22 16:46:01,190 INFO SenderThread:1526 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/output.log output.log +2024-05-22 16:46:01,190 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 10 +2024-05-22 16:46:01,190 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:46:01,190 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-22 16:46:01,192 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:46:01,192 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-22 16:46:01,192 INFO SenderThread:1526 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 16:46:01,420 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 16:46:01,421 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 16:46:01,440 INFO wandb-upload_0:1526 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/wandb-summary.json +2024-05-22 16:46:01,631 INFO wandb-upload_3:1526 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/output.log +2024-05-22 16:46:01,846 INFO wandb-upload_1:1526 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/requirements.txt +2024-05-22 16:46:01,852 INFO wandb-upload_2:1526 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/files/config.yaml +2024-05-22 16:46:02,052 INFO Thread-11 (_thread_body):1526 [sender.py:transition_state():613] send defer: 11 
+2024-05-22 16:46:02,052 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:46:02,052 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-22 16:46:02,053 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:46:02,053 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-22 16:46:02,053 INFO SenderThread:1526 [file_pusher.py:join():175] waiting for file pusher +2024-05-22 16:46:02,053 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 12 +2024-05-22 16:46:02,053 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:46:02,053 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-22 16:46:02,053 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:46:02,053 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-22 16:46:02,053 INFO SenderThread:1526 [file_stream.py:finish():601] file stream finish called +2024-05-22 16:46:02,120 INFO SenderThread:1526 [file_stream.py:finish():605] file stream finish is done +2024-05-22 16:46:02,120 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 13 +2024-05-22 16:46:02,120 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:46:02,120 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-22 16:46:02,120 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:46:02,120 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-22 16:46:02,120 INFO SenderThread:1526 [sender.py:transition_state():613] send defer: 14 +2024-05-22 16:46:02,121 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:46:02,121 INFO HandlerThread:1526 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-22 16:46:02,121 DEBUG SenderThread:1526 [sender.py:send():378] send: final +2024-05-22 16:46:02,121 DEBUG SenderThread:1526 [sender.py:send():378] send: footer +2024-05-22 16:46:02,121 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: defer +2024-05-22 16:46:02,121 INFO SenderThread:1526 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-22 16:46:02,122 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 16:46:02,122 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 16:46:02,122 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: server_info +2024-05-22 16:46:02,122 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: get_summary +2024-05-22 16:46:02,122 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-22 16:46:02,122 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-22 16:46:02,122 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 16:46:02,122 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 16:46:02,122 DEBUG SenderThread:1526 [sender.py:send_request():405] send_request: server_info +2024-05-22 16:46:02,176 INFO MainThread:1526 [wandb_run.py:_footer_history_summary_info():3994] rendering 
history +2024-05-22 16:46:02,176 INFO MainThread:1526 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-22 16:46:02,176 INFO MainThread:1526 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-22 16:46:02,177 DEBUG HandlerThread:1526 [handler.py:handle_request():158] handle_request: shutdown +2024-05-22 16:46:02,177 INFO HandlerThread:1526 [handler.py:finish():882] shutting down handler +2024-05-22 16:46:03,122 INFO WriterThread:1526 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/run-pxpzv850.wandb +2024-05-22 16:46:03,176 INFO SenderThread:1526 [sender.py:finish():1545] shutting down sender +2024-05-22 16:46:03,176 INFO SenderThread:1526 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 16:46:03,176 INFO SenderThread:1526 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/logs/debug.log b/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..5ef85bb93aa87e865235eb21664acc1b57681d68 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-22 16:45:47,849 INFO MainThread:1371 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-22 16:45:47,849 INFO MainThread:1371 [wandb_setup.py:_flush():76] Configure stats pid to 1371 +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-22 16:45:47,850 WARNING MainThread:1371 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/logs/debug.log +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/logs/debug-internal.log +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_init.py:init():560] calling init triggers +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_init.py:init():610] starting backend +2024-05-22 16:45:47,850 INFO MainThread:1371 [wandb_init.py:init():614] setting up manager +2024-05-22 16:45:47,854 INFO MainThread:1371 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-22 16:45:47,855 INFO MainThread:1371 [wandb_init.py:init():622] backend started and connected 
+2024-05-22 16:45:47,859 INFO MainThread:1371 [wandb_init.py:init():711] updated telemetry +2024-05-22 16:45:47,868 INFO MainThread:1371 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-22 16:45:48,177 INFO MainThread:1371 [wandb_run.py:_on_init():2396] communicating current version +2024-05-22 16:45:48,290 INFO MainThread:1371 [wandb_run.py:_on_init():2405] got version response +2024-05-22 16:45:48,291 INFO MainThread:1371 [wandb_init.py:init():795] starting run threads in backend +2024-05-22 16:45:48,589 INFO MainThread:1371 [wandb_run.py:_console_start():2374] atexit reg +2024-05-22 16:45:48,590 INFO MainThread:1371 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-22 16:45:48,590 INFO MainThread:1371 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-22 16:45:48,590 INFO MainThread:1371 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-22 16:45:48,593 INFO MainThread:1371 [wandb_init.py:init():838] run started, returning control to user process +2024-05-22 16:46:03,178 WARNING MsgRouterThr:1371 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a120cbe0082b011c1c39d7d32768fe3d12f0caa3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716440664 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/output.log b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..c29613a4a07c592ea65200f220307718f7412bdb --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:05:04:25,449 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:05:04:37,325 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:05:04:37,326 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:05:04:37,327 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000'} +2024-05-23:05:04:39,693 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File 
"/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8150356038c46ec25f623f6e945d6dcb66a2e717 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..9a46aea87f6088421240830cd11d0e0de36f0371 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T05:04:25.086722", + "startedAt": "2024-05-23T05:04:24.526528", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-debug-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2546.0233999999996, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3347.315, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3347.319, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3154.503, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3366.351, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3347.368, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 212.19525909423828 + } + }, + "memory": { + "total": 1007.43798828125 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..0724bd7df12f46eff44a25860c977ed8e96c2574 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 14}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..21dbf9c462176e61025947a5250f3ba0178e6fde --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/logs/debug-internal.log @@ -0,0 +1,184 @@ +2024-05-23 05:04:24,572 INFO StreamThr :4012 [internal.py:wandb_internal():85] W&B internal server running at pid: 4012, started at: 2024-05-23 05:04:24.570667 +2024-05-23 05:04:24,576 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: status +2024-05-23 05:04:24,576 INFO WriterThread:4012 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/run-s7y9ilwe.wandb +2024-05-23 05:04:24,580 DEBUG SenderThread:4012 [sender.py:send():378] send: header +2024-05-23 05:04:24,587 DEBUG SenderThread:4012 [sender.py:send():378] send: run +2024-05-23 05:04:24,856 INFO SenderThread:4012 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files +2024-05-23 05:04:24,856 INFO SenderThread:4012 [sender.py:_start_run_threads():1123] run started: s7y9ilwe with start time 1716440664.570855 +2024-05-23 05:04:24,869 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 05:04:24,869 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: check_version +2024-05-23 05:04:25,000 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 05:04:25,007 DEBUG HandlerThread:4012 [system_info.py:__init__():26] System info init +2024-05-23 05:04:25,007 DEBUG HandlerThread:4012 [system_info.py:__init__():41] System info init done +2024-05-23 05:04:25,007 INFO HandlerThread:4012 [system_monitor.py:start():194] Starting system monitor +2024-05-23 05:04:25,008 INFO SystemMonitor:4012 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 05:04:25,008 INFO HandlerThread:4012 [system_monitor.py:probe():214] Collecting system info +2024-05-23 05:04:25,016 INFO SystemMonitor:4012 
[interfaces.py:start():188] Started cpu monitoring +2024-05-23 05:04:25,016 INFO SystemMonitor:4012 [interfaces.py:start():188] Started disk monitoring +2024-05-23 05:04:25,018 INFO SystemMonitor:4012 [interfaces.py:start():188] Started memory monitoring +2024-05-23 05:04:25,019 INFO SystemMonitor:4012 [interfaces.py:start():188] Started network monitoring +2024-05-23 05:04:25,086 DEBUG HandlerThread:4012 [system_info.py:probe():150] Probing system +2024-05-23 05:04:25,116 DEBUG HandlerThread:4012 [system_info.py:_probe_git():135] Probing git +2024-05-23 05:04:25,172 ERROR HandlerThread:4012 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 05:04:25,172 DEBUG HandlerThread:4012 [system_info.py:_probe_git():143] Probing git done +2024-05-23 05:04:25,172 DEBUG HandlerThread:4012 [system_info.py:probe():198] Probing system done +2024-05-23 05:04:25,172 DEBUG HandlerThread:4012 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T05:04:25.086722', 'startedAt': '2024-05-23T05:04:24.526528', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step2000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2546.0233999999996, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3347.315, 'min': 800.0, 'max': 3400.0}, {'current': 3347.319, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3154.503, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3366.351, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3347.368, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 212.19525909423828}}, 'memory': {'total': 1007.43798828125}} +2024-05-23 05:04:25,172 INFO HandlerThread:4012 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 05:04:25,172 INFO HandlerThread:4012 [system_monitor.py:probe():227] Publishing system info +2024-05-23 05:04:25,187 INFO HandlerThread:4012 
[system_monitor.py:probe():229] Finished publishing system info +2024-05-23 05:04:25,193 DEBUG SenderThread:4012 [sender.py:send():378] send: files +2024-05-23 05:04:25,193 INFO SenderThread:4012 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 05:04:25,440 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 05:04:25,440 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 05:04:25,440 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: python_packages +2024-05-23 05:04:25,443 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: stop_status +2024-05-23 05:04:25,512 DEBUG SenderThread:4012 [sender.py:send():378] send: telemetry +2024-05-23 05:04:25,784 INFO wandb-upload_0:4012 [upload_job.py:push():130] Uploaded file /tmp/tmpnrarq25bwandb/9vzk6wuv-wandb-metadata.json +2024-05-23 05:04:25,860 INFO Thread-12 :4012 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/requirements.txt +2024-05-23 05:04:25,860 INFO Thread-12 :4012 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/output.log +2024-05-23 05:04:25,860 INFO Thread-12 :4012 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-metadata.json +2024-05-23 05:04:27,884 INFO Thread-12 :4012 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/output.log +2024-05-23 05:04:30,514 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 05:04:35,515 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 05:04:37,906 INFO Thread-12 :4012 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/output.log +2024-05-23 05:04:39,705 DEBUG SenderThread:4012 [sender.py:send():378] send: exit +2024-05-23 05:04:39,706 INFO SenderThread:4012 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 05:04:39,706 INFO SenderThread:4012 [sender.py:send_exit():587] handling runtime: 14 +2024-05-23 05:04:39,707 INFO SenderThread:4012 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 05:04:39,708 INFO SenderThread:4012 [sender.py:send_exit():593] send defer +2024-05-23 05:04:39,708 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:39,708 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 05:04:39,708 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:39,708 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 05:04:39,708 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 1 +2024-05-23 05:04:39,708 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:39,708 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 05:04:39,708 DEBUG SenderThread:4012 [sender.py:send_request():405] 
send_request: defer +2024-05-23 05:04:39,708 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 05:04:39,708 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 2 +2024-05-23 05:04:39,708 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:39,708 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 05:04:39,708 INFO HandlerThread:4012 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 05:04:39,708 DEBUG SystemMonitor:4012 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 05:04:39,709 DEBUG SystemMonitor:4012 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 05:04:39,709 DEBUG SystemMonitor:4012 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 05:04:39,709 INFO HandlerThread:4012 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 05:04:39,710 INFO HandlerThread:4012 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 05:04:39,710 INFO HandlerThread:4012 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 05:04:39,710 INFO HandlerThread:4012 [interfaces.py:finish():200] Joined network monitor +2024-05-23 05:04:39,710 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:39,710 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 05:04:39,710 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 3 +2024-05-23 05:04:39,710 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:39,710 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 05:04:39,710 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:39,710 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 05:04:39,710 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 4 +2024-05-23 05:04:39,710 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:39,710 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 05:04:39,710 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:39,711 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 05:04:39,711 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 5 +2024-05-23 05:04:39,711 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:39,711 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 05:04:39,711 DEBUG SenderThread:4012 [sender.py:send():378] send: summary +2024-05-23 05:04:39,712 INFO SenderThread:4012 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 05:04:39,712 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:39,712 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 05:04:39,712 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 6 +2024-05-23 05:04:39,712 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:39,712 INFO HandlerThread:4012 
[handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 05:04:39,712 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:39,712 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 05:04:39,717 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 05:04:39,802 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 7 +2024-05-23 05:04:39,802 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:39,802 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 05:04:39,802 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:39,802 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 05:04:39,921 INFO Thread-12 :4012 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/config.yaml +2024-05-23 05:04:39,921 INFO Thread-12 :4012 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-summary.json +2024-05-23 05:04:40,706 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 05:04:41,542 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 8 +2024-05-23 05:04:41,542 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 05:04:41,542 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:41,542 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 05:04:41,543 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:41,543 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 05:04:41,543 INFO SenderThread:4012 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 05:04:41,543 INFO SenderThread:4012 [job_builder.py:_get_source_type():576] no source found +2024-05-23 05:04:41,543 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 9 +2024-05-23 05:04:41,543 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:41,544 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 05:04:41,544 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:41,544 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 05:04:41,544 INFO SenderThread:4012 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 05:04:41,706 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 05:04:41,923 INFO SenderThread:4012 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/output.log +2024-05-23 05:04:41,923 INFO SenderThread:4012 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files +2024-05-23 05:04:41,924 INFO SenderThread:4012 [dir_watcher.py:finish():402] scan save: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-summary.json wandb-summary.json +2024-05-23 05:04:41,924 INFO SenderThread:4012 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/config.yaml config.yaml +2024-05-23 05:04:41,924 INFO SenderThread:4012 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/output.log output.log +2024-05-23 05:04:41,924 INFO SenderThread:4012 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/requirements.txt requirements.txt +2024-05-23 05:04:41,924 INFO SenderThread:4012 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-metadata.json wandb-metadata.json +2024-05-23 05:04:41,924 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 10 +2024-05-23 05:04:41,924 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 05:04:41,925 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:41,925 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 05:04:41,927 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:41,927 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 05:04:41,927 INFO SenderThread:4012 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 05:04:42,201 INFO wandb-upload_0:4012 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/wandb-summary.json +2024-05-23 05:04:42,533 INFO wandb-upload_2:4012 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/output.log +2024-05-23 05:04:42,547 INFO wandb-upload_1:4012 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/config.yaml +2024-05-23 05:04:42,552 INFO wandb-upload_3:4012 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/files/requirements.txt +2024-05-23 05:04:42,707 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 05:04:42,707 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 05:04:42,752 INFO Thread-11 (_thread_body):4012 [sender.py:transition_state():613] send defer: 11 +2024-05-23 05:04:42,752 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:42,752 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 05:04:42,752 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:42,752 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 05:04:42,752 INFO SenderThread:4012 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 05:04:42,753 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 12 +2024-05-23 05:04:42,753 DEBUG HandlerThread:4012 [handler.py:handle_request():158] 
handle_request: defer +2024-05-23 05:04:42,753 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 05:04:42,753 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:42,753 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 05:04:42,753 INFO SenderThread:4012 [file_stream.py:finish():601] file stream finish called +2024-05-23 05:04:42,813 INFO SenderThread:4012 [file_stream.py:finish():605] file stream finish is done +2024-05-23 05:04:42,814 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 13 +2024-05-23 05:04:42,814 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:42,814 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 05:04:42,814 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:42,814 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 05:04:42,814 INFO SenderThread:4012 [sender.py:transition_state():613] send defer: 14 +2024-05-23 05:04:42,814 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:04:42,814 INFO HandlerThread:4012 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 05:04:42,815 DEBUG SenderThread:4012 [sender.py:send():378] send: final +2024-05-23 05:04:42,815 DEBUG SenderThread:4012 [sender.py:send():378] send: footer +2024-05-23 05:04:42,815 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: defer +2024-05-23 05:04:42,815 INFO SenderThread:4012 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 05:04:42,816 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 05:04:42,816 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 05:04:42,816 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 05:04:42,816 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 05:04:42,816 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 05:04:42,816 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 05:04:42,816 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 05:04:42,816 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 05:04:42,816 DEBUG SenderThread:4012 [sender.py:send_request():405] send_request: server_info +2024-05-23 05:04:42,881 INFO MainThread:4012 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 05:04:42,881 INFO MainThread:4012 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 05:04:42,881 INFO MainThread:4012 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 05:04:42,881 DEBUG HandlerThread:4012 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 05:04:42,881 INFO HandlerThread:4012 [handler.py:finish():882] shutting down handler +2024-05-23 05:04:43,816 INFO WriterThread:4012 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/run-s7y9ilwe.wandb +2024-05-23 05:04:43,882 INFO SenderThread:4012 [sender.py:finish():1545] 
shutting down sender +2024-05-23 05:04:43,882 INFO SenderThread:4012 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 05:04:43,882 INFO SenderThread:4012 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..385eeb19cc0f2300bbcc4986bc0e148490dc1642 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_setup.py:_flush():76] Configure stats pid to 3857 +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 05:04:24,564 WARNING MainThread:3857 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/logs/debug.log +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/logs/debug-internal.log +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_init.py:init():560] calling init triggers +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_init.py:init():610] starting backend +2024-05-23 05:04:24,564 INFO MainThread:3857 [wandb_init.py:init():614] setting up manager +2024-05-23 05:04:24,567 INFO MainThread:3857 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 05:04:24,570 INFO MainThread:3857 [wandb_init.py:init():622] backend started and connected +2024-05-23 05:04:24,574 INFO MainThread:3857 [wandb_init.py:init():711] updated telemetry +2024-05-23 05:04:24,586 INFO MainThread:3857 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 05:04:24,869 INFO MainThread:3857 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 05:04:24,992 INFO MainThread:3857 [wandb_run.py:_on_init():2405] got version response +2024-05-23 05:04:24,992 INFO MainThread:3857 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 05:04:25,441 INFO MainThread:3857 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 05:04:25,441 INFO MainThread:3857 [wandb_run.py:_redirect():2229] 
redirect: wrap_raw +2024-05-23 05:04:25,442 INFO MainThread:3857 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 05:04:25,442 INFO MainThread:3857 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 05:04:25,446 INFO MainThread:3857 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 05:04:43,883 WARNING MsgRouterThr:3857 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/run-s7y9ilwe.wandb b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/run-s7y9ilwe.wandb new file mode 100644 index 0000000000000000000000000000000000000000..56481256d826670cc129f6c47a4c93d4cb616811 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_050424-s7y9ilwe/run-s7y9ilwe.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1b5f6a730addcd9a4b964fb5af525fae39ef5c75 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716441118 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/output.log b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..7ad605291c31927863b205da32046ed357ee9c65 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:05:11:59,385 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:05:12:03,946 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:05:12:03,947 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:05:12:03,947 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000'} +2024-05-23:05:12:05,167 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File 
"/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8150356038c46ec25f623f6e945d6dcb66a2e717 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 
+pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..494239a1d608bf83f012608589f9c7468664d1b4 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T05:11:59.256279", + "startedAt": "2024-05-23T05:11:58.845439", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-debug-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2333.12500625, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3399.998, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3383.13, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3387.653, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3312.786, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3325.16, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3384.188, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3399.998, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, 
+ { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 212.1817169189453 + } + }, + "memory": { + "total": 1007.43798828125 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..77874020b80010ba80aa2100eaa5b75e2ee50efd --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-23 05:11:58,860 INFO StreamThr :7319 [internal.py:wandb_internal():85] W&B internal server running at pid: 7319, started at: 2024-05-23 05:11:58.859619 +2024-05-23 05:11:58,861 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: status +2024-05-23 05:11:58,862 INFO WriterThread:7319 [datastore.py:open_for_write():87] open: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/run-5sn3yiz7.wandb +2024-05-23 05:11:58,863 DEBUG SenderThread:7319 [sender.py:send():378] send: header +2024-05-23 05:11:58,873 DEBUG SenderThread:7319 [sender.py:send():378] send: run +2024-05-23 05:11:59,090 INFO SenderThread:7319 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files +2024-05-23 05:11:59,090 INFO SenderThread:7319 [sender.py:_start_run_threads():1123] run started: 5sn3yiz7 with start time 1716441118.859487 +2024-05-23 05:11:59,096 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 05:11:59,096 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: check_version +2024-05-23 05:11:59,183 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 05:11:59,185 DEBUG HandlerThread:7319 [system_info.py:__init__():26] System info init +2024-05-23 05:11:59,185 DEBUG HandlerThread:7319 [system_info.py:__init__():41] System info init done +2024-05-23 05:11:59,185 INFO HandlerThread:7319 [system_monitor.py:start():194] Starting system monitor +2024-05-23 05:11:59,185 INFO SystemMonitor:7319 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 05:11:59,185 INFO HandlerThread:7319 [system_monitor.py:probe():214] Collecting system info +2024-05-23 05:11:59,186 INFO SystemMonitor:7319 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 05:11:59,186 INFO SystemMonitor:7319 [interfaces.py:start():188] Started disk monitoring +2024-05-23 05:11:59,187 INFO SystemMonitor:7319 [interfaces.py:start():188] Started memory monitoring +2024-05-23 05:11:59,187 INFO SystemMonitor:7319 [interfaces.py:start():188] Started network monitoring +2024-05-23 05:11:59,256 DEBUG HandlerThread:7319 [system_info.py:probe():150] Probing system +2024-05-23 05:11:59,264 DEBUG HandlerThread:7319 [system_info.py:_probe_git():135] Probing git +2024-05-23 05:11:59,283 ERROR HandlerThread:7319 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 05:11:59,283 DEBUG HandlerThread:7319 [system_info.py:_probe_git():143] Probing git done +2024-05-23 05:11:59,283 DEBUG HandlerThread:7319 [system_info.py:probe():198] Probing system done +2024-05-23 05:11:59,283 DEBUG HandlerThread:7319 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T05:11:59.256279', 'startedAt': '2024-05-23T05:11:58.845439', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 
'peacock-evaluation-debug-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2333.12500625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.998, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3383.13, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3387.653, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3312.786, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3325.16, 'min': 800.0, 'max': 3400.0}, {'current': 3384.188, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.998, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 212.1817169189453}}, 'memory': {'total': 1007.43798828125}} +2024-05-23 05:11:59,283 INFO HandlerThread:7319 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 05:11:59,283 INFO HandlerThread:7319 [system_monitor.py:probe():227] Publishing system info +2024-05-23 05:11:59,285 INFO HandlerThread:7319 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 05:11:59,289 DEBUG SenderThread:7319 [sender.py:send():378] send: files +2024-05-23 05:11:59,289 INFO SenderThread:7319 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 05:11:59,382 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 05:11:59,382 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: python_packages +2024-05-23 05:11:59,383 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 05:11:59,384 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: stop_status +2024-05-23 05:11:59,514 DEBUG SenderThread:7319 [sender.py:send():378] send: telemetry +2024-05-23 05:11:59,879 INFO wandb-upload_0:7319 [upload_job.py:push():130] Uploaded file /tmp/tmp8lrhk8rewandb/p6q78c0r-wandb-metadata.json +2024-05-23 05:12:00,092 INFO Thread-12 :7319 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/requirements.txt +2024-05-23 05:12:00,092 INFO Thread-12 :7319 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-metadata.json +2024-05-23 05:12:00,092 INFO Thread-12 :7319 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/output.log +2024-05-23 05:12:02,092 INFO Thread-12 :7319 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/output.log +2024-05-23 05:12:03,947 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 05:12:05,172 DEBUG SenderThread:7319 [sender.py:send():378] send: exit +2024-05-23 05:12:05,172 INFO 
SenderThread:7319 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 05:12:05,172 INFO SenderThread:7319 [sender.py:send_exit():587] handling runtime: 5 +2024-05-23 05:12:05,174 INFO SenderThread:7319 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 05:12:05,174 INFO SenderThread:7319 [sender.py:send_exit():593] send defer +2024-05-23 05:12:05,174 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:05,174 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 05:12:05,174 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:05,174 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 05:12:05,174 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 1 +2024-05-23 05:12:05,174 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:05,174 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 05:12:05,174 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:05,174 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 05:12:05,174 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 2 +2024-05-23 05:12:05,174 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:05,175 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 05:12:05,175 INFO HandlerThread:7319 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 05:12:05,175 INFO HandlerThread:7319 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 05:12:05,175 DEBUG SystemMonitor:7319 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 05:12:05,176 INFO HandlerThread:7319 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 05:12:05,176 DEBUG SystemMonitor:7319 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 05:12:05,176 INFO HandlerThread:7319 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 05:12:05,176 DEBUG SystemMonitor:7319 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 05:12:05,176 INFO HandlerThread:7319 [interfaces.py:finish():200] Joined network monitor +2024-05-23 05:12:05,178 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:05,178 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 05:12:05,178 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 3 +2024-05-23 05:12:05,178 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:05,178 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 05:12:05,178 DEBUG SenderThread:7319 [sender.py:send():378] send: stats +2024-05-23 05:12:05,178 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:05,179 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 05:12:05,179 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 4 +2024-05-23 05:12:05,179 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:05,179 INFO HandlerThread:7319 
[handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 05:12:05,179 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:05,179 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 05:12:05,179 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 5 +2024-05-23 05:12:05,179 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:05,179 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 05:12:05,179 DEBUG SenderThread:7319 [sender.py:send():378] send: summary +2024-05-23 05:12:05,180 INFO SenderThread:7319 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 05:12:05,180 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:05,180 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 05:12:05,180 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 6 +2024-05-23 05:12:05,180 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:05,180 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 05:12:05,180 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:05,180 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 05:12:05,183 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 05:12:05,502 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 7 +2024-05-23 05:12:05,502 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:05,502 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 05:12:05,502 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:05,502 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 05:12:06,096 INFO Thread-12 :7319 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/config.yaml +2024-05-23 05:12:06,097 INFO Thread-12 :7319 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/output.log +2024-05-23 05:12:06,097 INFO Thread-12 :7319 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-summary.json +2024-05-23 05:12:06,172 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 05:12:07,535 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 8 +2024-05-23 05:12:07,535 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 05:12:07,536 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:07,536 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 05:12:07,536 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:07,536 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 05:12:07,536 INFO 
SenderThread:7319 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 05:12:07,537 INFO SenderThread:7319 [job_builder.py:_get_source_type():576] no source found +2024-05-23 05:12:07,537 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 9 +2024-05-23 05:12:07,537 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:07,537 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 05:12:07,537 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:07,537 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 05:12:07,537 INFO SenderThread:7319 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 05:12:08,100 INFO SenderThread:7319 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/output.log +2024-05-23 05:12:08,101 INFO SenderThread:7319 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files +2024-05-23 05:12:08,101 INFO SenderThread:7319 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/config.yaml config.yaml +2024-05-23 05:12:08,101 INFO SenderThread:7319 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-metadata.json wandb-metadata.json +2024-05-23 05:12:08,101 INFO SenderThread:7319 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/requirements.txt requirements.txt +2024-05-23 05:12:08,101 INFO SenderThread:7319 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-summary.json wandb-summary.json +2024-05-23 05:12:08,104 INFO SenderThread:7319 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/output.log output.log +2024-05-23 05:12:08,106 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 10 +2024-05-23 05:12:08,107 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:08,107 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 05:12:08,107 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:08,108 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 05:12:08,109 INFO SenderThread:7319 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 05:12:08,172 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 05:12:08,173 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 05:12:08,350 INFO wandb-upload_0:7319 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/requirements.txt +2024-05-23 05:12:08,502 INFO wandb-upload_1:7319 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/config.yaml +2024-05-23 05:12:08,605 
INFO wandb-upload_2:7319 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/wandb-summary.json +2024-05-23 05:12:08,642 INFO wandb-upload_3:7319 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/files/output.log +2024-05-23 05:12:08,842 INFO Thread-11 (_thread_body):7319 [sender.py:transition_state():613] send defer: 11 +2024-05-23 05:12:08,842 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:08,842 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 05:12:08,842 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:08,842 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 05:12:08,842 INFO SenderThread:7319 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 05:12:08,843 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 12 +2024-05-23 05:12:08,843 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:08,843 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 05:12:08,843 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:08,843 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 05:12:08,843 INFO SenderThread:7319 [file_stream.py:finish():601] file stream finish called +2024-05-23 05:12:09,043 INFO SenderThread:7319 [file_stream.py:finish():605] file stream finish is done +2024-05-23 05:12:09,043 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 13 +2024-05-23 05:12:09,043 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:09,043 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 05:12:09,044 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:09,044 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 05:12:09,044 INFO SenderThread:7319 [sender.py:transition_state():613] send defer: 14 +2024-05-23 05:12:09,044 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: defer +2024-05-23 05:12:09,044 INFO HandlerThread:7319 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 05:12:09,044 DEBUG SenderThread:7319 [sender.py:send():378] send: final +2024-05-23 05:12:09,044 DEBUG SenderThread:7319 [sender.py:send():378] send: footer +2024-05-23 05:12:09,044 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: defer +2024-05-23 05:12:09,044 INFO SenderThread:7319 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 05:12:09,045 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 05:12:09,045 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 05:12:09,045 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 05:12:09,045 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 05:12:09,045 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 05:12:09,046 DEBUG HandlerThread:7319 
[handler.py:handle_request():158] handle_request: get_summary +2024-05-23 05:12:09,046 DEBUG SenderThread:7319 [sender.py:send_request():405] send_request: server_info +2024-05-23 05:12:09,047 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 05:12:09,047 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 05:12:09,102 INFO MainThread:7319 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 05:12:09,102 INFO MainThread:7319 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 05:12:09,102 INFO MainThread:7319 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 05:12:09,102 DEBUG HandlerThread:7319 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 05:12:09,102 INFO HandlerThread:7319 [handler.py:finish():882] shutting down handler +2024-05-23 05:12:10,046 INFO WriterThread:7319 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/run-5sn3yiz7.wandb +2024-05-23 05:12:10,102 INFO SenderThread:7319 [sender.py:finish():1545] shutting down sender +2024-05-23 05:12:10,102 INFO SenderThread:7319 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 05:12:10,102 INFO SenderThread:7319 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..5d6e46efc7a22af4391eb408b8435491b2cdec4d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 05:11:58,856 INFO MainThread:7164 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 05:11:58,856 INFO MainThread:7164 [wandb_setup.py:_flush():76] Configure stats pid to 7164 +2024-05-23 05:11:58,856 INFO MainThread:7164 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 05:11:58,856 INFO MainThread:7164 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 05:11:58,856 INFO MainThread:7164 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 05:11:58,856 INFO MainThread:7164 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 05:11:58,856 WARNING MainThread:7164 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 05:11:58,856 INFO MainThread:7164 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 05:11:58,856 INFO MainThread:7164 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 05:11:58,856 INFO MainThread:7164 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/logs/debug.log +2024-05-23 05:11:58,857 INFO MainThread:7164 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/logs/debug-internal.log +2024-05-23 05:11:58,857 INFO MainThread:7164 [wandb_init.py:init():560] calling init triggers +2024-05-23 05:11:58,857 INFO MainThread:7164 
[wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 05:11:58,857 INFO MainThread:7164 [wandb_init.py:init():610] starting backend +2024-05-23 05:11:58,857 INFO MainThread:7164 [wandb_init.py:init():614] setting up manager +2024-05-23 05:11:58,858 INFO MainThread:7164 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 05:11:58,859 INFO MainThread:7164 [wandb_init.py:init():622] backend started and connected +2024-05-23 05:11:58,861 INFO MainThread:7164 [wandb_init.py:init():711] updated telemetry +2024-05-23 05:11:58,873 INFO MainThread:7164 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 05:11:59,096 INFO MainThread:7164 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 05:11:59,178 INFO MainThread:7164 [wandb_run.py:_on_init():2405] got version response +2024-05-23 05:11:59,179 INFO MainThread:7164 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 05:11:59,383 INFO MainThread:7164 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 05:11:59,383 INFO MainThread:7164 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 05:11:59,383 INFO MainThread:7164 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 05:11:59,383 INFO MainThread:7164 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 05:11:59,384 INFO MainThread:7164 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 05:12:10,103 WARNING MsgRouterThr:7164 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/run-5sn3yiz7.wandb b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/run-5sn3yiz7.wandb new file mode 100644 index 0000000000000000000000000000000000000000..3a56070019be32fc5b81e4df6e6909cc6a7e63ac Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_051158-5sn3yiz7/run-5sn3yiz7.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/config.yaml b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4ff3b957ffc19cc6f76c5944500387fd3b0fc3d8 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/config.yaml @@ -0,0 +1,44 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1717051567 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 13 + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..0e80035859f8aa4da7e5a683586e9bf1faa428a2 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log @@ -0,0 +1,38 @@ + +2024-05-30:06:46:08,431 INFO [__main__.py:251] Verbosity set to INFO +2024-05-30:06:46:17,722 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-05-30:06:46:17,723 INFO [evaluator.py:131] 
Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-30:06:46:17,723 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step30000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/'} +2024-05-30:06:46:20,160 INFO [huggingface.py:164] Using device 'cuda' +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module> + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 237, in __init__ + self._create_tokenizer( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 630, in _create_tokenizer + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/tokenization_auto.py", line 752, in from_pretrained + config = AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 1082, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 644, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 699, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 360, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/ does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k//main' for available files.
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..7391d0fb5302364497bd6017486c36f0caae0613 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/requirements.txt @@ -0,0 +1,154 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.2 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.15.2 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.36.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..d1502cd7b0f6ddbfecc3c84b23fe58d7c6ae8bc5 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-30T06:46:08.216773", + "startedAt": "2024-05-30T06:46:07.723902", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step30000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/", + "--tasks", + "winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq", + "--batch_size", + "auto", + "--wandb_args", + "project=english-eval,group=exp2,name=global_step30000" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-debug-15-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.49998125, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 212.28859329223633 + } + }, + "memory": { + "total": 1007.4379692077637 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..0396467f7569a8166ce6a4890676d52689b450a7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 38}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..b8d3a9a8d44d1ff4a4a6763aa6354b028ccea705 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/logs/debug-internal.log @@ -0,0 +1,196 @@ +2024-05-30 06:46:07,744 INFO StreamThr :899 [internal.py:wandb_internal():85] W&B internal server running at pid: 899, started at: 2024-05-30 06:46:07.742978 +2024-05-30 06:46:07,749 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status +2024-05-30 06:46:07,749 INFO WriterThread:899 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/run-wndsuxuq.wandb +2024-05-30 06:46:07,753 DEBUG SenderThread:899 [sender.py:send():378] send: header +2024-05-30 06:46:07,756 DEBUG SenderThread:899 [sender.py:send():378] send: run +2024-05-30 06:46:08,021 INFO SenderThread:899 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files +2024-05-30 06:46:08,021 INFO SenderThread:899 [sender.py:_start_run_threads():1123] run started: wndsuxuq with start time 1717051567.743549 +2024-05-30 06:46:08,027 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: check_version +2024-05-30 06:46:08,027 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: check_version +2024-05-30 06:46:08,141 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: run_start +2024-05-30 06:46:08,144 DEBUG HandlerThread:899 [system_info.py:__init__():26] System info init +2024-05-30 06:46:08,144 DEBUG HandlerThread:899 [system_info.py:__init__():41] System info init done +2024-05-30 06:46:08,144 INFO HandlerThread:899 [system_monitor.py:start():194] Starting system monitor +2024-05-30 06:46:08,144 INFO SystemMonitor:899 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-30 06:46:08,144 INFO HandlerThread:899 [system_monitor.py:probe():214] Collecting system info 
+2024-05-30 06:46:08,151 INFO SystemMonitor:899 [interfaces.py:start():188] Started cpu monitoring +2024-05-30 06:46:08,151 INFO SystemMonitor:899 [interfaces.py:start():188] Started disk monitoring +2024-05-30 06:46:08,152 INFO SystemMonitor:899 [interfaces.py:start():188] Started memory monitoring +2024-05-30 06:46:08,154 INFO SystemMonitor:899 [interfaces.py:start():188] Started network monitoring +2024-05-30 06:46:08,216 DEBUG HandlerThread:899 [system_info.py:probe():150] Probing system +2024-05-30 06:46:08,220 DEBUG HandlerThread:899 [system_info.py:_probe_git():135] Probing git +2024-05-30 06:46:08,230 ERROR HandlerThread:899 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-30 06:46:08,230 DEBUG HandlerThread:899 [system_info.py:_probe_git():143] Probing git done +2024-05-30 06:46:08,230 DEBUG HandlerThread:899 [system_info.py:probe():198] Probing system done +2024-05-30 06:46:08,230 DEBUG HandlerThread:899 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-30T06:46:08.216773', 'startedAt': '2024-05-30T06:46:07.723902', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step30000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/', '--tasks', 'winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq', '--batch_size', 'auto', '--wandb_args', 'project=english-eval,group=exp2,name=global_step30000'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-15-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.49998125, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 212.28859329223633}}, 'memory': {'total': 1007.4379692077637}} +2024-05-30 06:46:08,230 INFO HandlerThread:899 [system_monitor.py:probe():224] Finished collecting system info +2024-05-30 06:46:08,230 INFO HandlerThread:899 
[system_monitor.py:probe():227] Publishing system info
+2024-05-30 06:46:08,234 INFO HandlerThread:899 [system_monitor.py:probe():229] Finished publishing system info
+2024-05-30 06:46:08,242 DEBUG SenderThread:899 [sender.py:send():378] send: files
+2024-05-30 06:46:08,242 INFO SenderThread:899 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+2024-05-30 06:46:08,425 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: python_packages
+2024-05-30 06:46:08,425 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: python_packages
+2024-05-30 06:46:08,426 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: stop_status
+2024-05-30 06:46:08,427 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: stop_status
+2024-05-30 06:46:08,582 DEBUG SenderThread:899 [sender.py:send():378] send: telemetry
+2024-05-30 06:46:08,962 INFO wandb-upload_0:899 [upload_job.py:push():130] Uploaded file /tmp/tmpknvv4459wandb/5l45f24v-wandb-metadata.json
+2024-05-30 06:46:09,023 INFO Thread-12 :899 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-metadata.json
+2024-05-30 06:46:09,023 INFO Thread-12 :899 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log
+2024-05-30 06:46:09,023 INFO Thread-12 :899 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/requirements.txt
+2024-05-30 06:46:11,023 INFO Thread-12 :899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log
+2024-05-30 06:46:13,600 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report
+2024-05-30 06:46:18,724 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report
+2024-05-30 06:46:19,030 INFO Thread-12 :899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log
+2024-05-30 06:46:23,040 INFO Thread-12 :899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log
+2024-05-30 06:46:23,426 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: stop_status
+2024-05-30 06:46:23,427 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: stop_status
+2024-05-30 06:46:24,520 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report
+2024-05-30 06:46:29,521 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report
+2024-05-30 06:46:34,522 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report
+2024-05-30 06:46:38,426 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: stop_status
+2024-05-30 06:46:38,427 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: stop_status
+2024-05-30 06:46:40,521 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report
+2024-05-30 06:46:41,082 INFO Thread-12 :899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/config.yaml
+2024-05-30 06:46:45,206 INFO Thread-12 :899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log
+2024-05-30 06:46:46,033 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report
+2024-05-30 06:46:46,984 DEBUG SenderThread:899 [sender.py:send():378] send: exit
+2024-05-30 06:46:46,984 INFO SenderThread:899 [sender.py:send_exit():585] handling exit code: 1
+2024-05-30 06:46:46,984 INFO SenderThread:899 [sender.py:send_exit():587] handling runtime: 38
+2024-05-30 06:46:46,988 INFO SenderThread:899 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+2024-05-30 06:46:46,988 INFO SenderThread:899 [sender.py:send_exit():593] send defer
+2024-05-30 06:46:46,988 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:46,988 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 0
+2024-05-30 06:46:46,988 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:46,988 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 0
+2024-05-30 06:46:46,988 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 1
+2024-05-30 06:46:46,988 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:46,988 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 1
+2024-05-30 06:46:46,988 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:46,988 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 1
+2024-05-30 06:46:46,988 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 2
+2024-05-30 06:46:46,989 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:46,989 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 2
+2024-05-30 06:46:46,989 INFO HandlerThread:899 [system_monitor.py:finish():203] Stopping system monitor
+2024-05-30 06:46:46,989 DEBUG SystemMonitor:899 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+2024-05-30 06:46:46,989 DEBUG SystemMonitor:899 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+2024-05-30 06:46:46,989 DEBUG SystemMonitor:899 [system_monitor.py:_start():183] Publishing last batch of metrics
+2024-05-30 06:46:46,992 INFO HandlerThread:899 [interfaces.py:finish():200] Joined cpu monitor
+2024-05-30 06:46:46,992 INFO HandlerThread:899 [interfaces.py:finish():200] Joined disk monitor
+2024-05-30 06:46:46,992 INFO HandlerThread:899 [interfaces.py:finish():200] Joined memory monitor
+2024-05-30 06:46:46,992 INFO HandlerThread:899 [interfaces.py:finish():200] Joined network monitor
+2024-05-30 06:46:46,993 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:46,993 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 2
+2024-05-30 06:46:46,993 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 3
+2024-05-30 06:46:46,993 DEBUG SenderThread:899 [sender.py:send():378] send: stats
+2024-05-30 06:46:46,994 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:46,994 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 3
+2024-05-30 06:46:46,994 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:46,994 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 3
+2024-05-30 06:46:46,994 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 4
+2024-05-30 06:46:46,994 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:46,994 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 4
+2024-05-30 06:46:46,994 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:46,994 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 4
+2024-05-30 06:46:46,994 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 5
+2024-05-30 06:46:46,994 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:46,994 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 5
+2024-05-30 06:46:46,995 DEBUG SenderThread:899 [sender.py:send():378] send: summary
+2024-05-30 06:46:46,995 INFO SenderThread:899 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+2024-05-30 06:46:46,996 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:46,996 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 5
+2024-05-30 06:46:46,996 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 6
+2024-05-30 06:46:46,996 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:46,996 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 6
+2024-05-30 06:46:46,996 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:46,996 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 6
+2024-05-30 06:46:46,996 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 7
+2024-05-30 06:46:46,996 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report
+2024-05-30 06:46:46,996 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:46,996 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 7
+2024-05-30 06:46:46,996 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:46,996 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 7
+2024-05-30 06:46:47,432 INFO Thread-12 :899 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-summary.json
+2024-05-30 06:46:47,984 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-30 06:46:48,839 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 8
+2024-05-30 06:46:48,839 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: poll_exit
+2024-05-30 06:46:48,839 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:48,839 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 8
+2024-05-30 06:46:48,839 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:48,839 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 8
+2024-05-30 06:46:48,839 INFO SenderThread:899 [job_builder.py:build():432] Attempting to build job artifact
+2024-05-30 06:46:48,840 INFO SenderThread:899 [job_builder.py:_get_source_type():576] no source found
+2024-05-30 06:46:48,840 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 9
+2024-05-30 06:46:48,840 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:48,840 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 9
+2024-05-30 06:46:48,841 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:48,841 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 9
+2024-05-30 06:46:48,841 INFO SenderThread:899 [dir_watcher.py:finish():358] shutting down directory watcher
+2024-05-30 06:46:48,984 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-30 06:46:49,433 INFO SenderThread:899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log
+2024-05-30 06:46:49,434 INFO SenderThread:899 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files
+2024-05-30 06:46:49,434 INFO SenderThread:899 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-summary.json wandb-summary.json
+2024-05-30 06:46:49,434 INFO SenderThread:899 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log output.log
+2024-05-30 06:46:49,436 INFO SenderThread:899 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-metadata.json wandb-metadata.json
+2024-05-30 06:46:49,437 INFO SenderThread:899 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/config.yaml config.yaml
+2024-05-30 06:46:49,437 INFO SenderThread:899 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/requirements.txt requirements.txt
+2024-05-30 06:46:49,437 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 10
+2024-05-30 06:46:49,437 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: poll_exit
+2024-05-30 06:46:49,437 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:49,437 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 10
+2024-05-30 06:46:49,437 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:49,438 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 10
+2024-05-30 06:46:49,438 INFO SenderThread:899 [file_pusher.py:finish():169] shutting down file pusher
+2024-05-30 06:46:49,932 INFO wandb-upload_0:899 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/wandb-summary.json
+2024-05-30 06:46:49,985 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-30 06:46:49,985 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: poll_exit
+2024-05-30 06:46:50,027 INFO wandb-upload_1:899 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/output.log
+2024-05-30 06:46:50,157 INFO wandb-upload_3:899 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/requirements.txt
+2024-05-30 06:46:50,194 INFO wandb-upload_2:899 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/files/config.yaml
+2024-05-30 06:46:50,394 INFO Thread-11 (_thread_body):899 [sender.py:transition_state():613] send defer: 11
+2024-05-30 06:46:50,394 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:50,394 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 11
+2024-05-30 06:46:50,394 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:50,394 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 11
+2024-05-30 06:46:50,394 INFO SenderThread:899 [file_pusher.py:join():175] waiting for file pusher
+2024-05-30 06:46:50,395 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 12
+2024-05-30 06:46:50,395 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:50,395 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 12
+2024-05-30 06:46:50,395 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:50,395 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 12
+2024-05-30 06:46:50,395 INFO SenderThread:899 [file_stream.py:finish():601] file stream finish called
+2024-05-30 06:46:50,463 INFO SenderThread:899 [file_stream.py:finish():605] file stream finish is done
+2024-05-30 06:46:50,463 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 13
+2024-05-30 06:46:50,463 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:50,463 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 13
+2024-05-30 06:46:50,463 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:50,463 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 13
+2024-05-30 06:46:50,463 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 14
+2024-05-30 06:46:50,463 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer
+2024-05-30 06:46:50,463 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 14
+2024-05-30 06:46:50,463 DEBUG SenderThread:899 [sender.py:send():378] send: final
+2024-05-30 06:46:50,464 DEBUG SenderThread:899 [sender.py:send():378] send: footer
+2024-05-30 06:46:50,464 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer
+2024-05-30 06:46:50,464 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 14
+2024-05-30 06:46:50,464 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-30 06:46:50,464 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: poll_exit
+2024-05-30 06:46:50,465 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-30 06:46:50,465 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: server_info
+2024-05-30 06:46:50,465 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: get_summary
+2024-05-30 06:46:50,465 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: sampled_history
+2024-05-30 06:46:50,465 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: internal_messages
+2024-05-30 06:46:50,465 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: poll_exit
+2024-05-30 06:46:50,466 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: server_info
+2024-05-30 06:46:50,531 INFO MainThread:899 [wandb_run.py:_footer_history_summary_info():3994] rendering history
+2024-05-30 06:46:50,531 INFO MainThread:899 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
+2024-05-30 06:46:50,531 INFO MainThread:899 [wandb_run.py:_footer_sync_info():3953] logging synced files
+2024-05-30 06:46:50,532 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: shutdown
+2024-05-30 06:46:50,532 INFO HandlerThread:899 [handler.py:finish():882] shutting down handler
+2024-05-30 06:46:51,466 INFO WriterThread:899 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/run-wndsuxuq.wandb
+2024-05-30 06:46:51,531 INFO SenderThread:899 [sender.py:finish():1545] shutting down sender
+2024-05-30 06:46:51,531 INFO SenderThread:899 [file_pusher.py:finish():169] shutting down file pusher
+2024-05-30 06:46:51,531 INFO SenderThread:899 [file_pusher.py:join():175] waiting for file pusher
diff --git a/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/logs/debug.log b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/logs/debug.log
new file mode 100644
index 0000000000000000000000000000000000000000..509bb8a94e48f6bc04fcb04e925cf2aa79342c9e
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/logs/debug.log
@@ -0,0 +1,29 @@
+2024-05-30 06:46:07,737 INFO MainThread:743 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+2024-05-30 06:46:07,737 INFO MainThread:743 [wandb_setup.py:_flush():76] Configure stats pid to 743
+2024-05-30 06:46:07,737 INFO MainThread:743 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+2024-05-30 06:46:07,737 INFO MainThread:743 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+2024-05-30 06:46:07,737 INFO MainThread:743 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+2024-05-30 06:46:07,737 INFO MainThread:743 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+2024-05-30 06:46:07,737 WARNING MainThread:743 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+2024-05-30 06:46:07,737 INFO MainThread:743 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+2024-05-30 06:46:07,737 INFO MainThread:743 [wandb_setup.py:_flush():76] Applying login settings: {}
+2024-05-30 06:46:07,737 INFO MainThread:743 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/logs/debug.log
+2024-05-30 06:46:07,738 INFO MainThread:743 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/logs/debug-internal.log
+2024-05-30 06:46:07,738 INFO MainThread:743 [wandb_init.py:init():560] calling init triggers
+2024-05-30 06:46:07,738 INFO MainThread:743 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+config: {}
+2024-05-30 06:46:07,738 INFO MainThread:743 [wandb_init.py:init():610] starting backend
+2024-05-30 06:46:07,738 INFO MainThread:743 [wandb_init.py:init():614] setting up manager
+2024-05-30 06:46:07,742 INFO MainThread:743 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+2024-05-30 06:46:07,743 INFO MainThread:743 [wandb_init.py:init():622] backend started and connected
+2024-05-30 06:46:07,746 INFO MainThread:743 [wandb_init.py:init():711] updated telemetry
+2024-05-30 06:46:07,755 INFO MainThread:743 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+2024-05-30 06:46:08,026 INFO MainThread:743 [wandb_run.py:_on_init():2396] communicating current version
+2024-05-30 06:46:08,135 INFO MainThread:743 [wandb_run.py:_on_init():2405] got version response
+2024-05-30 06:46:08,135 INFO MainThread:743 [wandb_init.py:init():795] starting run threads in backend
+2024-05-30 06:46:08,426 INFO MainThread:743 [wandb_run.py:_console_start():2374] atexit reg
+2024-05-30 06:46:08,426 INFO MainThread:743 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+2024-05-30 06:46:08,426 INFO MainThread:743 [wandb_run.py:_redirect():2294] Wrapping output streams.
+2024-05-30 06:46:08,426 INFO MainThread:743 [wandb_run.py:_redirect():2319] Redirects installed.
+2024-05-30 06:46:08,428 INFO MainThread:743 [wandb_init.py:init():838] run started, returning control to user process
+2024-05-30 06:46:51,533 WARNING MsgRouterThr:743 [router.py:message_loop():77] message_loop has been closed
diff --git a/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/run-wndsuxuq.wandb b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/run-wndsuxuq.wandb
new file mode 100644
index 0000000000000000000000000000000000000000..2b8af469efe6fc8e31eeb15bf4e86f7dbd00012a
Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240530_064607-wndsuxuq/run-wndsuxuq.wandb differ
diff --git a/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/config.yaml b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..897d2b296d1594f4c8c3ef65f621f9b38730a86f
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/config.yaml
@@ -0,0 +1,33 @@
+wandb_version: 1
+
+_wandb:
+  desc: null
+  value:
+    python_version: 3.10.12
+    cli_version: 0.17.0
+    framework: huggingface
+    huggingface_version: 4.36.2
+    is_jupyter_run: false
+    is_kaggle_kernel: false
+    start_time: 1717059084
+    t:
+      1:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      3:
+      - 13
+      - 23
+      4: 3.10.12
+      5: 0.17.0
+      6: 4.36.2
+      8:
+      - 5
+      13: linux-x86_64
diff --git a/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/output.log b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/output.log
new file mode 100644
index 0000000000000000000000000000000000000000..4b61993de6f40725d768aa5a7371fe30fb877a3e
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/output.log
@@ -0,0 +1,5 @@
+
+2024-05-30:08:51:24,916 INFO [__main__.py:251] Verbosity set to INFO
+2024-05-30:08:51:34,421 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande']
+2024-05-30:08:51:34,422 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+2024-05-30:08:51:34,422 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/'}
diff --git a/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7391d0fb5302364497bd6017486c36f0caae0613
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/requirements.txt
@@ -0,0 +1,154 @@
+DataProperty==1.0.1
+GitPython==3.1.43
+Jinja2==3.1.4
+Markdown==3.6
+MarkupSafe==2.1.5
+Pillow-SIMD==7.0.0.post3
+PyYAML==6.0
+Werkzeug==3.0.3
+absl-py==2.1.0
+accelerate==0.30.1
+aiohttp==3.9.5
+aiosignal==1.3.1
+async-timeout==4.0.3
+attrs==23.2.0
+av==9.2.0
+cachetools==5.3.3
+certifi==2024.2.2
+cffi==1.15.1
+cfgv==3.4.0
+chardet==5.2.0
+charset-normalizer==3.3.2
+click==8.1.7
+cmake==3.29.2
+colorama==0.4.6
+datasets==2.19.1
+deepspeed==0.12.4+hpu.synapse.v1.15.1
+dill==0.3.8
+distlib==0.3.8
+docker-pycreds==0.4.0
+einops==0.8.0
+evaluate==0.4.2
+exceptiongroup==1.2.1
+expecttest==0.2.1
+filelock==3.14.0
+frozenlist==1.4.1
+fsspec==2024.3.1
+gitdb==4.0.11
+google-auth-oauthlib==0.4.6
+google-auth==2.29.0
+grpcio==1.63.0
+habana-media-loader==1.15.1.15
+habana-pyhlml==1.15.1.15
+habana-torch-dataloader==1.15.1.15
+habana-torch-plugin==1.15.1.15
+habana_gpu_migration==1.15.1.15
+habana_quantization_toolkit==1.15.1.15
+hjson==3.1.0
+huggingface-hub==0.23.2
+identify==2.5.36
+idna==3.7
+iniconfig==2.0.0
+joblib==1.4.2
+jsonlines==4.0.0
+lightning-habana==1.4.0
+lightning-utilities==0.11.2
+lightning==2.2.0.post0
+lm_eval==0.4.2
+lm_eval==0.4.2
+lm_eval==0.4.2
+lxml==5.2.2
+mbstrdecoder==1.1.3
+more-itertools==10.2.0
+mpi4py==3.1.4
+mpmath==1.3.0
+multidict==6.0.5
+multiprocess==0.70.16
+networkx==3.3
+ninja==1.11.1.1
+nltk==3.8.1
+nodeenv==1.8.0
+numexpr==2.10.0
+numpy==1.23.5
+oauthlib==3.2.2
+packaging==24.0
+pandas==2.0.1
+pathspec==0.12.1
+pathvalidate==3.2.0
+peft==0.11.1
+perfetto==0.7.0
+pip==22.0.2
+pip==23.3.1
+platformdirs==4.2.1
+pluggy==1.5.0
+portalocker==2.8.2
+pre-commit==3.3.3
+pretty-errors==1.2.25
+protobuf==3.20.3
+psutil==5.9.8
+py-cpuinfo==9.0.0
+pyarrow-hotfix==0.6
+pyarrow==16.1.0
+pyasn1==0.6.0
+pyasn1_modules==0.4.0
+pybind11==2.10.4
+pycparser==2.22
+pydantic==1.10.13
+pynvml==8.0.4
+pytablewriter==1.2.0
+pytest==8.2.0
+python-dateutil==2.9.0.post0
+pytorch-lightning==2.2.4
+pytz==2024.1
+regex==2023.5.5
+requests-oauthlib==2.0.0
+requests==2.31.0
+rouge_score==0.1.2
+rsa==4.9
+sacrebleu==2.4.2
+safetensors==0.4.3
+scikit-learn==1.5.0
+scipy==1.13.1
+sentencepiece==0.2.0
+sentry-sdk==2.3.1
+setproctitle==1.3.3
+setuptools==59.6.0
+setuptools==69.5.1
+six==1.16.0
+smmap==5.0.1
+sqlitedict==2.1.0
+symengine==0.11.0
+sympy==1.12
+tabledata==1.3.3
+tabulate==0.9.0
+tcolorpy==0.1.6
+tdqm==0.0.1
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.1
+tensorboard==2.11.2
+threadpoolctl==3.5.0
+tokenizers==0.15.2
+tomli==2.0.1
+torch==2.2.0a0+git8964477
+torch_tb_profiler==0.4.0
+torchaudio==2.2.0+08901ad
+torchdata==0.7.1+5e6f7b7
+torchmetrics==1.4.0
+torchtext==0.17.0+400da5c
+torchvision==0.17.0+b2383d4
+tqdm-multiprocess==0.0.11
+tqdm==4.66.4
+transformers==4.36.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..517582179d1f0347f31c9a29e00e5430d36f86ea --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-30T08:51:24.674067", + "startedAt": "2024-05-30T08:51:24.145625", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/", + "--tasks", + "winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq", + "--batch_size", + "auto", + "--wandb_args", + "project=english-eval,group=exp2,name=global_step10000" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-debug-5-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.5, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3197.19, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 210.71866607666016 + } + }, + "memory": { + "total": 1007.4379577636719 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..b7b56b3826f6e758d420bc1496eb5f1dd21ef0d8 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/logs/debug-internal.log @@ -0,0 +1,52 @@ +2024-05-30 08:51:24,167 INFO StreamThr :904 [internal.py:wandb_internal():85] W&B internal server running at pid: 904, started at: 2024-05-30 08:51:24.165054 +2024-05-30 08:51:24,171 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: status +2024-05-30 08:51:24,171 INFO WriterThread:904 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/run-ezl4k096.wandb +2024-05-30 08:51:24,174 DEBUG SenderThread:904 [sender.py:send():378] send: header +2024-05-30 08:51:24,178 DEBUG SenderThread:904 [sender.py:send():378] send: run +2024-05-30 08:51:24,474 INFO SenderThread:904 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files +2024-05-30 08:51:24,474 INFO SenderThread:904 [sender.py:_start_run_threads():1123] run started: ezl4k096 with start time 1717059084.164909 +2024-05-30 08:51:24,478 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: check_version +2024-05-30 08:51:24,478 DEBUG SenderThread:904 [sender.py:send_request():405] send_request: check_version +2024-05-30 08:51:24,599 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: run_start +2024-05-30 08:51:24,601 DEBUG HandlerThread:904 [system_info.py:__init__():26] System info init +2024-05-30 08:51:24,601 DEBUG HandlerThread:904 [system_info.py:__init__():41] System info init done +2024-05-30 08:51:24,601 INFO HandlerThread:904 [system_monitor.py:start():194] Starting system monitor +2024-05-30 08:51:24,602 INFO SystemMonitor:904 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-30 08:51:24,602 INFO HandlerThread:904 [system_monitor.py:probe():214] Collecting system info +2024-05-30 08:51:24,609 INFO SystemMonitor:904 [interfaces.py:start():188] Started cpu 
monitoring +2024-05-30 08:51:24,609 INFO SystemMonitor:904 [interfaces.py:start():188] Started disk monitoring +2024-05-30 08:51:24,615 INFO SystemMonitor:904 [interfaces.py:start():188] Started memory monitoring +2024-05-30 08:51:24,616 INFO SystemMonitor:904 [interfaces.py:start():188] Started network monitoring +2024-05-30 08:51:24,674 DEBUG HandlerThread:904 [system_info.py:probe():150] Probing system +2024-05-30 08:51:24,677 DEBUG HandlerThread:904 [system_info.py:_probe_git():135] Probing git +2024-05-30 08:51:24,687 ERROR HandlerThread:904 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-30 08:51:24,687 DEBUG HandlerThread:904 [system_info.py:_probe_git():143] Probing git done +2024-05-30 08:51:24,687 DEBUG HandlerThread:904 [system_info.py:probe():198] Probing system done +2024-05-30 08:51:24,687 DEBUG HandlerThread:904 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-30T08:51:24.674067', 'startedAt': '2024-05-30T08:51:24.145625', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/', '--tasks', 'winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq', '--batch_size', 'auto', '--wandb_args', 'project=english-eval,group=exp2,name=global_step10000'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-5-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.5, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3197.19, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 210.71866607666016}}, 'memory': {'total': 1007.4379577636719}} +2024-05-30 08:51:24,687 INFO HandlerThread:904 [system_monitor.py:probe():224] Finished collecting system info +2024-05-30 08:51:24,687 INFO HandlerThread:904 [system_monitor.py:probe():227] Publishing system info +2024-05-30 08:51:24,691 INFO HandlerThread:904 
[system_monitor.py:probe():229] Finished publishing system info +2024-05-30 08:51:24,698 DEBUG SenderThread:904 [sender.py:send():378] send: files +2024-05-30 08:51:24,699 INFO SenderThread:904 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-30 08:51:24,909 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: python_packages +2024-05-30 08:51:24,909 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 08:51:24,909 DEBUG SenderThread:904 [sender.py:send_request():405] send_request: python_packages +2024-05-30 08:51:24,911 DEBUG SenderThread:904 [sender.py:send_request():405] send_request: stop_status +2024-05-30 08:51:25,074 DEBUG SenderThread:904 [sender.py:send():378] send: telemetry +2024-05-30 08:51:25,297 INFO wandb-upload_0:904 [upload_job.py:push():130] Uploaded file /tmp/tmpmqkk6e67wandb/8s3w0xci-wandb-metadata.json +2024-05-30 08:51:25,476 INFO Thread-12 :904 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/wandb-metadata.json +2024-05-30 08:51:25,476 INFO Thread-12 :904 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/requirements.txt +2024-05-30 08:51:25,477 INFO Thread-12 :904 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/output.log +2024-05-30 08:51:27,476 INFO Thread-12 :904 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/output.log +2024-05-30 08:51:30,078 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 08:51:35,423 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 08:51:35,483 INFO Thread-12 :904 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/output.log +2024-05-30 08:51:37,495 INFO Thread-12 :904 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/files/output.log +2024-05-30 08:51:39,909 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 08:51:39,910 DEBUG SenderThread:904 [sender.py:send_request():405] send_request: stop_status +2024-05-30 08:51:40,998 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: status_report diff --git a/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/logs/debug.log b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..2991ddde748a35c93b260fcbe66a3625c126fce3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/logs/debug.log @@ -0,0 +1,28 @@ +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_setup.py:_flush():76] Configure stats pid to 748 +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-30 08:51:24,159 INFO MainThread:748 
[wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-30 08:51:24,159 WARNING MainThread:748 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/logs/debug.log +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/logs/debug-internal.log +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_init.py:init():560] calling init triggers +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_init.py:init():610] starting backend +2024-05-30 08:51:24,159 INFO MainThread:748 [wandb_init.py:init():614] setting up manager +2024-05-30 08:51:24,163 INFO MainThread:748 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-30 08:51:24,164 INFO MainThread:748 [wandb_init.py:init():622] backend started and connected +2024-05-30 08:51:24,168 INFO MainThread:748 [wandb_init.py:init():711] updated telemetry +2024-05-30 08:51:24,177 INFO MainThread:748 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-30 08:51:24,478 INFO MainThread:748 [wandb_run.py:_on_init():2396] communicating current version +2024-05-30 08:51:24,593 INFO MainThread:748 [wandb_run.py:_on_init():2405] got version response +2024-05-30 08:51:24,593 INFO MainThread:748 [wandb_init.py:init():795] starting run threads in backend +2024-05-30 08:51:24,910 INFO MainThread:748 [wandb_run.py:_console_start():2374] atexit reg +2024-05-30 08:51:24,910 INFO MainThread:748 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-30 08:51:24,910 INFO MainThread:748 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-30 08:51:24,910 INFO MainThread:748 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-30 08:51:24,913 INFO MainThread:748 [wandb_init.py:init():838] run started, returning control to user process diff --git a/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/run-ezl4k096.wandb b/lm-evaluation-harness/wandb/run-20240530_085124-ezl4k096/run-ezl4k096.wandb new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/image_processing_blip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/image_processing_blip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a6858696a6d7ebc42a8f891733323b433b76461 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/image_processing_blip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c4aabcf35b47bf285a5af84e06b45ecc233c052 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a5ac4dc1dae401ac5517e4a8eef91ee2a63724b Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip_text.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/processing_blip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/processing_blip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5ee7283172598acf6dde679657114c0cb242515 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blip/__pycache__/processing_blip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e99658ac1e885e1e80aa4ea9e52b7050f74abd1e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__init__.py @@ -0,0 +1,157 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
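Editor's note: the `__init__.py` below routes every public GPT-2 symbol through `_import_structure`, so heavy backends (torch, TF, Flax, tokenizers) are imported only when a symbol is first accessed. A minimal sketch of that lazy-module pattern follows; `_LazyStub` is a hypothetical, simplified stand-in for the real `transformers.utils._LazyModule`, which additionally handles `__dir__`, extra objects, and error reporting.

```python
# Simplified sketch of the lazy-import pattern (illustrative only).
import importlib
from types import ModuleType


class _LazyStub(ModuleType):
    """Defers importing submodules until one of their symbols is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [symbols]} into {symbol: submodule}.
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(submodule, symbol)


# A file using the pattern ends with:
#   sys.modules[__name__] = _LazyStub(__name__, _import_structure)
```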
+ +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_keras_nlp_available, + is_tensorflow_text_available, + is_tf_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2OnnxConfig"], + "tokenization_gpt2": ["GPT2Tokenizer"], +} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_gpt2_fast"] = ["GPT2TokenizerFast"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_gpt2"] = [ + "GPT2_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPT2DoubleHeadsModel", + "GPT2ForQuestionAnswering", + "GPT2ForSequenceClassification", + "GPT2ForTokenClassification", + "GPT2LMHeadModel", + "GPT2Model", + "GPT2PreTrainedModel", + "load_tf_weights_in_gpt2", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_gpt2"] = [ + "TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFGPT2DoubleHeadsModel", + "TFGPT2ForSequenceClassification", + "TFGPT2LMHeadModel", + "TFGPT2MainLayer", + "TFGPT2Model", + "TFGPT2PreTrainedModel", + ] + +try: + if not is_keras_nlp_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_gpt2_tf"] = ["TFGPT2Tokenizer"] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_gpt2"] = ["FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPT2PreTrainedModel"] + +if TYPE_CHECKING: + from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2OnnxConfig + from .tokenization_gpt2 import GPT2Tokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_gpt2_fast import GPT2TokenizerFast + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_gpt2 import ( + GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, + GPT2DoubleHeadsModel, + GPT2ForQuestionAnswering, + GPT2ForSequenceClassification, + GPT2ForTokenClassification, + GPT2LMHeadModel, + GPT2Model, + GPT2PreTrainedModel, + load_tf_weights_in_gpt2, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_gpt2 import ( + TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, + TFGPT2DoubleHeadsModel, + TFGPT2ForSequenceClassification, + TFGPT2LMHeadModel, + TFGPT2MainLayer, + TFGPT2Model, + TFGPT2PreTrainedModel, + ) + + try: + if not is_keras_nlp_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_gpt2_tf import TFGPT2Tokenizer + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model, FlaxGPT2PreTrainedModel + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, 
module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/convert_gpt2_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/convert_gpt2_original_tf_checkpoint_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3be796665714f1621273da28b00726aeafe63ae7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/convert_gpt2_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_gpt2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_gpt2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bce06eebe6c8b160db88b94528722520f7ad67f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_gpt2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a50b00aa2597832ebd7b3287bc1bcc86c3a4ef06 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/configuration_gpt2.py b/venv/lib/python3.10/site-packages/transformers/models/gpt2/configuration_gpt2.py new file mode 100644 index 0000000000000000000000000000000000000000..45495c0012fdd811342cf091a8f2df2cd568d7aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt2/configuration_gpt2.py @@ -0,0 +1,272 @@ +# coding=utf-8 +# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" OpenAI GPT-2 configuration""" +from collections import OrderedDict +from typing import Any, List, Mapping, Optional + +from ... import PreTrainedTokenizer, TensorType, is_torch_available +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfigWithPast, PatchingSpec +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class GPT2Config(PretrainedConfig): + """ + This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to + instantiate a GPT-2 model according to the specified arguments, defining the model architecture. 
Instantiating a + configuration with the defaults will yield a similar configuration to that of the GPT-2 + [openai-community/gpt2](https://huggingface.co/openai-community/gpt2) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 50257): + Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`]. + n_positions (`int`, *optional*, defaults to 1024): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + n_embd (`int`, *optional*, defaults to 768): + Dimensionality of the embeddings and hidden states. + n_layer (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + n_head (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + n_inner (`int`, *optional*): + Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd + activation_function (`str`, *optional*, defaults to `"gelu_new"`): + Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. + resid_pdrop (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + embd_pdrop (`float`, *optional*, defaults to 0.1): + The dropout ratio for the embeddings. + attn_pdrop (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention. + layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): + The epsilon to use in the layer normalization layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + summary_type (`string`, *optional*, defaults to `"cls_index"`): + Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and + [`TFGPT2DoubleHeadsModel`]. + + Has to be one of the following options: + + - `"last"`: Take the last token hidden state (like XLNet). + - `"first"`: Take the first token hidden state (like BERT). + - `"mean"`: Take the mean of all tokens hidden states. + - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). + - `"attn"`: Not implemented now, use multi-head attention. + summary_use_proj (`bool`, *optional*, defaults to `True`): + Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and + [`TFGPT2DoubleHeadsModel`]. + + Whether or not to add a projection after the vector extraction. + summary_activation (`str`, *optional*): + Argument used when doing sequence summary. Used in for the multiple choice head in + [`GPT2DoubleHeadsModel`]. + + Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation. + summary_proj_to_labels (`bool`, *optional*, defaults to `True`): + Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and + [`TFGPT2DoubleHeadsModel`]. + + Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes. 
+ summary_first_dropout (`float`, *optional*, defaults to 0.1): + Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and + [`TFGPT2DoubleHeadsModel`]. + + The dropout ratio to be used after the projection and activation. + scale_attn_weights (`bool`, *optional*, defaults to `True`): + Scale attention weights by dividing by sqrt(hidden_size).. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + bos_token_id (`int`, *optional*, defaults to 50256): + Id of the beginning of sentence token in the vocabulary. + eos_token_id (`int`, *optional*, defaults to 50256): + Id of the end of sentence token in the vocabulary. + scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`): + Whether to additionally scale attention weights by `1 / layer_idx + 1`. + reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`): + Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention + dot-product/softmax to float() when training with mixed precision. + + Example: + + ```python + >>> from transformers import GPT2Config, GPT2Model + + >>> # Initializing a GPT2 configuration + >>> configuration = GPT2Config() + + >>> # Initializing a model (with random weights) from the configuration + >>> model = GPT2Model(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "gpt2" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = { + "hidden_size": "n_embd", + "max_position_embeddings": "n_positions", + "num_attention_heads": "n_head", + "num_hidden_layers": "n_layer", + } + + def __init__( + self, + vocab_size=50257, + n_positions=1024, + n_embd=768, + n_layer=12, + n_head=12, + n_inner=None, + activation_function="gelu_new", + resid_pdrop=0.1, + embd_pdrop=0.1, + attn_pdrop=0.1, + layer_norm_epsilon=1e-5, + initializer_range=0.02, + summary_type="cls_index", + summary_use_proj=True, + summary_activation=None, + summary_proj_to_labels=True, + summary_first_dropout=0.1, + scale_attn_weights=True, + use_cache=True, + bos_token_id=50256, + eos_token_id=50256, + scale_attn_by_inverse_layer_idx=False, + reorder_and_upcast_attn=False, + **kwargs, + ): + self.vocab_size = vocab_size + self.n_positions = n_positions + self.n_embd = n_embd + self.n_layer = n_layer + self.n_head = n_head + self.n_inner = n_inner + self.activation_function = activation_function + self.resid_pdrop = resid_pdrop + self.embd_pdrop = embd_pdrop + self.attn_pdrop = attn_pdrop + self.layer_norm_epsilon = layer_norm_epsilon + self.initializer_range = initializer_range + self.summary_type = summary_type + self.summary_use_proj = summary_use_proj + self.summary_activation = summary_activation + self.summary_first_dropout = summary_first_dropout + self.summary_proj_to_labels = summary_proj_to_labels + self.scale_attn_weights = scale_attn_weights + self.use_cache = use_cache + self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx + self.reorder_and_upcast_attn = reorder_and_upcast_attn + + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + + super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + +class GPT2OnnxConfig(OnnxConfigWithPast): + def __init__( + self, + config: PretrainedConfig, + task: str = "default", + patching_specs: List[PatchingSpec] = None, + use_past: bool = False, + ): + super().__init__(config, task=task, 
patching_specs=patching_specs, use_past=use_past) + if not getattr(self._config, "pad_token_id", None): + # TODO: how to do that better? + self._config.pad_token_id = 0 + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}}) + if self.use_past: + self.fill_with_past_key_values_(common_inputs, direction="inputs") + common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"} + else: + common_inputs["attention_mask"] = {0: "batch", 1: "sequence"} + + return common_inputs + + @property + def num_layers(self) -> int: + return self._config.n_layer + + @property + def num_attention_heads(self) -> int: + return self._config.n_head + + def generate_dummy_inputs( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + # We need to order the input in the way they appears in the forward() + ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]}) + + # Need to add the past_keys + if self.use_past: + if not is_torch_available(): + raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") + else: + import torch + + batch, seqlen = common_inputs["input_ids"].shape + # Not using the same length for past_key_values + past_key_values_length = seqlen + 2 + past_shape = ( + batch, + self.num_attention_heads, + past_key_values_length, + self._config.hidden_size // self.num_attention_heads, + ) + ordered_inputs["past_key_values"] = [ + (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers) + ] + + ordered_inputs["attention_mask"] = common_inputs["attention_mask"] + if self.use_past: + mask_dtype = ordered_inputs["attention_mask"].dtype + ordered_inputs["attention_mask"] = torch.cat( + [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 + ) + + return ordered_inputs + + @property + def default_onnx_opset(self) -> int: + return 13 diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..066ba06503affdb8fa2ef1b2df1a4cf6539efc22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py @@ -0,0 +1,69 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
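Editor's note: a hedged usage sketch for the `GPT2OnnxConfig` defined above, showing how `generate_dummy_inputs` produces past-aware sample inputs for an ONNX export. The tokenizer checkpoint is illustrative, and PyTorch must be installed because `use_past=True` materializes the `past_key_values` tensors.

```python
# Hedged sketch (not from the source diff): dummy inputs for an ONNX export.
from transformers import AutoTokenizer, GPT2Config, TensorType
from transformers.models.gpt2.configuration_gpt2 import GPT2OnnxConfig

config = GPT2Config()
onnx_config = GPT2OnnxConfig(config, task="default", use_past=True)

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")  # illustrative checkpoint
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)

print(list(dummy))                     # ['input_ids', 'past_key_values', 'attention_mask']
print(onnx_config.inputs)              # dynamic-axis spec consumed by the exporter
print(onnx_config.default_onnx_opset)  # 13
```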
+"""Convert OpenAI GPT checkpoint.""" + + +import argparse + +import torch + +from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2 +from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging + + +logging.set_verbosity_info() + + +def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path): + # Construct model + if gpt2_config_file == "": + config = GPT2Config() + else: + config = GPT2Config.from_json_file(gpt2_config_file) + model = GPT2Model(config) + + # Load weights from numpy + load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path) + + # Save pytorch-model + pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME + pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME + print(f"Save PyTorch model to {pytorch_weights_dump_path}") + torch.save(model.state_dict(), pytorch_weights_dump_path) + print(f"Save configuration file to {pytorch_config_dump_path}") + with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: + f.write(config.to_json_string()) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." + ) + parser.add_argument( + "--gpt2_config_file", + default="", + type=str, + help=( + "An optional config json file corresponding to the pre-trained OpenAI model. \n" + "This specifies the model architecture." + ), + ) + args = parser.parse_args() + convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/modeling_flax_gpt2.py b/venv/lib/python3.10/site-packages/transformers/models/gpt2/modeling_flax_gpt2.py new file mode 100644 index 0000000000000000000000000000000000000000..c3ef377642a3c5b043ae36acb6443041dcf75742 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt2/modeling_flax_gpt2.py @@ -0,0 +1,779 @@ +# coding=utf-8 +# Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Any, Optional, Tuple + +import flax.linen as nn +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.linen import combine_masks, make_causal_mask +from flax.linen.attention import dot_product_attention_weights +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import lax + +from ...modeling_flax_outputs import ( + FlaxBaseModelOutputWithPastAndCrossAttentions, + FlaxCausalLMOutputWithCrossAttentions, +) +from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging +from .configuration_gpt2 import GPT2Config + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "openai-community/gpt2" +_CONFIG_FOR_DOC = "GPT2Config" + + +GPT2_START_DOCSTRING = r""" + + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a Flax Linen + [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a + regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. + + Finally, this model supports inherent JAX features such as: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + config ([`GPT2Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and + `jax.numpy.bfloat16` (on TPUs). + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. + + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. +""" + +GPT2_INPUTS_DOCSTRING = r""" + Args: + input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`): + `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): + Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast + auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class FlaxConv1D(nn.Module): + features: int + use_bias: bool = True + dtype: Any = jnp.float32 + precision: Any = None + + @nn.compact + def __call__(self, inputs): + inputs = jnp.asarray(inputs, self.dtype) + kernel = self.param("kernel", jax.nn.initializers.normal(stddev=0.02), (self.features, inputs.shape[-1])) + kernel = jnp.asarray(kernel.transpose(), self.dtype) + y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())), precision=self.precision) + if self.use_bias: + bias = self.param("bias", jax.nn.initializers.zeros, (self.features,)) + bias = jnp.asarray(bias, self.dtype) + y = y + bias + return y + + +class FlaxGPT2Attention(nn.Module): + config: GPT2Config + dtype: jnp.dtype = jnp.float32 + causal: bool = True + is_cross_attention: bool = False + + def setup(self): + config = self.config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + + if self.is_cross_attention: + self.c_attn = FlaxConv1D(2 * self.embed_dim, dtype=self.dtype) + self.q_attn = FlaxConv1D(self.embed_dim, dtype=self.dtype) + else: + self.c_attn = FlaxConv1D(3 * self.embed_dim, dtype=self.dtype) + self.c_proj = FlaxConv1D(self.embed_dim, dtype=self.dtype) + + self.resid_dropout = nn.Dropout(rate=config.resid_pdrop) + + if self.causal: + self.causal_mask = make_causal_mask( + jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool" + ) + + def _split_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) + + def _merge_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) + + @nn.compact + def _concatenate_to_cache(self, key, value, query, attention_mask): + """ + This function takes projected key, value states from a single input token and concatenates the states to cached + states from previous steps. This function is slighly adapted from the official Flax repository: + https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 + """ + # detect if we're initializing by absence of existing cache data. 
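+ # (Editorial note, not part of the upstream source.) Cache layout: cached_key and
+ # cached_value have shape (*batch_dims, max_length, num_heads, depth_per_head), and
+ # cache_index counts how many positions are already filled; each call writes the
+ # incoming key/value slice at cache_index via lax.dynamic_update_slice, then advances it.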
+ is_initialized = self.has_variable("cache", "cached_key") + cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) + cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) + cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) + + if is_initialized: + *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape + # update key, value caches with our new 1d spatial slices + cur_index = cache_index.value + indices = (0,) * len(batch_dims) + (cur_index, 0, 0) + key = lax.dynamic_update_slice(cached_key.value, key, indices) + value = lax.dynamic_update_slice(cached_value.value, value, indices) + cached_key.value = key + cached_value.value = value + num_updated_cache_vectors = query.shape[1] + cache_index.value = cache_index.value + num_updated_cache_vectors + # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. + pad_mask = jnp.broadcast_to( + jnp.arange(max_length) < cur_index + num_updated_cache_vectors, + tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), + ) + attention_mask = combine_masks(pad_mask, attention_mask) + return key, value, attention_mask + + def __call__( + self, + hidden_states, + key_value_states: Optional[jnp.ndarray] = None, + attention_mask=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + ): + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + batch_size = hidden_states.shape[0] + + if not is_cross_attention: + qkv_out = self.c_attn(hidden_states) + query, key, value = jnp.split(qkv_out, 3, axis=2) + else: + q_out = self.q_attn(hidden_states) + (query,) = jnp.split(q_out, 1, axis=2) + kv_out = self.c_attn(key_value_states) + key, value = jnp.split(kv_out, 2, axis=2) + + query = self._split_heads(query) + key = self._split_heads(key) + value = self._split_heads(value) + + query_length, key_length = query.shape[1], key.shape[1] + + if self.causal: + if self.has_variable("cache", "cached_key"): + mask_shift = self.variables["cache"]["cache_index"] + max_decoder_length = self.variables["cache"]["cached_key"].shape[1] + causal_mask = lax.dynamic_slice( + self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) + ) + else: + causal_mask = self.causal_mask[:, :, :query_length, :key_length] + causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) + + # combine masks if needed + if attention_mask is not None and self.causal: + attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) + attention_mask = combine_masks(attention_mask, causal_mask) + elif self.causal: + attention_mask = causal_mask + elif attention_mask is not None: + attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) + + dropout_rng = None + if not deterministic and self.config.attn_pdrop > 0.0: + dropout_rng = self.make_rng("dropout") + + # During fast autoregressive decoding, we feed one position at a time, + # and cache the keys and values step by step. 
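+ # (Editorial note, not part of the upstream source.) Passing init_cache=True seeds
+ # the "cache" variable collection on the first call; subsequent decode steps take the
+ # has_variable("cache", "cached_key") branch and append via _concatenate_to_cache.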
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache): + key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask) + + # transform boolean mask into float mask + if attention_mask is not None: + attention_bias = lax.select( + attention_mask > 0, + jnp.full(attention_mask.shape, 0.0).astype(self.dtype), + jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), + ) + else: + attention_bias = None + + # usual dot product attention + attn_weights = dot_product_attention_weights( + query, + key, + bias=attention_bias, + dropout_rng=dropout_rng, + dropout_rate=self.config.attn_pdrop, + deterministic=deterministic, + dtype=self.dtype, + precision=None, + ) + + attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value) + attn_output = self._merge_heads(attn_output) + attn_output = self.c_proj(attn_output) + attn_output = self.resid_dropout(attn_output, deterministic=deterministic) + + outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) + return outputs + + +class FlaxGPT2MLP(nn.Module): + config: GPT2Config + intermediate_size: int + dtype: jnp.dtype = jnp.float32 + + def setup(self): + embed_dim = self.config.hidden_size + self.c_fc = FlaxConv1D(self.intermediate_size, dtype=self.dtype) + self.c_proj = FlaxConv1D(embed_dim, dtype=self.dtype) + self.act = ACT2FN[self.config.activation_function] + self.dropout = nn.Dropout(rate=self.config.resid_pdrop) + + def __call__(self, hidden_states, deterministic: bool = True): + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + return hidden_states + + +class FlaxGPT2Block(nn.Module): + config: GPT2Config + dtype: jnp.dtype = jnp.float32 + + def setup(self): + hidden_size = self.config.hidden_size + inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size + + self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) + self.attn = FlaxGPT2Attention(self.config, dtype=self.dtype) + self.ln_2 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) + + if self.config.add_cross_attention: + self.crossattention = FlaxGPT2Attention( + config=self.config, dtype=self.dtype, causal=False, is_cross_attention=True + ) + self.ln_cross_attn = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) + + self.mlp = FlaxGPT2MLP(self.config, inner_dim, dtype=self.dtype) + + def __call__( + self, + hidden_states, + attention_mask=None, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + ): + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_outputs = self.attn( + hidden_states, + attention_mask=attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + # residual connection + attn_output = attn_outputs[0] # output_attn: a, (attentions) + outputs = attn_outputs[1:] + # residual connection + hidden_states = attn_output + residual + + # Cross-Attention Block + if encoder_hidden_states is not None: + # add one self-attention block for cross-attention + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " + 
"cross-attention layers by setting `config.add_cross_attention=True`" + ) + residual = hidden_states + hidden_states = self.ln_cross_attn(hidden_states) + cross_attn_outputs = self.crossattention( + hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + deterministic=deterministic, + output_attentions=output_attentions, + ) + attn_output = cross_attn_outputs[0] + # residual connection + hidden_states = residual + attn_output + outputs = outputs + cross_attn_outputs[1:] # add cross attentions if we output attention weights + + residual = hidden_states + hidden_states = self.ln_2(hidden_states) + feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic) + # residual connection + hidden_states = residual + feed_forward_hidden_states + + outputs = (hidden_states,) + outputs + + return outputs + + +class FlaxGPT2PreTrainedModel(FlaxPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = GPT2Config + base_model_prefix = "transformer" + module_class: nn.Module = None + + def __init__( + self, + config: GPT2Config, + input_shape: Tuple = (1, 1), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensors + input_ids = jnp.zeros(input_shape, dtype="i4") + attention_mask = jnp.ones_like(input_ids) + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + if self.config.add_cross_attention: + encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,)) + encoder_attention_mask = attention_mask + module_init_outputs = self.module.init( + rngs, + input_ids, + attention_mask, + position_ids, + encoder_hidden_states, + encoder_attention_mask, + return_dict=False, + ) + else: + module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False) + + random_params = module_init_outputs["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def init_cache(self, batch_size, max_length): + r""" + Args: + batch_size (`int`): + batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. + max_length (`int`): + maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized + cache. 
+ """ + # init input variables to retrieve cache + input_ids = jnp.ones((batch_size, max_length)) + attention_mask = jnp.ones_like(input_ids) + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) + + init_variables = self.module.init( + jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True + ) + return unfreeze(init_variables["cache"]) + + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) + def __call__( + self, + input_ids, + attention_mask=None, + position_ids=None, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + params: dict = None, + past_key_values: dict = None, + dropout_rng: jax.random.PRNGKey = None, + train: bool = False, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + if encoder_hidden_states is not None and encoder_attention_mask is None: + batch_size, sequence_length = encoder_hidden_states.shape[:2] + encoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + batch_size, sequence_length = input_ids.shape + + if position_ids is None: + if past_key_values is not None: + raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.") + + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + if attention_mask is None: + attention_mask = jnp.ones((batch_size, sequence_length)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be changed by FlaxGPT2Attention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + outputs = self.module.apply( + inputs, + jnp.array(input_ids, dtype="i4"), + jnp.array(attention_mask, dtype="i4"), + jnp.array(position_ids, dtype="i4"), + encoder_hidden_states, + encoder_attention_mask, + not train, + False, + output_attentions, + output_hidden_states, + return_dict, + rngs=rngs, + mutable=mutable, + ) + + # add updated cache to model output + if past_key_values is not None and return_dict: + outputs, past_key_values = outputs + outputs["past_key_values"] = unfreeze(past_key_values["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs, past_key_values = outputs + outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] + + return outputs + + +class FlaxGPT2BlockCollection(nn.Module): + config: GPT2Config + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.blocks = [ + FlaxGPT2Block(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) + ] + + def __call__( + self, + hidden_states, + attention_mask=None, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + + for block in self.blocks: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + layer_outputs = block( + hidden_states, + attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # this contains possible `None` values - `FlaxGPT2Module` will filter them out + outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) + + return outputs + + +class FlaxGPT2Module(nn.Module): + config: GPT2Config + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.embed_dim = self.config.hidden_size + + self.wte = nn.Embed( + self.config.vocab_size, + self.embed_dim, + embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + dtype=self.dtype, + ) + self.wpe = nn.Embed( + self.config.max_position_embeddings, + self.embed_dim, + embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + dtype=self.dtype, + ) + self.dropout = nn.Dropout(rate=self.config.embd_pdrop) + self.h = FlaxGPT2BlockCollection(self.config, dtype=self.dtype) + self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + deterministic=True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + 
input_embeds = self.wte(input_ids.astype("i4")) + position_embeds = self.wpe(position_ids.astype("i4")) + + hidden_states = input_embeds + position_embeds + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + + outputs = self.h( + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + hidden_states = self.ln_f(hidden_states) + + if output_hidden_states: + all_hidden_states = outputs[1] + (hidden_states,) + outputs = (hidden_states, all_hidden_states) + outputs[2:] + else: + outputs = (hidden_states,) + outputs[1:] + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + hidden_states=outputs[1], + attentions=outputs[2], + cross_attentions=outputs[3], + ) + + +@add_start_docstrings( + "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.", + GPT2_START_DOCSTRING, +) +class FlaxGPT2Model(FlaxGPT2PreTrainedModel): + module_class = FlaxGPT2Module + + +append_call_sample_docstring( + FlaxGPT2Model, + _CHECKPOINT_FOR_DOC, + FlaxBaseModelOutputWithPastAndCrossAttentions, + _CONFIG_FOR_DOC, +) + + +class FlaxGPT2LMHeadModule(nn.Module): + config: GPT2Config + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.transformer = FlaxGPT2Module(self.config, dtype=self.dtype) + self.lm_head = nn.Dense( + self.config.vocab_size, + use_bias=False, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + ) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + outputs = self.transformer( + input_ids, + attention_mask, + position_ids, + encoder_hidden_states, + encoder_attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + + if self.config.tie_word_embeddings: + shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T + lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states) + else: + lm_logits = self.lm_head(hidden_states) + + if not return_dict: + return (lm_logits,) + outputs[1:] + + return FlaxCausalLMOutputWithCrossAttentions( + logits=lm_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + +@add_start_docstrings( + """ + The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input + embeddings). 
+ """, + GPT2_START_DOCSTRING, +) +class FlaxGPT2LMHeadModel(FlaxGPT2PreTrainedModel): + module_class = FlaxGPT2LMHeadModule + + def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): + # initializing the cache + batch_size, seq_length = input_ids.shape + + past_key_values = self.init_cache(batch_size, max_length) + # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. + # But since GPT2 uses a causal mask, those positions are masked anyways. + # Thus we can create a single static attention_mask here, which is more efficient for compilation + extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") + if attention_mask is not None: + position_ids = attention_mask.cumsum(axis=-1) - 1 + extended_attention_mask = lax.dynamic_update_slice( + extended_attention_mask, attention_mask.astype("i4"), (0, 0) + ) + else: + position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) + + return { + "past_key_values": past_key_values, + "attention_mask": extended_attention_mask, + "position_ids": position_ids, + } + + def update_inputs_for_generation(self, model_outputs, model_kwargs): + model_kwargs["past_key_values"] = model_outputs.past_key_values + model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 + return model_kwargs + + +append_call_sample_docstring( + FlaxGPT2LMHeadModel, + _CHECKPOINT_FOR_DOC, + FlaxCausalLMOutputWithCrossAttentions, + _CONFIG_FOR_DOC, +) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/modeling_gpt2.py b/venv/lib/python3.10/site-packages/transformers/models/gpt2/modeling_gpt2.py new file mode 100644 index 0000000000000000000000000000000000000000..1409a3fc3f0fcbc16fb17d7fdbe1d166f2bc38ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt2/modeling_gpt2.py @@ -0,0 +1,1944 @@ +# coding=utf-8 +# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""PyTorch OpenAI GPT-2 model.""" + +import math +import os +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.cuda.amp import autocast +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + QuestionAnsweringModelOutput, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel, SequenceSummary +from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from ...utils.model_parallel_utils import assert_device_map, get_device_map +from .configuration_gpt2 import GPT2Config + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "openai-community/gpt2" +_CONFIG_FOR_DOC = "GPT2Config" + + +from ..deprecated._archive_maps import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): + """Load tf checkpoints in a pytorch model""" + try: + import re + + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." 
+ ) + raise + tf_path = os.path.abspath(gpt2_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info(f"Loading TF weight {name} with shape {shape}") + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array.squeeze()) + + for name, array in zip(names, arrays): + name = name[6:] # skip "model/" + name = name.split("/") + pointer = model + for m_name in name: + if re.fullmatch(r"[A-Za-z]+\d+", m_name): + scope_names = re.split(r"(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "w" or scope_names[0] == "g": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "b": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "wpe" or scope_names[0] == "wte": + pointer = getattr(pointer, scope_names[0]) + pointer = getattr(pointer, "weight") + else: + pointer = getattr(pointer, scope_names[0]) + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + try: + if pointer.shape != array.shape: + raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") + except ValueError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info(f"Initialize PyTorch weight {name}") + pointer.data = torch.from_numpy(array) + return model + + +class GPT2Attention(nn.Module): + def __init__(self, config, is_cross_attention=False, layer_idx=None): + super().__init__() + self.config = config + max_positions = config.max_position_embeddings + self.register_buffer( + "bias", + torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( + 1, 1, max_positions, max_positions + ), + persistent=False, + ) + self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False) + + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + self.split_size = self.embed_dim + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." 
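The name-mapping loop in `load_tf_weights_in_gpt2` above hinges on splitting TF scope names like `h0` into an attribute name plus an index. A quick illustration of the two regexes it uses (the example path is made up):

```python
import re

name = "model/h0/attn/c_attn/w"[6:]           # drop the "model/" prefix
for m_name in name.split("/"):
    if re.fullmatch(r"[A-Za-z]+\d+", m_name):
        print(re.split(r"(\d+)", m_name))     # 'h0' -> ['h', '0', ''] -> model.h[0]
    else:
        print([m_name])                       # 'attn', 'c_attn' map straight to attributes
```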
+ ) + + self.scale_attn_weights = config.scale_attn_weights + self.is_cross_attention = is_cross_attention + + # Layer-wise attention scaling, reordering, and upcasting + self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx + self.layer_idx = layer_idx + self.reorder_and_upcast_attn = config.reorder_and_upcast_attn + + if self.is_cross_attention: + self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) + self.q_attn = Conv1D(self.embed_dim, self.embed_dim) + else: + self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) + self.c_proj = Conv1D(self.embed_dim, self.embed_dim) + + self.attn_dropout = nn.Dropout(config.attn_pdrop) + self.resid_dropout = nn.Dropout(config.resid_pdrop) + self.is_causal = True + + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads) + index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)]) + + # Prune conv1d layers + self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) + self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) + + # Update hyper params + self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads)) + self.num_heads = self.num_heads - len(heads) + self.pruned_heads = self.pruned_heads.union(heads) + + def _attn(self, query, key, value, attention_mask=None, head_mask=None): + attn_weights = torch.matmul(query, key.transpose(-1, -2)) + + if self.scale_attn_weights: + attn_weights = attn_weights / torch.full( + [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device + ) + + # Layer-wise attention scaling + if self.scale_attn_by_inverse_layer_idx: + attn_weights = attn_weights / float(self.layer_idx + 1) + + if not self.is_cross_attention: + # if only "normal" attention layer implements causal mask + query_length, key_length = query.size(-2), key.size(-2) + causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] + mask_value = torch.finfo(attn_weights.dtype).min + # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
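The `bias` buffer registered in `__init__` above is one full lower-triangular boolean matrix; `_attn` then slices a `query_length` x `key_length` window out of it, which makes the same buffer work both with and without a KV cache. A toy version of that slicing:

```python
import torch

max_positions = 6
bias = torch.tril(torch.ones(max_positions, max_positions, dtype=torch.bool)).view(
    1, 1, max_positions, max_positions
)

# Decoding one new token with 3 positions already cached:
query_length, key_length = 1, 4
causal_mask = bias[:, :, key_length - query_length : key_length, :key_length]
print(causal_mask)  # tensor([[[[True, True, True, True]]]]) -- the new token sees all 4 keys
```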
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` + mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device) + attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value) + + if attention_mask is not None: + # Apply the attention mask + attn_weights = attn_weights + attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise + attn_weights = attn_weights.type(value.dtype) + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_output = torch.matmul(attn_weights, value) + + return attn_output, attn_weights + + def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None): + # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM) + bsz, num_heads, q_seq_len, dk = query.size() + _, _, k_seq_len, _ = key.size() + + # Preallocate attn_weights for `baddbmm` + attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device) + + # Compute Scale Factor + scale_factor = 1.0 + if self.scale_attn_weights: + scale_factor /= float(value.size(-1)) ** 0.5 + + if self.scale_attn_by_inverse_layer_idx: + scale_factor /= float(self.layer_idx + 1) + + # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk)) + with autocast(enabled=False): + q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) + attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) + attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) + + if not self.is_cross_attention: + # if only "normal" attention layer implements causal mask + query_length, key_length = query.size(-2), key.size(-2) + causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] + mask_value = torch.finfo(attn_weights.dtype).min + # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
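`_upcast_and_reordered_attn` above folds the 1/sqrt(d_k) (and optional 1/(layer_idx+1)) scaling into `torch.baddbmm`'s `alpha`, computing the scores in float32 with autocast disabled. The `alpha`/`beta` mechanics in isolation:

```python
import torch

bsz_heads, q_len, k_len, dk = 2, 3, 3, 4
q = torch.randn(bsz_heads, q_len, dk)
k = torch.randn(bsz_heads, k_len, dk)

scale = 1.0 / dk ** 0.5
out = torch.empty(bsz_heads, q_len, k_len)
# beta=0 ignores `out`'s uninitialized contents; alpha folds the 1/sqrt(dk)
# scaling into the matmul itself, as in the reordered/upcast path above.
scores = torch.baddbmm(out, q.float(), k.transpose(-1, -2).float(), beta=0, alpha=scale)
assert torch.allclose(scores, (q @ k.transpose(-1, -2)) * scale, atol=1e-5)
```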
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` + mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) + attn_weights = torch.where(causal_mask, attn_weights, mask_value) + + if attention_mask is not None: + # Apply the attention mask + attn_weights = attn_weights + attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise + if attn_weights.dtype != torch.float32: + raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32") + attn_weights = attn_weights.type(value.dtype) + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_output = torch.matmul(attn_weights, value) + + return attn_output, attn_weights + + def _split_heads(self, tensor, num_heads, attn_head_size): + """ + Splits hidden_size dim into attn_head_size and num_heads + """ + new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) + tensor = tensor.view(new_shape) + return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) + + def _merge_heads(self, tensor, num_heads, attn_head_size): + """ + Merges attn_head_size dim and num_attn_heads dim into hidden_size + """ + tensor = tensor.permute(0, 2, 1, 3).contiguous() + new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) + return tensor.view(new_shape) + + def forward( + self, + hidden_states: Optional[Tuple[torch.FloatTensor]], + layer_past: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: + if encoder_hidden_states is not None: + if not hasattr(self, "q_attn"): + raise ValueError( + "If class is used as cross attention, the weights `q_attn` have to be defined. " + "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." 
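`_split_heads` and `_merge_heads` above are exact inverses: one reshapes hidden vectors into per-head blocks and moves the head axis forward, the other undoes it. A round-trip check with toy sizes:

```python
import torch

def split_heads(t, num_heads, head_dim):
    t = t.view(*t.shape[:-1], num_heads, head_dim)
    return t.permute(0, 2, 1, 3)              # (batch, head, seq, head_dim)

def merge_heads(t, num_heads, head_dim):
    t = t.permute(0, 2, 1, 3).contiguous()
    return t.view(*t.shape[:-2], num_heads * head_dim)

x = torch.randn(2, 5, 12)                      # hidden = 3 heads x 4 dims
assert torch.equal(merge_heads(split_heads(x, 3, 4), 3, 4), x)
```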
+ ) + + query = self.q_attn(hidden_states) + key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) + attention_mask = encoder_attention_mask + else: + query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if layer_past is not None: + past_key, past_value = layer_past + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + if use_cache is True: + present = (key, value) + else: + present = None + + if self.reorder_and_upcast_attn: + attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask) + else: + attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + + attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) + attn_output = self.c_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights,) + + return outputs # a, present, (attentions) + + +class GPT2FlashAttention2(GPT2Attention): + """ + GPT2 flash attention module. This module inherits from `GPT2Attention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: Optional[Tuple[torch.FloatTensor]], + layer_past: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: + bsz, _, _ = hidden_states.size() + if encoder_hidden_states is not None: + if not hasattr(self, "q_attn"): + raise ValueError( + "If class is used as cross attention, the weights `q_attn` have to be defined. " + "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." 
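Two load-bearing steps in the eager `forward` above: the fused `c_attn` projection is split into Q, K, V along the last axis, and cached keys/values are concatenated on the sequence axis before attention. Both in miniature, using `nn.Linear` as a stand-in for the `Conv1D` projection (which is functionally a linear layer):

```python
import torch
import torch.nn as nn

embed_dim, num_heads = 8, 2
c_attn = nn.Linear(embed_dim, 3 * embed_dim)   # stand-in for Conv1D(3 * embed_dim, embed_dim)

x = torch.randn(1, 1, embed_dim)               # a single new token
query, key, value = c_attn(x).split(embed_dim, dim=2)

# After _split_heads the cache is (batch, heads, seq, head_dim); new keys are
# appended on the sequence axis, and `present` would carry the result forward.
key = key.view(1, 1, num_heads, embed_dim // num_heads).permute(0, 2, 1, 3)
past_key = torch.randn(1, num_heads, 6, embed_dim // num_heads)   # 6 cached positions
key = torch.cat((past_key, key), dim=-2)
print(key.shape)   # torch.Size([1, 2, 7, 4])
```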
+ ) + + query = self.q_attn(hidden_states) + key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) + attention_mask = encoder_attention_mask + else: + query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if layer_past is not None: + past_key = layer_past[0] + past_value = layer_past[1] + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + present = None + if use_cache is True: + present = (key, value) + + query_length = query.shape[2] + tgt_len = key.shape[2] + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + query = query.transpose(1, 2).view(bsz, query_length, self.num_heads, self.head_dim) + key = key.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) + value = value.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) + + attn_dropout = self.attn_dropout.p if self.training else 0.0 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. (LlamaRMSNorm handles it correctly) + + if query.dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.c_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query = query.to(target_dtype) + key = key.to(target_dtype) + value = value.to(target_dtype) + + attn_output = self._flash_attention_forward( + query, key, value, attention_mask, query_length, dropout=attn_dropout + ) + + attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim) + attn_output = self.c_proj(attn_weights_reshaped) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights_reshaped,) + + return outputs + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. 
+ + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`float`): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. + causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
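`_upad_input` above packs only the real tokens of a padded batch into one flat dimension before calling the varlen kernel, and `pad_input` later scatters the results back. Roughly what the `index_first_axis` gather amounts to, in plain PyTorch (a sketch of the idea, not the flash-attn helpers themselves):

```python
import torch

mask = torch.tensor([[1, 1, 0],
                     [1, 1, 1]])
hidden = torch.arange(6.0).reshape(2, 3, 1)     # (batch, seq, dim); dim=1 for readability

indices = torch.nonzero(mask.flatten(), as_tuple=False).flatten()
# Flatten (batch, seq) into one token axis and keep only real tokens.
unpadded = hidden.reshape(-1, 1).index_select(0, indices)
print(unpadded.squeeze(-1))  # tensor([0., 1., 3., 4., 5.]) -- the padding token is gone
```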
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +class GPT2MLP(nn.Module): + def __init__(self, intermediate_size, config): + super().__init__() + embed_dim = config.hidden_size + self.c_fc = Conv1D(intermediate_size, embed_dim) + self.c_proj = Conv1D(embed_dim, intermediate_size) + self.act = ACT2FN[config.activation_function] + self.dropout = nn.Dropout(config.resid_pdrop) + + def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +GPT2_ATTENTION_CLASSES = { + "eager": GPT2Attention, + "flash_attention_2": GPT2FlashAttention2, +} + + +class GPT2Block(nn.Module): + def __init__(self, config, layer_idx=None): + super().__init__() + hidden_size = config.hidden_size + inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size + attention_class = GPT2_ATTENTION_CLASSES[config._attn_implementation] + + self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.attn = attention_class(config=config, layer_idx=layer_idx) + self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + if config.add_cross_attention: + self.crossattention = attention_class(config=config, is_cross_attention=True, layer_idx=layer_idx) + self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + self.mlp = GPT2MLP(inner_dim, config) + + def forward( + self, + hidden_states: Optional[Tuple[torch.FloatTensor]], + layer_past: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_outputs = self.attn( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + attn_output = attn_outputs[0] # output_attn: a, present, (attentions) + outputs = attn_outputs[1:] + # residual connection + hidden_states = attn_output + residual + + if encoder_hidden_states is not None: + # add one self-attention block for cross-attention + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " + "cross-attention layers by setting `config.add_cross_attention=True`" + ) + residual = hidden_states + hidden_states = self.ln_cross_attn(hidden_states) + cross_attn_outputs = self.crossattention( + hidden_states, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + ) + attn_output = cross_attn_outputs[0] + # residual connection + hidden_states = residual + attn_output + outputs = outputs + cross_attn_outputs[2:] 
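`GPT2Block.forward` above is the standard pre-LN residual layout: normalize, attend, add back; normalize, run the MLP, add back. Stripped to its skeleton, with attention stubbed out by `nn.Identity` just to show the wiring:

```python
import torch
import torch.nn as nn

class TinyBlock(nn.Module):
    def __init__(self, hidden, attn, mlp):
        super().__init__()
        self.ln_1, self.ln_2 = nn.LayerNorm(hidden), nn.LayerNorm(hidden)
        self.attn, self.mlp = attn, mlp

    def forward(self, x):
        x = x + self.attn(self.ln_1(x))   # pre-LN attention + residual
        x = x + self.mlp(self.ln_2(x))    # pre-LN MLP + residual
        return x

block = TinyBlock(8, nn.Identity(), nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8)))
print(block(torch.randn(1, 5, 8)).shape)  # torch.Size([1, 5, 8])
```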
# add cross attentions if we output attention weights + + residual = hidden_states + hidden_states = self.ln_2(hidden_states) + feed_forward_hidden_states = self.mlp(hidden_states) + # residual connection + hidden_states = residual + feed_forward_hidden_states + + if use_cache: + outputs = (hidden_states,) + outputs + else: + outputs = (hidden_states,) + outputs[1:] + + return outputs # hidden_states, present, (attentions, cross_attentions) + + +class GPT2PreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = GPT2Config + load_tf_weights = load_tf_weights_in_gpt2 + base_model_prefix = "transformer" + is_parallelizable = True + supports_gradient_checkpointing = True + _no_split_modules = ["GPT2Block"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + + def __init__(self, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear, Conv1D)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: + # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale + # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. + # > -- GPT-2 :: https://openai.com/blog/better-language-models/ + # + # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py + for name, p in module.named_parameters(): + if name == "c_proj.weight": + # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block + p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer))) + + +@dataclass +class GPT2DoubleHeadsModelOutput(ModelOutput): + """ + Base class for outputs of models predicting if two sentences are consecutive or not. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss. + mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided): + Multiple choice classification loss. + logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): + Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). + past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads, + sequence_length, embed_size_per_head)`). 
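The special-cased `c_proj.weight` init above implements the GPT-2 paper's depth-aware scaling quoted in the comment: residual-path projections get their std shrunk by 1/sqrt(2N), since each of the N blocks contributes two residual additions. As a one-liner with typical GPT-2 values:

```python
import math
import torch.nn as nn

n_layer, initializer_range, hidden = 12, 0.02, 768
c_proj = nn.Linear(hidden, hidden)

# std = initializer_range / sqrt(2 * n_layer): 2 residual adds per block, n_layer blocks.
c_proj.weight.data.normal_(mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
```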
+ + Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + GPT2Attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + mc_loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + mc_logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +GPT2_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`GPT2Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +GPT2_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): + `input_ids_length` = `sequence_length` if `past_key_values` is `None` else + `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input + sequence tokens in the vocabulary. + + If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as + `input_ids`. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`): + Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see + `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have + their past given to this model should not be passed as `input_ids` as they have already been computed. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for + `past_key_values`. In other words, the `attention_mask` always has to have the length: + `len(past_key_values) + len(input_ids)` + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + + If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see + `past_key_values`). + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" +PARALLELIZE_DOCSTRING = r""" + This is an experimental feature and is a subject to change at a moment's notice. + + Uses a device map to distribute attention modules of the model across several devices. If no device map is given, + it will evenly distribute blocks across all devices. + + Args: + device_map (`Dict[int, list]`, optional, defaults to None): + A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always + automatically mapped to the first device (for esoteric reasons). That means that the first device should + have fewer attention modules mapped to it than other devices. 
For reference, the gpt2 models have the + following number of attention modules: + + - openai-community/gpt2: 12 + - openai-community/gpt2-medium: 24 + - openai-community/gpt2-large: 36 + - openai-community/gpt2-xl: 48 + + Example: + + ```python + # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules: + model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-xl") + device_map = { + 0: [0, 1, 2, 3, 4, 5, 6, 7, 8], + 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], + 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34], + 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47], + } + model.parallelize(device_map) + ``` +""" +DEPARALLELIZE_DOCSTRING = r""" + Moves the model to cpu from a model parallel state. + + Example: + + ```python + # On a 4 GPU machine with openai-community/gpt2-large: + model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-large") + device_map = { + 0: [0, 1, 2, 3, 4, 5, 6, 7], + 1: [8, 9, 10, 11, 12, 13, 14, 15], + 2: [16, 17, 18, 19, 20, 21, 22, 23], + 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], + } + model.parallelize(device_map) # Splits the model across several devices + model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() + ``` +""" + + +@add_start_docstrings( + "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.", + GPT2_START_DOCSTRING, +) +class GPT2Model(GPT2PreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.embed_dim = config.hidden_size + + self.wte = nn.Embedding(config.vocab_size, self.embed_dim) + self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) + + self.drop = nn.Dropout(config.embd_pdrop) + self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]) + self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) + + # Model parallel + self.model_parallel = False + self.device_map = None + self.gradient_checkpointing = False + self._attn_implementation = config._attn_implementation + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings(PARALLELIZE_DOCSTRING) + def parallelize(self, device_map=None): + # Check validity of device_map + warnings.warn( + "`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your" + " model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own" + " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1," + " ...}", + FutureWarning, + ) + self.device_map = ( + get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map + ) + assert_device_map(self.device_map, len(self.h)) + self.model_parallel = True + self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) + self.last_device = "cuda:" + str(max(self.device_map.keys())) + self.wte = self.wte.to(self.first_device) + self.wpe = self.wpe.to(self.first_device) + # Load onto devices + for k, v in self.device_map.items(): + for block in v: + cuda_device = "cuda:" + str(k) + self.h[block] = self.h[block].to(cuda_device) + # ln_f to last + self.ln_f = self.ln_f.to(self.last_device) + + @add_start_docstrings(DEPARALLELIZE_DOCSTRING) + def deparallelize(self): + warnings.warn( + "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", + FutureWarning, + ) + self.model_parallel = False + self.device_map = None + self.first_device = "cpu" + self.last_device = "cpu" + self.wte = self.wte.to("cpu") + self.wpe = self.wpe.to("cpu") + for index in range(len(self.h)): + self.h[index] = self.h[index].to("cpu") + self.ln_f = self.ln_f.to("cpu") + torch.cuda.empty_cache() + + def get_input_embeddings(self): + return self.wte + + def set_input_embeddings(self, new_embeddings): + self.wte = new_embeddings + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} + """ + for layer, heads in heads_to_prune.items(): + self.h[layer].attn.prune_heads(heads) + + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPastAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + batch_size = input_ids.shape[0] + elif inputs_embeds is not None: + input_shape 
= inputs_embeds.size()[:-1] + batch_size = inputs_embeds.shape[0] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if token_type_ids is not None: + token_type_ids = token_type_ids.view(-1, input_shape[-1]) + + if past_key_values is None: + past_length = 0 + past_key_values = tuple([None] * len(self.h)) + else: + past_length = past_key_values[0][0].size(-2) + if position_ids is None: + position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0) + + # Attention mask. + if attention_mask is not None: + attention_mask = attention_mask.view(batch_size, -1) + if self._attn_implementation == "flash_attention_2": + attention_mask = attention_mask if 0 in attention_mask else None + else: + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + attention_mask = attention_mask[:, None, None, :] + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and the dtype's smallest value for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility + attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.add_cross_attention and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + if self._attn_implementation != "flash_attention_2": + encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # head_mask has shape n_layer x batch x n_heads x N x N + head_mask = self.get_head_mask(head_mask, self.config.n_layer) + + if inputs_embeds is None: + inputs_embeds = self.wte(input_ids) + position_embeds = self.wpe(position_ids) + hidden_states = inputs_embeds + position_embeds + + if token_type_ids is not None: + token_type_embeds = self.wte(token_type_ids) + hidden_states = hidden_states + token_type_embeds + + hidden_states = self.drop(hidden_states) + + output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
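The mask preparation in `forward` above turns a 2D keep/ignore mask into a broadcastable additive bias: 0 where attention is allowed, the dtype minimum where it is not, so adding it to the raw scores before softmax effectively removes masked positions. Concretely:

```python
import torch

dtype = torch.float16
attention_mask = torch.tensor([[1, 1, 0]])             # (batch, seq); 0 marks padding

extended = attention_mask[:, None, None, :].to(dtype)  # -> (batch, 1, 1, seq), broadcastable
extended = (1.0 - extended) * torch.finfo(dtype).min
print(extended)  # ~0 where attended, -65504 (fp16 min) where masked
```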
+ ) + use_cache = False + + presents = () if use_cache else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + all_hidden_states = () if output_hidden_states else None + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): + # Model parallel + if self.model_parallel: + torch.cuda.set_device(hidden_states.device) + # Ensure layer_past is on same device as hidden_states (might not be correct) + if layer_past is not None: + layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past) + # Ensure that attention_mask is always on the same device as hidden_states + if attention_mask is not None: + attention_mask = attention_mask.to(hidden_states.device) + if isinstance(head_mask, torch.Tensor): + head_mask = head_mask.to(hidden_states.device) + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + outputs = self._gradient_checkpointing_func( + block.__call__, + hidden_states, + None, + attention_mask, + head_mask[i], + encoder_hidden_states, + encoder_attention_mask, + use_cache, + output_attentions, + ) + else: + outputs = block( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask[i], + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + if use_cache is True: + presents = presents + (outputs[1],) + + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) + + # Model Parallel: If it's the last layer for that device, put things on the next device + if self.model_parallel: + for k, v in self.device_map.items(): + if i == v[-1] and "cuda:" + str(k) != self.last_device: + hidden_states = hidden_states.to("cuda:" + str(k + 1)) + + hidden_states = self.ln_f(hidden_states) + + hidden_states = hidden_states.view(output_shape) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] + if v is not None + ) + + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + """ + The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input + embeddings). 
+ """, + GPT2_START_DOCSTRING, +) +class GPT2LMHeadModel(GPT2PreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.transformer = GPT2Model(config) + self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) + + # Model parallel + self.model_parallel = False + self.device_map = None + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings(PARALLELIZE_DOCSTRING) + def parallelize(self, device_map=None): + warnings.warn( + "`GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" + " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" + " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':" + " 0, 'transformer.h.1': 1, ...}", + FutureWarning, + ) + self.device_map = ( + get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) + if device_map is None + else device_map + ) + assert_device_map(self.device_map, len(self.transformer.h)) + self.transformer.parallelize(self.device_map) + self.lm_head = self.lm_head.to(self.transformer.first_device) + self.model_parallel = True + + @add_start_docstrings(DEPARALLELIZE_DOCSTRING) + def deparallelize(self): + warnings.warn( + "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", + FutureWarning, + ) + self.transformer.deparallelize() + self.transformer = self.transformer.to("cpu") + self.lm_head = self.lm_head.to("cpu") + self.model_parallel = False + torch.cuda.empty_cache() + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): + token_type_ids = kwargs.get("token_type_ids", None) + # Omit tokens covered by past_key_values + if past_key_values: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + if token_type_ids is not None: + token_type_ids = token_type_ids[:, -input_ids.shape[1] :] + + attention_mask = kwargs.get("attention_mask", None) + position_ids = kwargs.get("position_ids", None) + + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + else: + position_ids = None + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "position_ids": position_ids, + "attention_mask": attention_mask, + "token_type_ids": token_type_ids, + } + ) + + return model_inputs + + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + 
output_type=CausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set + `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` + are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + + # Set device for model parallelism + if self.model_parallel: + torch.cuda.set_device(self.transformer.first_device) + hidden_states = hidden_states.to(self.lm_head.weight.device) + + lm_logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(lm_logits.device) + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + cross_attentions=transformer_outputs.cross_attentions, + ) + + @staticmethod + def _reorder_cache( + past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor + ) -> Tuple[Tuple[torch.Tensor]]: + """ + This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or + [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. 
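The loss block above implements the usual causal-LM shift: the logits at position t are scored against the label at position t+1, which is exactly why `labels = input_ids` works directly. The same few lines in isolation:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab = 10
lm_logits = torch.randn(2, 5, vocab)
labels = torch.randint(0, vocab, (2, 5))

# Token t predicts token t+1: drop the last logit and the first label.
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab), shift_labels.view(-1))
print(loss)
```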
+ """ + return tuple( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) + for layer_past in past_key_values + ) + + +@add_start_docstrings( + """ +The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for +RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the +input embeddings, the classification head takes as input the input of a specified classification token index in the +input sequence). +""", + GPT2_START_DOCSTRING, +) +class GPT2DoubleHeadsModel(GPT2PreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + config.num_labels = 1 + self.transformer = GPT2Model(config) + self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) + self.multiple_choice_head = SequenceSummary(config) + + # Model parallel + self.model_parallel = False + self.device_map = None + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings(PARALLELIZE_DOCSTRING) + def parallelize(self, device_map=None): + warnings.warn( + "`GPT2DoubleHeadsModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should" + " load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your" + " own `device_map` but it needs to be a dictionary module_name to device, so for instance" + " {'transformer.h.0': 0, 'transformer.h.1': 1, ...}", + FutureWarning, + ) + self.device_map = ( + get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) + if device_map is None + else device_map + ) + assert_device_map(self.device_map, len(self.transformer.h)) + self.transformer.parallelize(self.device_map) + self.lm_head = self.lm_head.to(self.transformer.first_device) + self.multiple_choice_head = self.multiple_choice_head.to(self.transformer.first_device) + self.model_parallel = True + + @add_start_docstrings(DEPARALLELIZE_DOCSTRING) + def deparallelize(self): + warnings.warn( + "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", + FutureWarning, + ) + self.transformer.deparallelize() + self.transformer = self.transformer.to("cpu") + self.lm_head = self.lm_head.to("cpu") + self.multiple_choice_head = self.multiple_choice_head.to("cpu") + self.model_parallel = False + torch.cuda.empty_cache() + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): + token_type_ids = kwargs.get("token_type_ids", None) + # Omit tokens covered by past_key_values + if past_key_values: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + if token_type_ids is not None: + token_type_ids = token_type_ids[:, -input_ids.shape[1] :] + + attention_mask = kwargs.get("attention_mask", None) + position_ids = kwargs.get("position_ids", None) + + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + 
position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + else: + position_ids = None + + return { + "input_ids": input_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "position_ids": position_ids, + "attention_mask": attention_mask, + "token_type_ids": token_type_ids, + } + + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + mc_token_ids: Optional[torch.LongTensor] = None, + labels: Optional[torch.LongTensor] = None, + mc_labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Tuple, GPT2DoubleHeadsModelOutput]: + r""" + mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): + Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - + 1]`. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set + `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to + `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]` + mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` + where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above) + + Return: + + Example: + + ```python + >>> import torch + >>> from transformers import AutoTokenizer, GPT2DoubleHeadsModel + + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> model = GPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2") + + >>> # Add a [CLS] to the vocabulary (we should train it also!) 
+ >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"}) + >>> # Update the model embeddings with the new vocabulary size + >>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) + + >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] + >>> encoded_choices = [tokenizer.encode(s) for s in choices] + >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] + + >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2 + >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1 + + >>> outputs = model(input_ids, mc_token_ids=mc_token_ids) + >>> lm_logits = outputs.logits + >>> mc_logits = outputs.mc_logits + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = transformer_outputs[0] + + # Set device for model parallelism + if self.model_parallel: + torch.cuda.set_device(self.transformer.first_device) + hidden_states = hidden_states.to(self.lm_head.weight.device) + + lm_logits = self.lm_head(hidden_states) + mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) + + mc_loss = None + if mc_labels is not None: + loss_fct = CrossEntropyLoss() + mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)) + lm_loss = None + if labels is not None: + labels = labels.to(lm_logits.device) + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + if not return_dict: + output = (lm_logits, mc_logits) + transformer_outputs[1:] + if mc_loss is not None: + output = (mc_loss,) + output + return ((lm_loss,) + output) if lm_loss is not None else output + + return GPT2DoubleHeadsModelOutput( + loss=lm_loss, + mc_loss=mc_loss, + logits=lm_logits, + mc_logits=mc_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + @staticmethod + def _reorder_cache( + past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor + ) -> Tuple[Tuple[torch.Tensor]]: + """ + This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or + [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. + """ + return tuple( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) + for layer_past in past_key_values + ) + + +@add_start_docstrings( + """ + The GPT2 Model transformer with a sequence classification head on top (linear layer). + + [`GPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-1) do. + + Since it does classification on the last token, it requires to know the position of the last token. 
If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """, + GPT2_START_DOCSTRING, +) +class GPT2ForSequenceClassification(GPT2PreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.transformer = GPT2Model(config) + self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) + + # Model parallel + self.model_parallel = False + self.device_map = None + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint="microsoft/DialogRPT-updown", + output_type=SequenceClassifierOutputWithPast, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size, sequence_length = input_ids.shape[:2] + else: + batch_size, sequence_length = inputs_embeds.shape[:2] + + assert ( + self.config.pad_token_id is not None or batch_size == 1 + ), "Cannot handle batch sizes > 1 if no padding token is defined." + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + logger.warning( + f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " + "unexpected if using padding tokens in conjunction with `inputs_embeds.`" + ) + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@add_start_docstrings( + """ + GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + GPT2_START_DOCSTRING, +) +class GPT2ForTokenClassification(GPT2PreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.transformer = GPT2Model(config) + if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None: + classifier_dropout = config.classifier_dropout + elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: + classifier_dropout = config.hidden_dropout + else: + classifier_dropout = 0.1 + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Model parallel + self.model_parallel = False + self.device_map = None + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) + # fmt: off + @add_code_sample_docstrings( + checkpoint="brad1141/gpt2-finetuned-comp2", + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_loss=0.25, + expected_output=[ + "Lead", + "Lead", + "Lead", + "Position", + "Lead", + "Lead", + "Lead", + "Lead", + "Lead", + "Lead", + "Lead", + "Lead", + ], + ) + # fmt: on + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, 
sequence_length)`, *optional*):
+            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        transformer_outputs = self.transformer(
+            input_ids,
+            past_key_values=past_key_values,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        hidden_states = transformer_outputs[0]
+        hidden_states = self.dropout(hidden_states)
+        logits = self.classifier(hidden_states)
+
+        loss = None
+        if labels is not None:
+            labels = labels.to(logits.device)
+            loss_fct = CrossEntropyLoss()
+            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+        if not return_dict:
+            output = (logits,) + transformer_outputs[2:]
+            return ((loss,) + output) if loss is not None else output
+
+        return TokenClassifierOutput(
+            loss=loss,
+            logits=logits,
+            hidden_states=transformer_outputs.hidden_states,
+            attentions=transformer_outputs.attentions,
+        )
+
+
+@add_start_docstrings(
+    """
+    The GPT-2 Model transformer with a span classification head on top for extractive question-answering tasks like
+    SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+    """,
+    GPT2_START_DOCSTRING,
+)
+class GPT2ForQuestionAnswering(GPT2PreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+        self.transformer = GPT2Model(config)
+        self.qa_outputs = nn.Linear(config.hidden_size, 2)
+
+        # Model parallel
+        self.model_parallel = False
+        self.device_map = None
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+    @add_code_sample_docstrings(
+        checkpoint=_CHECKPOINT_FOR_DOC,
+        output_type=QuestionAnsweringModelOutput,
+        config_class=_CONFIG_FOR_DOC,
+        real_checkpoint=_CHECKPOINT_FOR_DOC,
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        token_type_ids: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        start_positions: Optional[torch.LongTensor] = None,
+        end_positions: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+        r"""
+        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.transformer(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = outputs[0]
+
+        logits = self.qa_outputs(sequence_output)
+        start_logits, end_logits = logits.split(1, dim=-1)
+        start_logits = start_logits.squeeze(-1).contiguous()
+        end_logits = end_logits.squeeze(-1).contiguous()
+
+        total_loss = None
+        if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, splitting adds a dimension; squeeze it
+            if len(start_positions.size()) > 1:
+                start_positions = start_positions.squeeze(-1).to(start_logits.device)
+            if len(end_positions.size()) > 1:
+                end_positions = end_positions.squeeze(-1).to(end_logits.device)
+            # sometimes the start/end positions are outside our model inputs; we ignore these terms
+            ignored_index = start_logits.size(1)
+            start_positions = start_positions.clamp(0, ignored_index)
+            end_positions = end_positions.clamp(0, ignored_index)
+
+            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+            start_loss = loss_fct(start_logits, start_positions)
+            end_loss = loss_fct(end_logits, end_positions)
+            total_loss = (start_loss + end_loss) / 2
+
+        if not return_dict:
+            output = (start_logits, end_logits) + outputs[2:]
+            return ((total_loss,) + output) if total_loss is not None else output
+
+        return QuestionAnsweringModelOutput(
+            loss=total_loss,
+            start_logits=start_logits,
+            end_logits=end_logits,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/modeling_tf_gpt2.py b/venv/lib/python3.10/site-packages/transformers/models/gpt2/modeling_tf_gpt2.py
new file mode 100644
index 0000000000000000000000000000000000000000..26a4e7a398ae8d598e350cdfc1c7b17532936151
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/gpt2/modeling_tf_gpt2.py
@@ -0,0 +1,1238 @@
+# coding=utf-8
+# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 OpenAI GPT-2 model.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import ( + TFBaseModelOutputWithPastAndCrossAttentions, + TFCausalLMOutputWithCrossAttentions, + TFSequenceClassifierOutputWithPast, +) +from ...modeling_tf_utils import ( + TFCausalLanguageModelingLoss, + TFConv1D, + TFModelInputType, + TFPreTrainedModel, + TFSequenceClassificationLoss, + TFSequenceSummary, + get_initializer, + keras, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_gpt2 import GPT2Config + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "openai-community/gpt2" +_CONFIG_FOR_DOC = "GPT2Config" + + +from ..deprecated._archive_maps import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +class TFAttention(keras.layers.Layer): + def __init__(self, nx, config, scale=False, is_cross_attention=False, **kwargs): + super().__init__(**kwargs) + + n_state = nx # in Attention: n_state=768 (nx=n_embd) + # [switch nx => n_state from Block to Attention to keep identical to TF implementation] + assert n_state % config.n_head == 0 + self.n_head = config.n_head + self.split_size = n_state + self.scale = scale + self.output_attentions = config.output_attentions + + self.is_cross_attention = is_cross_attention + + if self.is_cross_attention: + self.c_attn = TFConv1D(n_state * 2, nx, initializer_range=config.initializer_range, name="c_attn") + self.q_attn = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="q_attn") + else: + self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn") + + self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj") + self.attn_dropout = keras.layers.Dropout(config.attn_pdrop) + self.resid_dropout = keras.layers.Dropout(config.resid_pdrop) + self.pruned_heads = set() + self.embed_dim = n_state + + def prune_heads(self, heads): + pass + + @staticmethod + def causal_attention_mask(nd, ns, dtype): + """ + 1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]), + -1, ns-nd), but doesn't produce garbage on TPUs. + """ + i = tf.range(nd)[:, None] + j = tf.range(ns) + m = i >= j - ns + nd + return tf.cast(m, dtype) + + def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False): + # q, k, v have shape [batch, heads, sequence, features] + w = tf.matmul(q, k, transpose_b=True) + if self.scale: + dk = tf.cast(shape_list(k)[-1], dtype=w.dtype) # scale attention_scores + w = w / tf.math.sqrt(dk) + + if not self.is_cross_attention: + # if only "normal" attention layer implements causal mask + + # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. 
+ _, _, nd, ns = shape_list(w) + b = self.causal_attention_mask(nd, ns, dtype=w.dtype) + b = tf.reshape(b, [1, 1, nd, ns]) + w = w * b - 1e4 * (1 - b) + + if attention_mask is not None: + # Apply the attention mask + attention_mask = tf.cast(attention_mask, dtype=w.dtype) + w = w + attention_mask + + w = stable_softmax(w, axis=-1) + w = self.attn_dropout(w, training=training) + + # Mask heads if we want to + if head_mask is not None: + w = w * head_mask + + outputs = [tf.matmul(w, v)] + if output_attentions: + outputs.append(w) + return outputs + + def merge_heads(self, x): + x = tf.transpose(x, [0, 2, 1, 3]) + x_shape = shape_list(x) + new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] + return tf.reshape(x, new_x_shape) + + def split_heads(self, x): + x_shape = shape_list(x) + new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] + x = tf.reshape(x, new_x_shape) + return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) + + def call( + self, + x, + layer_past, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + use_cache, + output_attentions, + training=False, + ): + if encoder_hidden_states is not None: + if not hasattr(self, "q_attn"): + raise ValueError( + "If class is used as cross attention, the weights `q_attn` have to be defined. " + "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." + ) + + query = self.q_attn(x) + kv_out = self.c_attn(encoder_hidden_states) + key, value = tf.split(kv_out, 2, axis=2) + attention_mask = encoder_attention_mask + else: + x = self.c_attn(x) + query, key, value = tf.split(x, 3, axis=2) + + query = self.split_heads(query) + key = self.split_heads(key) + value = self.split_heads(value) + if layer_past is not None: + past_key, past_value = tf.unstack(layer_past, axis=0, num=2) + key = tf.concat([past_key, key], axis=-2) + value = tf.concat([past_value, value], axis=-2) + + # to cope with keras serialization + if use_cache: + present = tf.stack([key, value], axis=0) + else: + present = (None,) + + attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training) + a = attn_outputs[0] + + a = self.merge_heads(a) + a = self.c_proj(a) + a = self.resid_dropout(a, training=training) + + outputs = [a, present] + attn_outputs[1:] + return outputs # a, present, (attentions) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if self.is_cross_attention: + c_attn_shape = 2 * self.embed_dim + else: + c_attn_shape = 3 * self.embed_dim + if getattr(self, "c_proj", None) is not None: + with tf.name_scope(self.c_proj.name): + self.c_proj.build([None, None, self.embed_dim]) + if getattr(self, "c_attn", None) is not None: + with tf.name_scope(self.c_attn.name): + self.c_attn.build([None, None, c_attn_shape]) + if getattr(self, "q_attn", None) is not None: + with tf.name_scope(self.q_attn.name): + self.q_attn.build([None, None, self.embed_dim]) + + +class TFMLP(keras.layers.Layer): + def __init__(self, n_state, config, **kwargs): + super().__init__(**kwargs) + nx = config.n_embd + self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc") + self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj") + self.act = get_tf_activation(config.activation_function) + self.dropout = keras.layers.Dropout(config.resid_pdrop) + self.intermediate_size = n_state + self.embed_dim = nx + + def call(self, x, training=False): + h 
= self.act(self.c_fc(x)) + h2 = self.c_proj(h) + h2 = self.dropout(h2, training=training) + return h2 + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "c_fc", None) is not None: + with tf.name_scope(self.c_fc.name): + self.c_fc.build([None, None, self.intermediate_size]) + if getattr(self, "c_proj", None) is not None: + with tf.name_scope(self.c_proj.name): + self.c_proj.build([None, None, self.embed_dim]) + + +class TFBlock(keras.layers.Layer): + def __init__(self, config, scale=False, **kwargs): + super().__init__(**kwargs) + nx = config.n_embd + inner_dim = config.n_inner if config.n_inner is not None else 4 * nx + self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1") + self.attn = TFAttention(nx, config, scale, name="attn") + self.ln_2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2") + + if config.add_cross_attention: + self.crossattention = TFAttention(nx, config, scale, name="crossattention", is_cross_attention=True) + self.ln_cross_attn = keras.layers.LayerNormalization( + epsilon=config.layer_norm_epsilon, name="ln_cross_attn" + ) + + self.mlp = TFMLP(inner_dim, config, name="mlp") + self.hidden_size = config.hidden_size + + def call( + self, + x, + layer_past, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + use_cache, + output_attentions, + training=False, + ): + a = self.ln_1(x) + output_attn = self.attn( + a, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=None, + encoder_attention_mask=None, + use_cache=use_cache, + output_attentions=output_attentions, + training=training, + ) + a = output_attn[0] # output_attn: a, present, (attentions) + outputs = output_attn[1:] + x = x + a + + # Cross-Attention Block + if encoder_hidden_states is not None: + # add one self-attention block for cross-attention + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " + "cross-attention layers by setting `config.add_cross_attention=True`" + ) + + ca = self.ln_cross_attn(x) + output_cross_attn = self.crossattention( + ca, + layer_past=None, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=False, + output_attentions=output_attentions, + training=training, + ) + ca = output_cross_attn[0] # output_attn: a, present, (cross_attentions) + x = x + ca + outputs = outputs + output_cross_attn[2:] # add cross attentions if we output attention weights + + m = self.ln_2(x) + m = self.mlp(m, training=training) + x = x + m + + outputs = [x] + outputs + return outputs # x, present, (attentions, cross_attentions) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "ln_1", None) is not None: + with tf.name_scope(self.ln_1.name): + self.ln_1.build([None, None, self.hidden_size]) + if getattr(self, "attn", None) is not None: + with tf.name_scope(self.attn.name): + self.attn.build(None) + if getattr(self, "ln_2", None) is not None: + with tf.name_scope(self.ln_2.name): + self.ln_2.build([None, None, self.hidden_size]) + if getattr(self, "mlp", None) is not None: + with tf.name_scope(self.mlp.name): + self.mlp.build(None) + if getattr(self, "crossattention", None) is not None: + with tf.name_scope(self.crossattention.name): + 
self.crossattention.build(None) + if getattr(self, "ln_cross_attn", None) is not None: + with tf.name_scope(self.ln_cross_attn.name): + self.ln_cross_attn.build([None, None, self.hidden_size]) + + +@keras_serializable +class TFGPT2MainLayer(keras.layers.Layer): + config_class = GPT2Config + + def __init__(self, config, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + self.config = config + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.use_cache = config.use_cache + self.return_dict = config.use_return_dict + + self.num_hidden_layers = config.n_layer + self.n_embd = config.n_embd + self.n_positions = config.n_positions + self.initializer_range = config.initializer_range + + self.wte = keras.layers.Embedding( + input_dim=config.vocab_size, + output_dim=config.hidden_size, + embeddings_initializer=get_initializer(config.initializer_range), + name="wte", + ) + self.wpe = keras.layers.Embedding( + input_dim=config.n_positions, + output_dim=config.n_embd, + embeddings_initializer=get_initializer(config.initializer_range), + name="wpe", + ) + self.drop = keras.layers.Dropout(config.embd_pdrop) + self.h = [TFBlock(config, scale=True, name=f"h_._{i}") for i in range(config.n_layer)] + self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f") + self.embed_dim = config.hidden_size + + def get_input_embeddings(self): + return self.wte + + def set_input_embeddings(self, new_embeddings): + self.wte = new_embeddings + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} + """ + raise NotImplementedError + + @unpack_inputs + def call( + self, + input_ids: TFModelInputType | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + encoder_hidden_states: np.ndarray | tf.Tensor | None = None, + encoder_attention_mask: np.ndarray | tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if past_key_values is None: + past_length = 0 + past_key_values = [None] * len(self.h) + else: + past_length = shape_list(past_key_values[0][0])[-2] + + if position_ids is None: + position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0) + + if attention_mask is not None: + # We create a 3D attention mask from a 2D tensor mask. 
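+            # (added example, not in the original source: a 2D mask [[1, 1, 0]]
+            # is reshaped to [batch_size, 1, 1, 3] and becomes the additive scores
+            # [[0.0, 0.0, -10000.0]] after the arithmetic below)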
+            # Sizes are [batch_size, 1, 1, to_seq_length]
+            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+            # This attention mask is simpler than the triangular masking of causal attention
+            # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+            attention_mask_shape = shape_list(attention_mask)
+            attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))
+
+            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+            # masked positions, this operation will create a tensor which is 0.0 for
+            # positions we want to attend and -10000.0 for masked positions.
+            # Since we are adding it to the raw scores before the softmax, this is
+            # effectively the same as removing these entirely.
+            one_cst = tf.constant(1.0)
+            attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
+            attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))
+
+        # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
+        if self.config.add_cross_attention and encoder_attention_mask is not None:
+            # If a 2D or 3D attention mask is provided for the cross-attention,
+            # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+            encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=encoder_hidden_states.dtype)
+            num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
+            if num_dims_encoder_attention_mask == 3:
+                encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
+            if num_dims_encoder_attention_mask == 2:
+                encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
+
+            # T5 has a mask that can compare sequence ids; we can simulate this here with this transposition
+            # Cf.
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 + # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, + # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) + + encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 + else: + encoder_extended_attention_mask = None + + encoder_attention_mask = encoder_extended_attention_mask + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + if head_mask is not None: + raise NotImplementedError + else: + head_mask = [None] * self.num_hidden_layers + # head_mask = tf.constant([0] * self.num_hidden_layers) + + position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) + + if inputs_embeds is None: + check_embeddings_within_bounds(input_ids, self.config.vocab_size) + inputs_embeds = self.wte(input_ids) + + position_embeds = self.wpe(position_ids) + + if token_type_ids is not None: + token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) + token_type_embeds = self.wte(token_type_ids) + else: + token_type_embeds = tf.constant(0.0) + + position_embeds = tf.cast(position_embeds, dtype=inputs_embeds.dtype) + token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype) + hidden_states = inputs_embeds + position_embeds + token_type_embeds + hidden_states = self.drop(hidden_states, training=training) + + output_shape = input_shape + [shape_list(hidden_states)[-1]] + + presents = () if use_cache else None + all_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + all_hidden_states = () if output_hidden_states else None + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): + if output_hidden_states: + all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) + + outputs = block( + hidden_states, + layer_past, + attention_mask, + head_mask[i], + encoder_hidden_states, + encoder_attention_mask, + use_cache, + output_attentions, + training=training, + ) + + hidden_states, present = outputs[:2] + if use_cache: + presents = presents + (present,) + + if output_attentions: + all_attentions = all_attentions + (outputs[2],) + if self.config.add_cross_attention and encoder_hidden_states is not None: + all_cross_attentions = all_cross_attentions + (outputs[3],) + + hidden_states = self.ln_f(hidden_states) + + hidden_states = tf.reshape(hidden_states, output_shape) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if output_attentions: + # let the number of heads free (-1) so we can extract attention even after head pruning + attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] + all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) + + if not return_dict: + return tuple( + v + for v in [hidden_states, presents, all_hidden_states, all_attentions, all_cross_attentions] + if v is not None + ) + + return TFBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + 
attentions=all_attentions, + cross_attentions=all_cross_attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "wte", None) is not None: + with tf.name_scope(self.wte.name): + self.wte.build(None) + if getattr(self, "wpe", None) is not None: + with tf.name_scope(self.wpe.name): + self.wpe.build(None) + if getattr(self, "ln_f", None) is not None: + with tf.name_scope(self.ln_f.name): + self.ln_f.build([None, None, self.embed_dim]) + if getattr(self, "h", None) is not None: + for layer in self.h: + with tf.name_scope(layer.name): + layer.build(None) + + +class TFGPT2PreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = GPT2Config + base_model_prefix = "transformer" + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias", r"h.\d+.crossattention.bias"] + + @property + def input_signature(self): + # Although GPT-2 supports token_type_ids in theory, in practice they are rarely used, and the implementation + # means that passing token_type_ids=0 yields different outputs from token_type_ids=None. + # Therefore, we remove the token_type_ids argument by default, even though it would usually be included. + return { + "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + } + + +@dataclass +class TFGPT2DoubleHeadsModelOutput(ModelOutput): + """ + Base class for outputs of models predicting if two sentences are consecutive or not. + + Args: + logits (`tf.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + mc_logits (`tf.Tensor` of shape `(batch_size, num_choices)`): + Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). + past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, + sequence_length, embed_size_per_head)`). + + Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ """ + + logits: tf.Tensor = None + mc_logits: tf.Tensor = None + past_key_values: List[tf.Tensor] | None = None + hidden_states: Tuple[tf.Tensor] | None = None + attentions: Tuple[tf.Tensor] | None = None + + +GPT2_START_DOCSTRING = r""" + + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + + + TensorFlow models and layers in `transformers` accept two formats as input: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + + + + Parameters: + config ([`GPT2Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +GPT2_INPUTS_DOCSTRING = r""" + Args: + input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`): + `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]` + (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. + + If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as + `input_ids`. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + past_key_values (`List[tf.Tensor]` of length `config.n_layers`): + Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see + `past_key_values` output below). Can be used to speed up sequential decoding. 
The token ids which have + their past given to this model should not be passed as input ids as they have already been computed. + attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for + `past_key_values`. In other words, the `attention_mask` always has to have the length: + `len(past_key_values) + len(input_ids)` + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). 
+""" + + +@add_start_docstrings( + "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.", + GPT2_START_DOCSTRING, +) +class TFGPT2Model(TFGPT2PreTrainedModel): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.transformer = TFGPT2MainLayer(config, name="transformer") + + @unpack_inputs + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFBaseModelOutputWithPastAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + encoder_hidden_states: np.ndarray | tf.Tensor | None = None, + encoder_attention_mask: np.ndarray | tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: + r""" + encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) + contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If `past` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have + their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past`). 
Set to `False` during training, `True` during generation + """ + + outputs = self.transformer( + input_ids=input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "transformer", None) is not None: + with tf.name_scope(self.transformer.name): + self.transformer.build(None) + + +@add_start_docstrings( + """ + The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input + embeddings). + """, + GPT2_START_DOCSTRING, +) +class TFGPT2LMHeadModel(TFGPT2PreTrainedModel, TFCausalLanguageModelingLoss): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.transformer = TFGPT2MainLayer(config, name="transformer") + + def get_output_embeddings(self): + return self.get_input_embeddings() + + def set_output_embeddings(self, value): + self.set_input_embeddings(value) + + def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): + token_type_ids = kwargs.get("token_type_ids", None) + # only last token for inputs_ids if past is defined in kwargs + if past_key_values: + inputs = tf.expand_dims(inputs[:, -1], -1) + if token_type_ids is not None: + token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1) + + position_ids = kwargs.get("position_ids", None) + attention_mask = kwargs.get("attention_mask", None) + + if attention_mask is not None and position_ids is None: + position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True) + if past_key_values: + position_ids = tf.expand_dims(position_ids[:, -1], -1) + + return { + "input_ids": inputs, + "attention_mask": attention_mask, + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": use_cache, + "token_type_ids": token_type_ids, + } + + @unpack_inputs + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFCausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + encoder_hidden_states: np.ndarray | tf.Tensor | None = None, + encoder_attention_mask: np.ndarray | tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: np.ndarray | tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: + r""" + encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of 
the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) + contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If `past` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have + their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past`). Set to `False` during training, `True` during generation + labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., + config.vocab_size - 1]`. + """ + + transformer_outputs = self.transformer( + input_ids=input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + hidden_states = transformer_outputs[0] + logits = tf.matmul(hidden_states, self.transformer.wte.weights, transpose_b=True) + + loss = None + if labels is not None: + # shift labels to the left and cut last logit token + shifted_logits = logits[:, :-1] + labels = labels[:, 1:] + loss = self.hf_compute_loss(labels, shifted_logits) + + if not return_dict: + output = (logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFCausalLMOutputWithCrossAttentions( + loss=loss, + logits=logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + cross_attentions=transformer_outputs.cross_attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "transformer", None) is not None: + with tf.name_scope(self.transformer.name): + self.transformer.build(None) + + +@add_start_docstrings( + """ + The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for + RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the + input embeddings, the classification head takes as input the input of a specified classification token index in the + input sequence). 
+ """, + GPT2_START_DOCSTRING, +) +class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + config.num_labels = 1 + self.transformer = TFGPT2MainLayer(config, name="transformer") + self.multiple_choice_head = TFSequenceSummary( + config, initializer_range=config.initializer_range, name="multiple_choice_head" + ) + + @unpack_inputs + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFGPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + input_ids: TFModelInputType | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + mc_token_ids: np.ndarray | tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[TFGPT2DoubleHeadsModelOutput, Tuple[tf.Tensor]]: + r""" + mc_token_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): + Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - + 1]`. + + Return: + + Examples: + + ```python + >>> import tensorflow as tf + >>> from transformers import AutoTokenizer, TFGPT2DoubleHeadsModel + + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> model = TFGPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2") + + >>> # Add a [CLS] to the vocabulary (we should train it also!) + >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"}) + + >>> embedding_layer = model.resize_token_embeddings( + ... len(tokenizer) + ... 
) # Update the model embeddings with the new vocabulary size + + >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] + >>> encoded_choices = [tokenizer.encode(s) for s in choices] + >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] + + >>> input_ids = tf.constant(encoded_choices)[None, :] # Batch size: 1, number of choices: 2 + >>> mc_token_ids = tf.constant([cls_token_location]) # Batch size: 1 + + >>> outputs = model(input_ids, mc_token_ids=mc_token_ids) + >>> lm_prediction_scores, mc_prediction_scores = outputs[:2] + ```""" + + if input_ids is not None: + input_shapes = shape_list(input_ids) + else: + input_shapes = shape_list(inputs_embeds)[:-1] + + seq_length = input_shapes[-1] + flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None + flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None + flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None + flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None + transformer_outputs = self.transformer( + input_ids=flat_input_ids, + past_key_values=past_key_values, + attention_mask=flat_attention_mask, + token_type_ids=flat_token_type_ids, + position_ids=flat_position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=None, + encoder_attention_mask=None, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + hidden_states = transformer_outputs[0] + hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:]) + if return_dict and output_hidden_states: + # We do this to match the slightly odd PT behaviour - the final hidden state is reshaped to rank 4 when the + # input is rank 3, but all other hidden states remain at rank-3 (with the first 2 dims merged) + all_hidden_states = transformer_outputs.hidden_states[:-1] + (hidden_states,) + else: + all_hidden_states = None + lm_logits = tf.matmul(hidden_states, self.transformer.wte.weights, transpose_b=True) + mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids, training=training) + mc_logits = tf.squeeze(mc_logits, axis=-1) + + if not return_dict: + return (lm_logits, mc_logits) + transformer_outputs[1:] + + return TFGPT2DoubleHeadsModelOutput( + logits=lm_logits, + mc_logits=mc_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=all_hidden_states, + attentions=transformer_outputs.attentions, + ) + + @property + def input_signature(self): + return { + "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), + "mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="mc_token_ids"), + } + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "transformer", None) is not None: + with tf.name_scope(self.transformer.name): + self.transformer.build(None) + if getattr(self, "multiple_choice_head", None) is not None: + with tf.name_scope(self.multiple_choice_head.name): + self.multiple_choice_head.build(None) + + +@add_start_docstrings( + """ + The GPT2 Model transformer with a sequence classification head on top (linear layer). 
+ + [`TFGPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-1) do. + + Since it does classification on the last token, it needs to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in + each row of the batch). + """, + GPT2_START_DOCSTRING, +) +class TFGPT2ForSequenceClassification(TFGPT2PreTrainedModel, TFSequenceClassificationLoss): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + self.score = keras.layers.Dense( + config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name="score", + use_bias=False, + ) + self.transformer = TFGPT2MainLayer(config, name="transformer") + self.config = config + + @unpack_inputs + @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint="microsoft/DialogRPT-updown", + output_type=TFSequenceClassifierOutputWithPast, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: np.ndarray | tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification loss. Indices should be in `[0, ..., + config.num_labels - 1]`. + """ + transformer_outputs = self.transformer( + input_ids=input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + logits_shape = shape_list(logits) + in_logits = None + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = ( + tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1) + - 1 + ) + sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1) + in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1) + else: + sequence_lengths = -1 + logger.warning( + f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`.
Results may be " + "unexpected if using padding tokens in conjunction with `inputs_embeds.`" + ) + loss = None + + if labels is not None: + assert ( + self.config.pad_token_id is not None or logits_shape[0] == 1 + ), "Cannot handle batch sizes > 1 if no padding token is defined." + + if not tf.is_tensor(sequence_lengths): + in_logits = logits[0 : logits_shape[0], sequence_lengths] + + loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels])) + pooled_logits = in_logits if in_logits is not None else logits + + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFSequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "score", None) is not None: + with tf.name_scope(self.score.name): + self.score.build([None, None, self.config.n_embd]) + if getattr(self, "transformer", None) is not None: + with tf.name_scope(self.transformer.name): + self.transformer.build(None) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2.py b/venv/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2.py new file mode 100644 index 0000000000000000000000000000000000000000..36f3ca8fadb527f76359ac6bd8f65989ad631992 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2.py @@ -0,0 +1,345 @@ +# coding=utf-8 +# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for OpenAI GPT.""" + + +import json +import os +from functools import lru_cache +from typing import List, Optional, Tuple + +import regex as re + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "merges_file": "merges.txt", +} + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. + + The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab + if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for + decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup + tables between utf-8 bytes and unicode strings. 
+ """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class GPT2Tokenizer(PreTrainedTokenizer): + """ + Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ```python + >>> from transformers import GPT2Tokenizer + + >>> tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") + >>> tokenizer("Hello world")["input_ids"] + [15496, 995] + + >>> tokenizer(" Hello world")["input_ids"] + [18435, 995] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + + + When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). + + + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The beginning of sequence token. + eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The end of sequence token. + pad_token (`str`, *optional*): + The token used for padding, for example when batching sequences of different lengths. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (GPT2 tokenizer detect beginning of words by the preceding space). + add_bos_token (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial beginning of sentence token to the input. This allows to treat the leading + word just as any other word. 
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + merges_file, + errors="replace", + unk_token="<|endoftext|>", + bos_token="<|endoftext|>", + eos_token="<|endoftext|>", + pad_token=None, + add_prefix_space=False, + add_bos_token=False, + **kwargs, + ): + bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token + + self.add_bos_token = add_bos_token + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + bpe_merges = merges_handle.read().split("\n")[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + self.add_prefix_space = add_prefix_space + + # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions + self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") + + super().__init__( + errors=errors, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + add_prefix_space=add_prefix_space, + add_bos_token=add_bos_token, + **kwargs, + ) + + @property + def vocab_size(self): + return len(self.encoder) + + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + if self.add_bos_token: + bos_token_ids = [self.bos_token_id] + else: + bos_token_ids = [] + + output = bos_token_ids + token_ids_0 + + if token_ids_1 is None: + return output + + return output + bos_token_ids + token_ids_1 + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. 
+ + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if not self.add_bos_token: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) to an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) to a token (str) using the vocab.""" + return self.decoder.get(index) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) into a single string.""" + text = "".join(tokens) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) + return text + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): + add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) + if is_split_into_words or add_prefix_space: + text = " " + text + return (text, kwargs) + + @property + def default_chat_template(self): + """ + A simple chat template that ignores role information and just concatenates messages with EOS tokens. + """ + logger.warning_once( + "\nNo chat template is defined for this tokenizer - using the default template " + f"for the {self.__class__.__name__} class.
If the default is not appropriate for " + "your model, please set `tokenizer.chat_template` to an appropriate template. " + "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n" + ) + return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}" diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2_fast.py b/venv/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..fb3a5d4a0ce3f21dd7c7cb31ff4a740d8cdd8037 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2_fast.py @@ -0,0 +1,156 @@ +# coding=utf-8 +# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for OpenAI GPT-2.""" + + +import json +from typing import Optional, Tuple + +from tokenizers import pre_tokenizers + +from ...tokenization_utils_base import BatchEncoding +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging +from .tokenization_gpt2 import GPT2Tokenizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} + + +class GPT2TokenizerFast(PreTrainedTokenizerFast): + """ + Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level + Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently depending on whether it is at the beginning of the sentence (without space) or not: + + ```python + >>> from transformers import GPT2TokenizerFast + + >>> tokenizer = GPT2TokenizerFast.from_pretrained("openai-community/gpt2") + >>> tokenizer("Hello world")["input_ids"] + [15496, 995] + + >>> tokenizer(" Hello world")["input_ids"] + [18435, 995] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since + the model was not pretrained this way, it might yield a decrease in performance. + + + + When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. + + + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`, *optional*): + Path to the vocabulary file. + merges_file (`str`, *optional*): + Path to the merges file. + tokenizer_file (`str`, *optional*): + Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that + contains everything needed to load the tokenizer. + unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The unknown token.
A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The beginning of sequence token. + eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The end of sequence token. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows the leading word to be treated just like + any other word. (The GPT2 tokenizer detects the beginning of a word by the preceding space.) + """ + + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + slow_tokenizer_class = GPT2Tokenizer + + def __init__( + self, + vocab_file=None, + merges_file=None, + tokenizer_file=None, + unk_token="<|endoftext|>", + bos_token="<|endoftext|>", + eos_token="<|endoftext|>", + add_prefix_space=False, + **kwargs, + ): + super().__init__( + vocab_file, + merges_file, + tokenizer_file=tokenizer_file, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + add_prefix_space=add_prefix_space, + **kwargs, + ) + + self.add_bos_token = kwargs.pop("add_bos_token", False) + + pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) + if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: + pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) + pre_tok_state["add_prefix_space"] = add_prefix_space + self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) + + self.add_prefix_space = add_prefix_space + + def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + assert self.add_prefix_space or not is_split_into_words, ( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " + "to use it with pretokenized inputs." + ) + + return super()._batch_encode_plus(*args, **kwargs) + + def _encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + + assert self.add_prefix_space or not is_split_into_words, ( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " + "to use it with pretokenized inputs." + ) + + return super()._encode_plus(*args, **kwargs) + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) + + @property + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.default_chat_template + def default_chat_template(self): + """ + A simple chat template that ignores role information and just concatenates messages with EOS tokens. + """ + logger.warning_once( + "\nNo chat template is defined for this tokenizer - using the default template " + f"for the {self.__class__.__name__} class. If the default is not appropriate for " + "your model, please set `tokenizer.chat_template` to an appropriate template.
" + "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n" + ) + return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}" diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2_tf.py b/venv/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2_tf.py new file mode 100644 index 0000000000000000000000000000000000000000..d763eb848550157528f2fab408c54139c83b9e30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt2/tokenization_gpt2_tf.py @@ -0,0 +1,104 @@ +import os +from typing import Dict, List, Union + +import tensorflow as tf +from keras_nlp.tokenizers import BytePairTokenizer +from tensorflow_text import pad_model_inputs + +from ...modeling_tf_utils import keras +from .tokenization_gpt2 import GPT2Tokenizer + + +class TFGPT2Tokenizer(keras.layers.Layer): + """ + This is an in-graph tokenizer for GPT2. It should be initialized similarly to other tokenizers, using the + `from_pretrained()` method. It can also be initialized with the `from_tokenizer()` method, which imports settings + from an existing standard tokenizer object. + + In-graph tokenizers, unlike other Hugging Face tokenizers, are actually Keras layers and are designed to be run + when the model is called, rather than during preprocessing. As a result, they have somewhat more limited options + than standard tokenizer classes. They are most useful when you want to create an end-to-end model that goes + straight from `tf.string` inputs to outputs. + + Args: + vocab (Dict[str, int]): Vocabulary dict for Byte Pair Tokenizer + merges (List[str]): Merges list for Byte Pair Tokenizer + """ + + def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None): + super().__init__() + self.pad_token_id = pad_token_id + self.max_length = max_length + self.vocab = vocab + self.merges = merges + self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length) + + @classmethod + def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs): + """Creates TFGPT2Tokenizer from GPT2Tokenizer + + Args: + tokenizer (GPT2Tokenizer) + + Examples: + + ```python + from transformers import AutoTokenizer, TFGPT2Tokenizer + + tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + tf_tokenizer = TFGPT2Tokenizer.from_tokenizer(tokenizer) + ``` + """ + merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()] + vocab = tokenizer.get_vocab() + return cls(vocab, merges, *args, **kwargs) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs): + """Creates TFGPT2Tokenizer from pretrained GPT2Tokenizer + + Args: + pretrained_model_name_or_path (Union[str, os.PathLike]): Path to pretrained model + + Examples: + + ```python + from transformers import TFGPT2Tokenizer + + tf_tokenizer = TFGPT2Tokenizer.from_pretrained("openai-community/gpt2") + ``` + """ + tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) + return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs) + + @classmethod + def from_config(cls, config): + """Creates TFGPT2Tokenizer from configurations + + Args: + config (Dict): Dictionary with keys such as stated in `get_config`. 
+ """ + return cls(**config) + + def get_config(self): + return { + "vocab": self.vocab, + "merges": self.merges, + "max_length": self.max_length, + "pad_token_id": self.pad_token_id, + } + + def call(self, x, max_length: int = None): + input_ids = self.tf_tokenizer(x) + attention_mask = tf.ones_like(input_ids) + + if self.pad_token_id is not None: + # pad the tokens up to max length + max_length = max_length if max_length is not None else self.max_length + + if max_length is not None: + input_ids, attention_mask = pad_model_inputs( + input_ids, max_seq_length=max_length, pad_value=self.pad_token_id + ) + + return {"attention_mask": attention_mask, "input_ids": input_ids} diff --git a/venv/lib/python3.10/site-packages/triton/_C/libtriton.so b/venv/lib/python3.10/site-packages/triton/_C/libtriton.so new file mode 100644 index 0000000000000000000000000000000000000000..465425cf4f07b3d93c0813175357a23398a9ae84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/_C/libtriton.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5c1d5032e760505ba55bbf0a5b3e54ffd0116cb5231137f9d416570a5bb8b67 +size 356418800