diff --git a/.gitattributes b/.gitattributes
index 6b02a2a02f26b78c58e72689f90646168d6a6e86..2da7a7bb6dbccca54f3b72d8917ad01c810290de 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -97,3 +97,6 @@ venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_train.so.8 filte
 venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/pandas/_libs/hashtable.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/pandas/_libs/join.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.11 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/pandas/_libs/algos.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..82800e4816613f49c30af484302175e71cc1743b
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5cde3df52feb43e58332d4bd9ecfc313c4cb6f0d0acdb41b0aa37b2691408516
+size 50332828
diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ae2f9a7b76899e422084b04cad2cd1447bed350
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/config.yaml
@@ -0,0 +1,43 @@
+wandb_version: 1
+
+_wandb:
+  desc: null
+  value:
+    python_version: 3.10.12
+    cli_version: 0.17.0
+    framework: huggingface
+    huggingface_version: 4.40.2
+    is_jupyter_run: false
+    is_kaggle_kernel: false
+    start_time: 1715682602
+    t:
+      1:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      2:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      3:
+      - 23
+      4: 3.10.12
+      5: 0.17.0
+      6: 4.40.2
+      8:
+      - 5
+      13: linux-x86_64
diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
new file mode 100644
index 0000000000000000000000000000000000000000..81da413601d5519ddadedf7fc4291ec8e122f2c6
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
@@ -0,0 +1,28 @@
+
+2024-05-14:10:30:02,633 INFO [__main__.py:251] Verbosity set to INFO
+2024-05-14:10:30:07,152 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi']
+2024-05-14:10:30:07,154 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+2024-05-14:10:30:07,154 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'}
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+  return func(*args, **kwargs)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+  warnings.warn(
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565
+2024-05-14:10:30:13,628 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean
+2024-05-14:10:30:13,629 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
+[2024-05-14 10:30:13,255] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA
+You can avoid this message in future by passing the argument `trust_remote_code=True`.
+Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+  warnings.warn(
+2024-05-14:10:30:15,009 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+2024-05-14:10:30:15,009 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+2024-05-14:10:30:15,027 INFO [task.py:395] Building contexts for indiccopa-hi on rank 4...
+100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 112760.93it/s]
+Passed argument batch_size = auto:1. Detecting largest batch size
+2024-05-14:10:30:16,730 INFO [evaluator.py:379] Running loglikelihood requests
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+  warnings.warn(
+Determined largest batch size: 64
\ No newline at end of file
diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/requirements.txt
@@ -0,0 +1,163 @@
+DataProperty==1.0.1
+GitPython==3.1.43
+Jinja2==3.1.3
+Markdown==3.6
+MarkupSafe==2.1.5
+Pillow-SIMD==7.0.0.post3
+PyYAML==6.0
+Werkzeug==3.0.2
+absl-py==2.1.0
+accelerate==0.30.1
+aiohttp==3.9.4
+aiosignal==1.3.1
+antlr4-python3-runtime==4.9.3
+anyio==4.3.0
+async-timeout==4.0.3
+attrs==23.2.0
+av==9.2.0
+cachetools==5.3.3
+certifi==2024.2.2
+cffi==1.15.1
+cfgv==3.4.0
+chardet==5.2.0
+charset-normalizer==3.3.2
+click==8.1.7
+cmake==3.29.2
+colorama==0.4.6
+datasets==2.19.1
+deepspeed==0.12.4+hpu.synapse.v1.15.1
+dill==0.3.8
+distlib==0.3.8
+distro==1.9.0
+docker-pycreds==0.4.0
+einops==0.8.0
+evaluate==0.4.2
+exceptiongroup==1.2.0
+expecttest==0.2.1
+filelock==3.13.4
+frozenlist==1.4.1
+fsspec==2024.3.1
+gitdb==4.0.11
+google-auth-oauthlib==0.4.6
+google-auth==2.29.0
+grpcio==1.62.1
+h11==0.14.0
+habana-media-loader==1.15.1.15
+habana-pyhlml==1.15.1.15
+habana-torch-dataloader==1.15.1.15
+habana-torch-plugin==1.15.1.15
+habana_gpu_migration==1.15.1.15
+habana_quantization_toolkit==1.15.1.15
+hjson==3.1.0
+httpcore==1.0.5
+httpx==0.27.0
+huggingface-hub==0.23.0
+identify==2.5.35
+idna==3.7
+importlib_resources==6.4.0
+iniconfig==2.0.0
+joblib==1.4.2
+jsonlines==4.0.0
+lightning-habana==1.4.0
+lightning-utilities==0.11.2
+lightning==2.2.0.post0
+lm_eval==0.3.0
+lm_eval==0.4.2
+lm_eval==0.4.2
+lm_eval==0.4.2
+mbstrdecoder==1.1.3
+more-itertools==10.2.0
+mpi4py==3.1.4
+mpmath==1.3.0
+multidict==6.0.5
+multiprocess==0.70.16
+networkx==3.3
+ninja==1.11.1.1
+nltk==3.8.1
+nodeenv==1.8.0
+numexpr==2.10.0
+numpy==1.23.5
+oauthlib==3.2.2
+omegaconf==2.3.0
+openai==1.29.0
+packaging==24.0
+pandas==2.0.1
+pathspec==0.12.1
+pathvalidate==3.2.0
+peft==0.10.0
+perfetto==0.7.0
+pip==22.0.2
+pip==23.3.1
+platformdirs==4.2.0
+pluggy==1.4.0
+portalocker==2.8.2
+pre-commit==3.3.3
+protobuf==3.20.3
+psutil==5.9.8
+py-cpuinfo==9.0.0
+pyarrow-hotfix==0.6
+pyarrow==16.0.0
+pyasn1==0.6.0
+pyasn1_modules==0.4.0
+pybind11==2.10.4
+pycountry==23.12.11
+pycparser==2.22
+pydantic==1.10.13
+pynvml==8.0.4
+pytablewriter==1.2.0
+pytest==8.1.1
+python-dateutil==2.9.0.post0
+pytorch-lightning==2.2.2
+pytz==2024.1
+regex==2023.5.5
+requests-oauthlib==2.0.0
+requests==2.31.0
+rouge_score==0.1.2
+rsa==4.9
+sacrebleu==1.5.0
+safetensors==0.4.3
+scikit-learn==1.4.2
+scipy==1.13.0
+sentencepiece==0.2.0
+sentry-sdk==2.1.1
+setproctitle==1.3.3
+setuptools==59.6.0
+setuptools==69.5.1
+six==1.16.0
+smmap==5.0.1
+sniffio==1.3.1
+sqlitedict==2.1.0
+symengine==0.11.0
+sympy==1.12
+tabledata==1.3.3
+tcolorpy==0.1.6
+tdqm==0.0.1
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.1
+tensorboard==2.11.2
+threadpoolctl==3.5.0
+tokenizers==0.19.1
+tomli==2.0.1
+torch==2.2.0a0+git8964477
+torch_tb_profiler==0.4.0
+torchaudio==2.2.0+08901ad
+torchdata==0.7.1+5e6f7b7
+torchmetrics==1.3.2
+torchtext==0.17.0+400da5c
+torchvision==0.17.0+b2383d4
+tqdm-multiprocess==0.0.11
+tqdm==4.66.2
+transformers==4.40.2
+typepy==1.3.2
+typing_extensions==4.11.0
+tzdata==2024.1
+urllib3==1.26.18
+virtualenv==20.25.1
+wandb==0.17.0
+wheel==0.37.1
+wheel==0.43.0
+word2number==1.1
+xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..dfef4885ee5aa548fac2d40f9adacbb15b363d4d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T10:30:02.502247", + "startedAt": "2024-05-14T10:30:02.089032", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3393.7487828947365, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.096, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.098, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.992, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 76.92181015014648 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..c3ad6a3530e72181b3b2e93339791388b04999cd --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 21}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..a75d0e3ceff94684d63687b3565a73a86079d277 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/logs/debug-internal.log @@ -0,0 +1,191 @@ +2024-05-14 10:30:02,100 INFO StreamThr :1849 [internal.py:wandb_internal():85] W&B internal server running at pid: 1849, started at: 2024-05-14 10:30:02.099467 +2024-05-14 10:30:02,102 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: status +2024-05-14 10:30:02,103 INFO WriterThread:1849 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/run-ys1xysw5.wandb +2024-05-14 10:30:02,103 DEBUG SenderThread:1849 [sender.py:send():378] send: header +2024-05-14 10:30:02,112 DEBUG SenderThread:1849 [sender.py:send():378] send: run +2024-05-14 10:30:02,330 INFO SenderThread:1849 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files +2024-05-14 10:30:02,330 INFO SenderThread:1849 [sender.py:_start_run_threads():1123] run started: ys1xysw5 with start time 1715682602.099126 +2024-05-14 10:30:02,336 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 10:30:02,337 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: check_version +2024-05-14 10:30:02,418 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 10:30:02,419 DEBUG HandlerThread:1849 [system_info.py:__init__():26] System info init +2024-05-14 10:30:02,420 DEBUG HandlerThread:1849 [system_info.py:__init__():41] System info init done +2024-05-14 10:30:02,420 INFO HandlerThread:1849 [system_monitor.py:start():194] Starting system monitor +2024-05-14 10:30:02,420 INFO SystemMonitor:1849 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 10:30:02,420 INFO HandlerThread:1849 [system_monitor.py:probe():214] Collecting system info +2024-05-14 10:30:02,420 INFO SystemMonitor:1849 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 10:30:02,421 INFO SystemMonitor:1849 [interfaces.py:start():188] Started disk monitoring +2024-05-14 10:30:02,421 INFO SystemMonitor:1849 [interfaces.py:start():188] Started memory monitoring +2024-05-14 10:30:02,421 INFO SystemMonitor:1849 [interfaces.py:start():188] Started network monitoring +2024-05-14 10:30:02,502 DEBUG HandlerThread:1849 [system_info.py:probe():150] Probing system +2024-05-14 10:30:02,510 DEBUG 
HandlerThread:1849 [system_info.py:_probe_git():135] Probing git +2024-05-14 10:30:02,529 ERROR HandlerThread:1849 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 10:30:02,529 DEBUG HandlerThread:1849 [system_info.py:_probe_git():143] Probing git done +2024-05-14 10:30:02,529 DEBUG HandlerThread:1849 [system_info.py:probe():198] Probing system done +2024-05-14 10:30:02,529 DEBUG HandlerThread:1849 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T10:30:02.502247', 'startedAt': '2024-05-14T10:30:02.089032', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3393.7487828947365, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.096, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.098, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.992, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 76.92181015014648}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 10:30:02,529 INFO HandlerThread:1849 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 10:30:02,529 INFO HandlerThread:1849 [system_monitor.py:probe():227] Publishing system info +2024-05-14 10:30:02,531 INFO HandlerThread:1849 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 10:30:02,534 DEBUG SenderThread:1849 [sender.py:send():378] send: files +2024-05-14 10:30:02,534 INFO SenderThread:1849 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 10:30:02,630 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 10:30:02,630 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: python_packages +2024-05-14 10:30:02,630 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:30:02,632 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:30:02,829 DEBUG SenderThread:1849 [sender.py:send():378] send: telemetry +2024-05-14 10:30:03,042 INFO wandb-upload_0:1849 [upload_job.py:push():130] Uploaded file /tmp/tmp32cc3t29wandb/7wkzytv0-wandb-metadata.json +2024-05-14 10:30:03,331 INFO Thread-12 :1849 [dir_watcher.py:_on_file_created():271] file/dir created: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-metadata.json
+2024-05-14 10:30:03,332 INFO Thread-12 :1849 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
+2024-05-14 10:30:03,332 INFO Thread-12 :1849 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/requirements.txt
+2024-05-14 10:30:05,332 INFO Thread-12 :1849 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
+2024-05-14 10:30:07,153 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: status_report
+2024-05-14 10:30:09,334 INFO Thread-12 :1849 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
+2024-05-14 10:30:12,155 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: status_report
+2024-05-14 10:30:13,338 INFO Thread-12 :1849 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
+2024-05-14 10:30:15,346 INFO Thread-12 :1849 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
+2024-05-14 10:30:17,236 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: status_report
+2024-05-14 10:30:17,347 INFO Thread-12 :1849 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
+2024-05-14 10:30:17,631 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: stop_status
+2024-05-14 10:30:17,631 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: stop_status
+2024-05-14 10:30:19,349 INFO Thread-12 :1849 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
+2024-05-14 10:30:22,744 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: status_report
+2024-05-14 10:30:23,942 DEBUG SenderThread:1849 [sender.py:send():378] send: exit
+2024-05-14 10:30:23,942 INFO SenderThread:1849 [sender.py:send_exit():585] handling exit code: 0
+2024-05-14 10:30:23,942 INFO SenderThread:1849 [sender.py:send_exit():587] handling runtime: 21
+2024-05-14 10:30:23,943 INFO SenderThread:1849 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+2024-05-14 10:30:23,944 INFO SenderThread:1849 [sender.py:send_exit():593] send defer
+2024-05-14 10:30:23,944 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:23,944 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 0
+2024-05-14 10:30:23,944 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:23,944 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 0
+2024-05-14 10:30:23,944 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 1
+2024-05-14 10:30:23,944 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:23,944 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 1
+2024-05-14 10:30:23,944 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:23,944 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 1
+2024-05-14 10:30:23,945 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 2
+2024-05-14 10:30:23,945 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:23,945 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 2
+2024-05-14 10:30:23,945 INFO HandlerThread:1849 [system_monitor.py:finish():203] Stopping system monitor
+2024-05-14 10:30:23,945 DEBUG SystemMonitor:1849 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+2024-05-14 10:30:23,945 INFO HandlerThread:1849 [interfaces.py:finish():200] Joined cpu monitor
+2024-05-14 10:30:23,945 DEBUG SystemMonitor:1849 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+2024-05-14 10:30:23,946 INFO HandlerThread:1849 [interfaces.py:finish():200] Joined disk monitor
+2024-05-14 10:30:23,946 DEBUG SystemMonitor:1849 [system_monitor.py:_start():183] Publishing last batch of metrics
+2024-05-14 10:30:23,946 INFO HandlerThread:1849 [interfaces.py:finish():200] Joined memory monitor
+2024-05-14 10:30:23,948 INFO HandlerThread:1849 [interfaces.py:finish():200] Joined network monitor
+2024-05-14 10:30:23,948 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:23,948 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 2
+2024-05-14 10:30:23,948 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 3
+2024-05-14 10:30:23,948 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:23,948 DEBUG SenderThread:1849 [sender.py:send():378] send: stats
+2024-05-14 10:30:23,948 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 3
+2024-05-14 10:30:23,949 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:23,949 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 3
+2024-05-14 10:30:23,949 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 4
+2024-05-14 10:30:23,949 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:23,949 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 4
+2024-05-14 10:30:23,949 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:23,950 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 4
+2024-05-14 10:30:23,950 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 5
+2024-05-14 10:30:23,950 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:23,950 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 5
+2024-05-14 10:30:23,950 DEBUG SenderThread:1849 [sender.py:send():378] send: summary
+2024-05-14 10:30:23,950 INFO SenderThread:1849 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+2024-05-14 10:30:23,951 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:23,951 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 5
+2024-05-14 10:30:23,951 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 6
+2024-05-14 10:30:23,951 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:23,951 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 6
+2024-05-14 10:30:23,951 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:23,951 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 6
+2024-05-14 10:30:23,953 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: status_report
+2024-05-14 10:30:24,059 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 7
+2024-05-14 10:30:24,059 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:24,059 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 7
+2024-05-14 10:30:24,059 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:24,059 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 7
+2024-05-14 10:30:24,352 INFO Thread-12 :1849 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/config.yaml
+2024-05-14 10:30:24,353 INFO Thread-12 :1849 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-summary.json
+2024-05-14 10:30:24,942 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-14 10:30:25,270 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 8
+2024-05-14 10:30:25,270 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: poll_exit
+2024-05-14 10:30:25,270 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:25,271 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 8
+2024-05-14 10:30:25,271 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:25,271 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 8
+2024-05-14 10:30:25,271 INFO SenderThread:1849 [job_builder.py:build():432] Attempting to build job artifact
+2024-05-14 10:30:25,271 INFO SenderThread:1849 [job_builder.py:_get_source_type():576] no source found
+2024-05-14 10:30:25,271 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 9
+2024-05-14 10:30:25,271 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:25,271 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 9
+2024-05-14 10:30:25,271 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:25,271 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 9
+2024-05-14 10:30:25,272 INFO SenderThread:1849 [dir_watcher.py:finish():358] shutting down directory watcher
+2024-05-14 10:30:25,353 INFO SenderThread:1849 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
+2024-05-14 10:30:25,353 INFO SenderThread:1849 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files
+2024-05-14 10:30:25,354 INFO SenderThread:1849 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log output.log
+2024-05-14 10:30:25,354 INFO SenderThread:1849 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-summary.json wandb-summary.json
+2024-05-14 10:30:25,354 INFO SenderThread:1849 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-metadata.json wandb-metadata.json
+2024-05-14 10:30:25,354 INFO SenderThread:1849 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/requirements.txt requirements.txt
+2024-05-14 10:30:25,356 INFO SenderThread:1849 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/config.yaml config.yaml
+2024-05-14 10:30:25,357 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 10
+2024-05-14 10:30:25,358 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:25,358 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 10
+2024-05-14 10:30:25,361 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:25,361 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 10
+2024-05-14 10:30:25,361 INFO SenderThread:1849 [file_pusher.py:finish():169] shutting down file pusher
+2024-05-14 10:30:25,620 INFO wandb-upload_0:1849 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/output.log
+2024-05-14 10:30:25,759 INFO wandb-upload_1:1849 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/wandb-summary.json
+2024-05-14 10:30:25,864 INFO wandb-upload_3:1849 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/config.yaml
+2024-05-14 10:30:25,870 INFO wandb-upload_2:1849 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/files/requirements.txt
+2024-05-14 10:30:25,942 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-14 10:30:25,942 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: poll_exit
+2024-05-14 10:30:26,070 INFO Thread-11 (_thread_body):1849 [sender.py:transition_state():613] send defer: 11
+2024-05-14 10:30:26,071 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:26,071 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 11
+2024-05-14 10:30:26,071 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:26,071 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 11
+2024-05-14 10:30:26,071 INFO SenderThread:1849 [file_pusher.py:join():175] waiting for file pusher
+2024-05-14 10:30:26,071 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 12
+2024-05-14 10:30:26,071 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:26,071 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 12
+2024-05-14 10:30:26,071 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:26,071 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 12
+2024-05-14 10:30:26,071 INFO SenderThread:1849 [file_stream.py:finish():601] file stream finish called
+2024-05-14 10:30:26,305 INFO SenderThread:1849 [file_stream.py:finish():605] file stream finish is done
+2024-05-14 10:30:26,305 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 13
+2024-05-14 10:30:26,305 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:26,305 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 13
+2024-05-14 10:30:26,305 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:26,305 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 13
+2024-05-14 10:30:26,305 INFO SenderThread:1849 [sender.py:transition_state():613] send defer: 14
+2024-05-14 10:30:26,305 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 10:30:26,305 DEBUG SenderThread:1849 [sender.py:send():378] send: final
+2024-05-14 10:30:26,305 INFO HandlerThread:1849 [handler.py:handle_request_defer():184] handle defer: 14
+2024-05-14 10:30:26,305 DEBUG SenderThread:1849 [sender.py:send():378] send: footer
+2024-05-14 10:30:26,306 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: defer
+2024-05-14 10:30:26,306 INFO SenderThread:1849 [sender.py:send_request_defer():609] handle sender defer: 14
+2024-05-14 10:30:26,306 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-14 10:30:26,306 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: poll_exit
+2024-05-14 10:30:26,306 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-14 10:30:26,306 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: poll_exit
+2024-05-14 10:30:26,306 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: server_info
+2024-05-14 10:30:26,307 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: get_summary
+2024-05-14 10:30:26,307 DEBUG SenderThread:1849 [sender.py:send_request():405] send_request: server_info
+2024-05-14 10:30:26,307 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: sampled_history
+2024-05-14 10:30:26,308 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: internal_messages
+2024-05-14 10:30:26,359 INFO MainThread:1849 [wandb_run.py:_footer_history_summary_info():3994] rendering history
+2024-05-14 10:30:26,359 INFO MainThread:1849 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
+2024-05-14 10:30:26,359 INFO MainThread:1849 [wandb_run.py:_footer_sync_info():3953] logging synced files
+2024-05-14 10:30:26,359 DEBUG HandlerThread:1849 [handler.py:handle_request():158] handle_request: shutdown
+2024-05-14 10:30:26,359 INFO HandlerThread:1849 [handler.py:finish():882] shutting down handler
+2024-05-14 10:30:27,307 INFO WriterThread:1849 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/run-ys1xysw5.wandb
+2024-05-14 10:30:27,358 INFO SenderThread:1849 [sender.py:finish():1545] shutting down sender
+2024-05-14 10:30:27,358 INFO SenderThread:1849 [file_pusher.py:finish():169] shutting down file pusher
+2024-05-14 10:30:27,358 INFO SenderThread:1849 [file_pusher.py:join():175] waiting for file pusher
diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/logs/debug.log
new file mode 100644
index 0000000000000000000000000000000000000000..87e2e528601080dcae8088104fab66541af0bb9d
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/logs/debug.log
@@ -0,0 +1,29 @@
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_setup.py:_flush():76] Configure stats pid to 484
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+2024-05-14 10:30:02,096 WARNING MainThread:484 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_setup.py:_flush():76] Applying login settings: {}
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/logs/debug.log
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/logs/debug-internal.log
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_init.py:init():560] calling init triggers
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+config: {}
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_init.py:init():610] starting backend
+2024-05-14 10:30:02,096 INFO MainThread:484 [wandb_init.py:init():614] setting up manager
+2024-05-14 10:30:02,098 INFO MainThread:484 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+2024-05-14 10:30:02,098 INFO MainThread:484 [wandb_init.py:init():622] backend started and connected
+2024-05-14 10:30:02,101 INFO MainThread:484 [wandb_init.py:init():711] updated telemetry
+2024-05-14 10:30:02,112 INFO MainThread:484 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+2024-05-14 10:30:02,336 INFO MainThread:484 [wandb_run.py:_on_init():2396] communicating current version
+2024-05-14 10:30:02,413 INFO MainThread:484 [wandb_run.py:_on_init():2405] got version response
+2024-05-14 10:30:02,414 INFO MainThread:484 [wandb_init.py:init():795] starting run threads in backend
+2024-05-14 10:30:02,630 INFO MainThread:484 [wandb_run.py:_console_start():2374] atexit reg
+2024-05-14 10:30:02,631 INFO MainThread:484 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+2024-05-14 10:30:02,631 INFO MainThread:484 [wandb_run.py:_redirect():2294] Wrapping output streams.
+2024-05-14 10:30:02,631 INFO MainThread:484 [wandb_run.py:_redirect():2319] Redirects installed.
+2024-05-14 10:30:02,632 INFO MainThread:484 [wandb_init.py:init():838] run started, returning control to user process
+2024-05-14 10:30:27,360 WARNING MsgRouterThr:484 [router.py:message_loop():77] message_loop has been closed
diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/run-ys1xysw5.wandb b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/run-ys1xysw5.wandb
new file mode 100644
index 0000000000000000000000000000000000000000..3c1525f7fcb5b83845527cb6cc5281f668f838fc
Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103002-ys1xysw5/run-ys1xysw5.wandb differ
diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c9f6c8298d7f6388c49b502891050f1a4aef6d7c
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/config.yaml
@@ -0,0 +1,43 @@
+wandb_version: 1
+
+_wandb:
+  desc: null
+  value:
+    python_version: 3.10.12
+    cli_version: 0.17.0
+    framework: huggingface
+    huggingface_version: 4.40.2
+    is_jupyter_run: false
+    is_kaggle_kernel: false
+    start_time: 1715705031
+    t:
+      1:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      2:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      3:
+      - 23
+      4: 3.10.12
+      5: 0.17.0
+      6: 4.40.2
+      8:
+      - 5
+      13: linux-x86_64
diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log
new file mode 100644
index 0000000000000000000000000000000000000000..6dc384e6fced0335cc569c5b3670d8bb749e51db
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log
@@ -0,0 +1,28 @@
+
+2024-05-14:16:43:52,003 INFO [__main__.py:251] Verbosity set to INFO
+2024-05-14:16:43:56,660 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi']
+2024-05-14:16:43:56,664 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+2024-05-14:16:43:56,664 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'}
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+  return func(*args, **kwargs)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+  warnings.warn(
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565
+2024-05-14:16:44:05,342 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean
+2024-05-14:16:44:05,342 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
+[2024-05-14 16:44:04,851] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA
+You can avoid this message in future by passing the argument `trust_remote_code=True`.
+Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+  warnings.warn(
+2024-05-14:16:44:06,865 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+2024-05-14:16:44:06,865 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+2024-05-14:16:44:06,886 INFO [task.py:395] Building contexts for indiccopa-hi on rank 5...
+100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 105564.51it/s]
+Passed argument batch_size = auto:1. Detecting largest batch size
+2024-05-14:16:44:08,634 INFO [evaluator.py:379] Running loglikelihood requests
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+  warnings.warn(
+Determined largest batch size: 64
\ No newline at end of file
diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/requirements.txt
@@ -0,0 +1,163 @@
+DataProperty==1.0.1
+GitPython==3.1.43
+Jinja2==3.1.3
+Markdown==3.6
+MarkupSafe==2.1.5
+Pillow-SIMD==7.0.0.post3
+PyYAML==6.0
+Werkzeug==3.0.2
+absl-py==2.1.0
+accelerate==0.30.1
+aiohttp==3.9.4
+aiosignal==1.3.1
+antlr4-python3-runtime==4.9.3
+anyio==4.3.0
+async-timeout==4.0.3
+attrs==23.2.0
+av==9.2.0
+cachetools==5.3.3
+certifi==2024.2.2
+cffi==1.15.1
+cfgv==3.4.0
+chardet==5.2.0
+charset-normalizer==3.3.2
+click==8.1.7
+cmake==3.29.2
+colorama==0.4.6
+datasets==2.19.1
+deepspeed==0.12.4+hpu.synapse.v1.15.1
+dill==0.3.8
+distlib==0.3.8
+distro==1.9.0
+docker-pycreds==0.4.0
+einops==0.8.0
+evaluate==0.4.2
+exceptiongroup==1.2.0
+expecttest==0.2.1
+filelock==3.13.4
+frozenlist==1.4.1
+fsspec==2024.3.1
+gitdb==4.0.11
+google-auth-oauthlib==0.4.6
+google-auth==2.29.0
+grpcio==1.62.1
+h11==0.14.0
+habana-media-loader==1.15.1.15
+habana-pyhlml==1.15.1.15
+habana-torch-dataloader==1.15.1.15
+habana-torch-plugin==1.15.1.15
+habana_gpu_migration==1.15.1.15
+habana_quantization_toolkit==1.15.1.15
+hjson==3.1.0
+httpcore==1.0.5
+httpx==0.27.0
+huggingface-hub==0.23.0
+identify==2.5.35
+idna==3.7
+importlib_resources==6.4.0
+iniconfig==2.0.0
+joblib==1.4.2
+jsonlines==4.0.0
+lightning-habana==1.4.0
+lightning-utilities==0.11.2
+lightning==2.2.0.post0
+lm_eval==0.3.0
+lm_eval==0.4.2
+lm_eval==0.4.2
+lm_eval==0.4.2
+mbstrdecoder==1.1.3
+more-itertools==10.2.0
+mpi4py==3.1.4
+mpmath==1.3.0
+multidict==6.0.5
+multiprocess==0.70.16
+networkx==3.3
+ninja==1.11.1.1
+nltk==3.8.1
+nodeenv==1.8.0
+numexpr==2.10.0
+numpy==1.23.5
+oauthlib==3.2.2
+omegaconf==2.3.0
+openai==1.29.0
+packaging==24.0
+pandas==2.0.1
+pathspec==0.12.1
+pathvalidate==3.2.0
+peft==0.10.0
+perfetto==0.7.0
+pip==22.0.2
+pip==23.3.1
+platformdirs==4.2.0
+pluggy==1.4.0
+portalocker==2.8.2
+pre-commit==3.3.3
+protobuf==3.20.3
+psutil==5.9.8
+py-cpuinfo==9.0.0
+pyarrow-hotfix==0.6
+pyarrow==16.0.0
+pyasn1==0.6.0
+pyasn1_modules==0.4.0
+pybind11==2.10.4
+pycountry==23.12.11
+pycparser==2.22
+pydantic==1.10.13
+pynvml==8.0.4
+pytablewriter==1.2.0
+pytest==8.1.1
+python-dateutil==2.9.0.post0
+pytorch-lightning==2.2.2
+pytz==2024.1
+regex==2023.5.5
+requests-oauthlib==2.0.0
+requests==2.31.0
+rouge_score==0.1.2
+rsa==4.9
+sacrebleu==1.5.0
+safetensors==0.4.3
+scikit-learn==1.4.2
+scipy==1.13.0
+sentencepiece==0.2.0
+sentry-sdk==2.1.1
+setproctitle==1.3.3
+setuptools==59.6.0
+setuptools==69.5.1
+six==1.16.0
+smmap==5.0.1
+sniffio==1.3.1
+sqlitedict==2.1.0
+symengine==0.11.0
+sympy==1.12
+tabledata==1.3.3
+tcolorpy==0.1.6
+tdqm==0.0.1
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.1
+tensorboard==2.11.2
+threadpoolctl==3.5.0
+tokenizers==0.19.1
+tomli==2.0.1
+torch==2.2.0a0+git8964477
+torch_tb_profiler==0.4.0
+torchaudio==2.2.0+08901ad
+torchdata==0.7.1+5e6f7b7
+torchmetrics==1.3.2
+torchtext==0.17.0+400da5c
+torchvision==0.17.0+b2383d4
+tqdm-multiprocess==0.0.11
+tqdm==4.66.2
+transformers==4.40.2
+typepy==1.3.2
+typing_extensions==4.11.0
+tzdata==2024.1
+urllib3==1.26.18
+virtualenv==20.25.1
+wandb==0.17.0
+wheel==0.37.1
+wheel==0.43.0
+word2number==1.1
+xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..6cacd1e8f07643d15065026b72a26915b56f188e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:43:51.863532", + "startedAt": "2024-05-14T16:43:51.446916", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3392.142, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.998, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4306488037109 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..91df0012cef27fbd76437f2803da1fd4192acd69 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 24}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..7bd82babfa870c2af95c47da0f7f8f8af459f771 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/logs/debug-internal.log @@ -0,0 +1,194 @@ +2024-05-14 16:43:51,458 INFO StreamThr :130120 [internal.py:wandb_internal():85] W&B internal server running at pid: 130120, started at: 2024-05-14 16:43:51.458008 +2024-05-14 16:43:51,460 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:43:51,462 INFO WriterThread:130120 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/run-mibaodgl.wandb +2024-05-14 16:43:51,462 DEBUG SenderThread:130120 [sender.py:send():378] send: header +2024-05-14 16:43:51,471 DEBUG SenderThread:130120 [sender.py:send():378] send: run +2024-05-14 16:43:51,702 INFO SenderThread:130120 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files +2024-05-14 16:43:51,702 INFO SenderThread:130120 [sender.py:_start_run_threads():1123] run started: mibaodgl with start time 1715705031.45782 +2024-05-14 16:43:51,710 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:43:51,710 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:43:51,793 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:43:51,795 DEBUG HandlerThread:130120 [system_info.py:__init__():26] System info init +2024-05-14 16:43:51,795 DEBUG HandlerThread:130120 [system_info.py:__init__():41] System info init done +2024-05-14 16:43:51,795 INFO HandlerThread:130120 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:43:51,795 INFO SystemMonitor:130120 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:43:51,795 INFO HandlerThread:130120 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:43:51,795 INFO SystemMonitor:130120 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:43:51,798 INFO SystemMonitor:130120 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:43:51,798 INFO SystemMonitor:130120 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:43:51,799 INFO SystemMonitor:130120 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:43:51,863 DEBUG HandlerThread:130120 [system_info.py:probe():150] Probing 
system +2024-05-14 16:43:51,871 DEBUG HandlerThread:130120 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:43:51,899 ERROR HandlerThread:130120 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:43:51,899 DEBUG HandlerThread:130120 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:43:51,899 DEBUG HandlerThread:130120 [system_info.py:probe():198] Probing system done +2024-05-14 16:43:51,899 DEBUG HandlerThread:130120 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:43:51.863532', 'startedAt': '2024-05-14T16:43:51.446916', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3392.142, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.998, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4306488037109}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:43:51,899 INFO HandlerThread:130120 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:43:51,899 INFO HandlerThread:130120 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:43:51,900 INFO HandlerThread:130120 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:43:51,904 DEBUG SenderThread:130120 [sender.py:send():378] send: files +2024-05-14 16:43:51,905 INFO SenderThread:130120 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:43:52,000 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:43:52,001 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:43:52,001 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:43:52,002 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:43:52,157 DEBUG SenderThread:130120 [sender.py:send():378] send: telemetry +2024-05-14 16:43:52,495 INFO wandb-upload_0:130120 [upload_job.py:push():130] Uploaded file /tmp/tmpdxjaefnuwandb/a9ha6ws2-wandb-metadata.json +2024-05-14 16:43:52,703 INFO Thread-12 :130120 
[dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:43:52,704 INFO Thread-12 :130120 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/requirements.txt +2024-05-14 16:43:52,704 INFO Thread-12 :130120 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-metadata.json +2024-05-14 16:43:54,704 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:43:56,662 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:43:58,705 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:44:01,665 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:04,709 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:44:06,710 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:44:06,866 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:07,001 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:44:07,001 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:44:07,711 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:44:08,711 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:44:09,712 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:44:10,713 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:44:11,714 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:44:12,151 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:15,880 DEBUG SenderThread:130120 [sender.py:send():378] send: exit +2024-05-14 16:44:15,880 INFO SenderThread:130120 [sender.py:send_exit():585] handling exit code: 0 +2024-05-14 16:44:15,880 INFO SenderThread:130120 [sender.py:send_exit():587] handling runtime: 24 +2024-05-14 16:44:15,883 INFO SenderThread:130120 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:44:15,883 INFO SenderThread:130120 [sender.py:send_exit():593] send defer +2024-05-14 
16:44:15,884 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,884 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:44:15,884 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,884 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:44:15,884 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:44:15,884 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,884 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:44:15,884 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,884 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:44:15,884 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:44:15,884 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,884 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:44:15,884 INFO HandlerThread:130120 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:44:15,885 INFO HandlerThread:130120 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:44:15,885 DEBUG SystemMonitor:130120 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:44:15,885 INFO HandlerThread:130120 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:44:15,885 DEBUG SystemMonitor:130120 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:44:15,885 INFO HandlerThread:130120 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:44:15,885 DEBUG SystemMonitor:130120 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:44:15,885 INFO HandlerThread:130120 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:44:15,887 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,887 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:44:15,887 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:44:15,888 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,888 DEBUG SenderThread:130120 [sender.py:send():378] send: stats +2024-05-14 16:44:15,888 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:44:15,888 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,889 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:44:15,889 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:44:15,889 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,889 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:44:15,889 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,889 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:44:15,889 INFO SenderThread:130120 
[sender.py:transition_state():613] send defer: 5 +2024-05-14 16:44:15,889 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,889 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:44:15,889 DEBUG SenderThread:130120 [sender.py:send():378] send: summary +2024-05-14 16:44:15,890 INFO SenderThread:130120 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:44:15,890 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,890 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:44:15,890 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:44:15,890 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,890 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:44:15,890 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,890 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:44:15,894 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:15,958 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:44:15,959 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,959 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:44:15,959 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,959 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:44:16,717 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/config.yaml +2024-05-14 16:44:16,717 INFO Thread-12 :130120 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-summary.json +2024-05-14 16:44:16,867 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:44:16,867 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:16,867 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:44:16,868 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:16,868 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:44:16,868 INFO SenderThread:130120 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:44:16,868 INFO SenderThread:130120 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:44:16,868 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:44:16,869 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:16,869 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:44:16,869 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:16,869 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 
16:44:16,869 INFO SenderThread:130120 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:44:16,880 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:17,718 INFO Thread-12 :130120 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:44:17,718 INFO SenderThread:130120 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files +2024-05-14 16:44:17,718 INFO SenderThread:130120 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:44:17,719 INFO SenderThread:130120 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-summary.json wandb-summary.json +2024-05-14 16:44:17,719 INFO SenderThread:130120 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/config.yaml config.yaml +2024-05-14 16:44:17,719 INFO SenderThread:130120 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/requirements.txt requirements.txt +2024-05-14 16:44:17,719 INFO SenderThread:130120 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log output.log +2024-05-14 16:44:17,720 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:44:17,721 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:17,722 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:17,724 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:44:17,725 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:17,725 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:44:17,725 INFO SenderThread:130120 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:44:17,881 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:17,881 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:17,958 INFO wandb-upload_1:130120 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/config.yaml +2024-05-14 16:44:18,145 INFO wandb-upload_0:130120 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/wandb-summary.json +2024-05-14 16:44:18,211 INFO wandb-upload_3:130120 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/output.log +2024-05-14 16:44:18,246 INFO wandb-upload_2:130120 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/files/requirements.txt +2024-05-14 16:44:18,446 INFO Thread-11 (_thread_body):130120 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:44:18,446 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,446 INFO 
HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:44:18,447 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,447 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:44:18,447 INFO SenderThread:130120 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 16:44:18,447 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:44:18,447 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,447 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:44:18,447 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,447 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:44:18,447 INFO SenderThread:130120 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:44:18,545 INFO SenderThread:130120 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:44:18,545 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:44:18,545 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,545 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:44:18,546 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,546 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:44:18,546 INFO SenderThread:130120 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:44:18,546 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,546 INFO HandlerThread:130120 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:44:18,546 DEBUG SenderThread:130120 [sender.py:send():378] send: final +2024-05-14 16:44:18,546 DEBUG SenderThread:130120 [sender.py:send():378] send: footer +2024-05-14 16:44:18,546 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,546 INFO SenderThread:130120 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:44:18,547 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:18,547 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:18,547 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:18,547 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:44:18,547 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:18,548 DEBUG SenderThread:130120 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:44:18,549 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:44:18,549 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:44:18,549 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:44:18,608 INFO MainThread:130120 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:44:18,608 INFO MainThread:130120 
[wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:44:18,608 INFO MainThread:130120 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:44:18,609 DEBUG HandlerThread:130120 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:44:18,609 INFO HandlerThread:130120 [handler.py:finish():882] shutting down handler +2024-05-14 16:44:19,548 INFO WriterThread:130120 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/run-mibaodgl.wandb +2024-05-14 16:44:19,608 INFO SenderThread:130120 [sender.py:finish():1545] shutting down sender +2024-05-14 16:44:19,608 INFO SenderThread:130120 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:44:19,608 INFO SenderThread:130120 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..fda9297013682ed5a732ee2432c2cbde2b2b912c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:43:51,454 INFO MainThread:128892 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_setup.py:_flush():76] Configure stats pid to 128892 +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:43:51,455 WARNING MainThread:128892 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/logs/debug.log +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/logs/debug-internal.log +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_init.py:init():610] starting backend +2024-05-14 16:43:51,455 INFO MainThread:128892 [wandb_init.py:init():614] setting up manager +2024-05-14 16:43:51,457 INFO MainThread:128892 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:43:51,457 INFO MainThread:128892 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:43:51,460 INFO MainThread:128892 
[wandb_init.py:init():711] updated telemetry +2024-05-14 16:43:51,471 INFO MainThread:128892 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:43:51,709 INFO MainThread:128892 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:43:51,789 INFO MainThread:128892 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:43:51,789 INFO MainThread:128892 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:43:52,000 INFO MainThread:128892 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:43:52,001 INFO MainThread:128892 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:43:52,001 INFO MainThread:128892 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:43:52,001 INFO MainThread:128892 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 16:43:52,002 INFO MainThread:128892 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:44:19,610 WARNING MsgRouterThr:128892 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/run-mibaodgl.wandb b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/run-mibaodgl.wandb new file mode 100644 index 0000000000000000000000000000000000000000..e05427565e162ead438a552b7e5e1256bd9735e2 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_164351-mibaodgl/run-mibaodgl.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/run-86p21jxx.wandb b/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/run-86p21jxx.wandb new file mode 100644 index 0000000000000000000000000000000000000000..33f35db9a963327001546928c1b409dafb6bf0a7 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/run-86p21jxx.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bca4d8c452bd4dc49d47da86c0bc9ff7a22ad32f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716451088 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/output.log b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..54236eee710b11ed0f53c69a0af7c446d9c401e8 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:07:58:09,202 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:07:58:17,659 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:07:58:17,660 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:07:58:17,661 INFO [evaluator.py:177] Initializing hf model, 
with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step16000'} +2024-05-23:07:58:19,945 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step16000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step16000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8150356038c46ec25f623f6e945d6dcb66a2e717 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..fb7e97477c03dcacd8e35d8079b436cac033ef1e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T07:58:08.993659", + "startedAt": "2024-05-23T07:58:08.466458", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step16000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.4999875000003, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.63564682006836 + } + }, + "memory": { + "total": 1007.4379806518555 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..dd40438664b9faafae8d83e7ea44b3a9b1f5c8ef --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-23 07:58:08,489 INFO StreamThr :1813 [internal.py:wandb_internal():85] W&B internal server running at pid: 1813, started at: 2024-05-23 07:58:08.486993 +2024-05-23 07:58:08,493 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: status +2024-05-23 07:58:08,494 INFO WriterThread:1813 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/run-stweehpg.wandb +2024-05-23 07:58:08,496 DEBUG SenderThread:1813 [sender.py:send():378] send: header +2024-05-23 07:58:08,499 DEBUG SenderThread:1813 [sender.py:send():378] send: run +2024-05-23 07:58:08,799 INFO SenderThread:1813 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files +2024-05-23 07:58:08,799 INFO SenderThread:1813 [sender.py:_start_run_threads():1123] run started: stweehpg with start time 1716451088.487075 +2024-05-23 07:58:08,800 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 07:58:08,800 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: check_version +2024-05-23 07:58:08,918 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 07:58:08,920 DEBUG HandlerThread:1813 [system_info.py:__init__():26] System info init +2024-05-23 07:58:08,920 DEBUG HandlerThread:1813 [system_info.py:__init__():41] System info init done +2024-05-23 07:58:08,921 INFO HandlerThread:1813 [system_monitor.py:start():194] Starting system monitor +2024-05-23 07:58:08,921 INFO SystemMonitor:1813 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 07:58:08,921 INFO HandlerThread:1813 [system_monitor.py:probe():214] Collecting system info +2024-05-23 07:58:08,928 INFO SystemMonitor:1813 
[interfaces.py:start():188] Started cpu monitoring +2024-05-23 07:58:08,928 INFO SystemMonitor:1813 [interfaces.py:start():188] Started disk monitoring +2024-05-23 07:58:08,928 INFO SystemMonitor:1813 [interfaces.py:start():188] Started memory monitoring +2024-05-23 07:58:08,929 INFO SystemMonitor:1813 [interfaces.py:start():188] Started network monitoring +2024-05-23 07:58:08,993 DEBUG HandlerThread:1813 [system_info.py:probe():150] Probing system +2024-05-23 07:58:08,996 DEBUG HandlerThread:1813 [system_info.py:_probe_git():135] Probing git +2024-05-23 07:58:09,006 ERROR HandlerThread:1813 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 07:58:09,006 DEBUG HandlerThread:1813 [system_info.py:_probe_git():143] Probing git done +2024-05-23 07:58:09,006 DEBUG HandlerThread:1813 [system_info.py:probe():198] Probing system done +2024-05-23 07:58:09,006 DEBUG HandlerThread:1813 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T07:58:08.993659', 'startedAt': '2024-05-23T07:58:08.466458', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step16000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.4999875000003, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.63564682006836}}, 'memory': {'total': 1007.4379806518555}} +2024-05-23 07:58:09,006 INFO HandlerThread:1813 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 07:58:09,006 INFO HandlerThread:1813 [system_monitor.py:probe():227] Publishing system info +2024-05-23 07:58:09,010 INFO HandlerThread:1813 [system_monitor.py:probe():229] Finished 
publishing system info +2024-05-23 07:58:09,015 DEBUG SenderThread:1813 [sender.py:send():378] send: files +2024-05-23 07:58:09,015 INFO SenderThread:1813 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 07:58:09,196 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 07:58:09,196 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: python_packages +2024-05-23 07:58:09,198 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 07:58:09,199 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: stop_status +2024-05-23 07:58:09,269 DEBUG SenderThread:1813 [sender.py:send():378] send: telemetry +2024-05-23 07:58:09,633 INFO wandb-upload_0:1813 [upload_job.py:push():130] Uploaded file /tmp/tmpt2k1una0wandb/0k5jynqx-wandb-metadata.json +2024-05-23 07:58:09,802 INFO Thread-12 :1813 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/output.log +2024-05-23 07:58:09,802 INFO Thread-12 :1813 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/requirements.txt +2024-05-23 07:58:09,802 INFO Thread-12 :1813 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-metadata.json +2024-05-23 07:58:11,802 INFO Thread-12 :1813 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/output.log +2024-05-23 07:58:14,277 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:58:19,661 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:58:19,809 INFO Thread-12 :1813 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/output.log +2024-05-23 07:58:19,952 DEBUG SenderThread:1813 [sender.py:send():378] send: exit +2024-05-23 07:58:19,952 INFO SenderThread:1813 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 07:58:19,952 INFO SenderThread:1813 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 07:58:19,953 INFO SenderThread:1813 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 07:58:19,953 INFO SenderThread:1813 [sender.py:send_exit():593] send defer +2024-05-23 07:58:19,954 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:19,954 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 07:58:19,954 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:19,954 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 07:58:19,954 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 1 +2024-05-23 07:58:19,954 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:19,954 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 07:58:19,954 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:19,954 
INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 07:58:19,954 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 2 +2024-05-23 07:58:19,954 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:19,954 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 07:58:19,954 INFO HandlerThread:1813 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 07:58:19,954 DEBUG SystemMonitor:1813 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 07:58:19,954 DEBUG SystemMonitor:1813 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 07:58:19,954 DEBUG SystemMonitor:1813 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 07:58:19,955 INFO HandlerThread:1813 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 07:58:19,955 INFO HandlerThread:1813 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 07:58:19,955 INFO HandlerThread:1813 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 07:58:19,956 INFO HandlerThread:1813 [interfaces.py:finish():200] Joined network monitor +2024-05-23 07:58:19,956 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:19,956 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 07:58:19,956 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 3 +2024-05-23 07:58:19,956 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:19,956 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 07:58:19,956 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:19,956 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 07:58:19,956 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 4 +2024-05-23 07:58:19,956 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:19,956 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 07:58:19,956 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:19,956 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 07:58:19,956 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 5 +2024-05-23 07:58:19,956 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:19,957 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 07:58:19,957 DEBUG SenderThread:1813 [sender.py:send():378] send: summary +2024-05-23 07:58:19,958 INFO SenderThread:1813 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 07:58:19,958 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:19,958 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 07:58:19,958 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 6 +2024-05-23 07:58:19,958 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:19,958 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 
07:58:19,958 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:19,958 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 07:58:19,963 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:58:20,034 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 7 +2024-05-23 07:58:20,034 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:20,034 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 07:58:20,034 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:20,034 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 07:58:20,810 INFO Thread-12 :1813 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/config.yaml +2024-05-23 07:58:20,810 INFO Thread-12 :1813 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-summary.json +2024-05-23 07:58:20,951 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:58:21,297 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 8 +2024-05-23 07:58:21,298 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:58:21,298 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:21,298 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 07:58:21,298 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:21,298 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 07:58:21,298 INFO SenderThread:1813 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 07:58:21,299 INFO SenderThread:1813 [job_builder.py:_get_source_type():576] no source found +2024-05-23 07:58:21,299 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 9 +2024-05-23 07:58:21,299 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:21,299 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 07:58:21,299 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:21,299 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 07:58:21,299 INFO SenderThread:1813 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 07:58:21,811 INFO SenderThread:1813 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/output.log +2024-05-23 07:58:21,812 INFO SenderThread:1813 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files +2024-05-23 07:58:21,812 INFO SenderThread:1813 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/config.yaml config.yaml +2024-05-23 07:58:21,812 INFO SenderThread:1813 [dir_watcher.py:finish():402] scan save: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/output.log output.log +2024-05-23 07:58:21,815 INFO SenderThread:1813 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-metadata.json wandb-metadata.json +2024-05-23 07:58:21,815 INFO SenderThread:1813 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/requirements.txt requirements.txt +2024-05-23 07:58:21,815 INFO SenderThread:1813 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-summary.json wandb-summary.json +2024-05-23 07:58:21,815 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 10 +2024-05-23 07:58:21,815 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:21,815 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 07:58:21,817 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:21,817 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 07:58:21,817 INFO SenderThread:1813 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 07:58:21,952 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:58:21,952 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:58:22,069 INFO wandb-upload_0:1813 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/config.yaml +2024-05-23 07:58:22,422 INFO wandb-upload_1:1813 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/output.log +2024-05-23 07:58:22,435 INFO wandb-upload_3:1813 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/wandb-summary.json +2024-05-23 07:58:22,437 INFO wandb-upload_2:1813 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/files/requirements.txt +2024-05-23 07:58:22,638 INFO Thread-11 (_thread_body):1813 [sender.py:transition_state():613] send defer: 11 +2024-05-23 07:58:22,638 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:22,638 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 07:58:22,638 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:22,638 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 07:58:22,638 INFO SenderThread:1813 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 07:58:22,638 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 12 +2024-05-23 07:58:22,639 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:22,639 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 07:58:22,639 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:22,639 INFO SenderThread:1813 [sender.py:send_request_defer():609] 
handle sender defer: 12 +2024-05-23 07:58:22,639 INFO SenderThread:1813 [file_stream.py:finish():601] file stream finish called +2024-05-23 07:58:22,710 INFO SenderThread:1813 [file_stream.py:finish():605] file stream finish is done +2024-05-23 07:58:22,710 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 13 +2024-05-23 07:58:22,710 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:22,710 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 07:58:22,710 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:22,710 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 07:58:22,710 INFO SenderThread:1813 [sender.py:transition_state():613] send defer: 14 +2024-05-23 07:58:22,710 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:58:22,710 INFO HandlerThread:1813 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 07:58:22,711 DEBUG SenderThread:1813 [sender.py:send():378] send: final +2024-05-23 07:58:22,711 DEBUG SenderThread:1813 [sender.py:send():378] send: footer +2024-05-23 07:58:22,711 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: defer +2024-05-23 07:58:22,711 INFO SenderThread:1813 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 07:58:22,712 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:58:22,712 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:58:22,712 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 07:58:22,712 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 07:58:22,712 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 07:58:22,712 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 07:58:22,712 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:58:22,712 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:58:22,712 DEBUG SenderThread:1813 [sender.py:send_request():405] send_request: server_info +2024-05-23 07:58:22,765 INFO MainThread:1813 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 07:58:22,765 INFO MainThread:1813 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 07:58:22,765 INFO MainThread:1813 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 07:58:22,765 DEBUG HandlerThread:1813 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 07:58:22,766 INFO HandlerThread:1813 [handler.py:finish():882] shutting down handler +2024-05-23 07:58:23,712 INFO WriterThread:1813 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/run-stweehpg.wandb +2024-05-23 07:58:23,765 INFO SenderThread:1813 [sender.py:finish():1545] shutting down sender +2024-05-23 07:58:23,765 INFO SenderThread:1813 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 07:58:23,765 INFO SenderThread:1813 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/logs/debug.log 
b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..ffa554b7e995df56a7699d79ac5db9842f4d8c1e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_setup.py:_flush():76] Configure stats pid to 1658 +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 07:58:08,481 WARNING MainThread:1658 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/logs/debug.log +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/logs/debug-internal.log +2024-05-23 07:58:08,481 INFO MainThread:1658 [wandb_init.py:init():560] calling init triggers +2024-05-23 07:58:08,482 INFO MainThread:1658 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 07:58:08,482 INFO MainThread:1658 [wandb_init.py:init():610] starting backend +2024-05-23 07:58:08,482 INFO MainThread:1658 [wandb_init.py:init():614] setting up manager +2024-05-23 07:58:08,486 INFO MainThread:1658 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 07:58:08,486 INFO MainThread:1658 [wandb_init.py:init():622] backend started and connected +2024-05-23 07:58:08,490 INFO MainThread:1658 [wandb_init.py:init():711] updated telemetry +2024-05-23 07:58:08,498 INFO MainThread:1658 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 07:58:08,799 INFO MainThread:1658 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 07:58:08,912 INFO MainThread:1658 [wandb_run.py:_on_init():2405] got version response +2024-05-23 07:58:08,912 INFO MainThread:1658 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 07:58:09,197 INFO MainThread:1658 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 07:58:09,197 INFO MainThread:1658 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 07:58:09,198 INFO MainThread:1658 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 07:58:09,198 INFO MainThread:1658 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-23 07:58:09,199 INFO MainThread:1658 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 07:58:23,766 WARNING MsgRouterThr:1658 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/run-stweehpg.wandb b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/run-stweehpg.wandb new file mode 100644 index 0000000000000000000000000000000000000000..43fd283791203026ea00bcda02007ad1d9d36a7a Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_075808-stweehpg/run-stweehpg.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_075850-ykmw09h9/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_075850-ykmw09h9/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..cc349d2419ef241c3482c63f69d01702afcbaf75 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075850-ykmw09h9/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T07:58:51.244770", + "startedAt": "2024-05-23T07:58:50.727693", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step18000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.413475, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3100.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.63568496704102 + } + }, + "memory": { + "total": 1007.4379806518555 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_075850-ykmw09h9/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_075850-ykmw09h9/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..bee5d62e086269ee6a96533f7e3596a539256a9e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075850-ykmw09h9/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 10}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ad452ff954cf167e36c04dd201d505ec3522ad8e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716467811 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/output.log b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..9a2b38ea0765533ba8e1de892b72b75dd158ac2c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:12:36:52,051 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:12:37:00,494 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:12:37:00,494 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:12:37:00,495 INFO 
[evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000'} +2024-05-23:12:37:02,853 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module> + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000/tree/main' for available files.
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..90e33fd5bc724820c39960e510053e4d8cb7fcf8 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T12:36:51.842032", + "startedAt": "2024-05-23T12:36:51.351886", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.50000625, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 209.58238983154297 + } + }, + "memory": { + "total": 1007.4379425048828 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..0e38ede587863eb5078a91099179b8498124a96f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/logs/debug-internal.log @@ -0,0 +1,184 @@ +2024-05-23 12:36:51,377 INFO StreamThr :3618 [internal.py:wandb_internal():85] W&B internal server running at pid: 3618, started at: 2024-05-23 12:36:51.376133 +2024-05-23 12:36:51,383 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: status +2024-05-23 12:36:51,384 INFO WriterThread:3618 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/run-o53g5hgs.wandb +2024-05-23 12:36:51,386 DEBUG SenderThread:3618 [sender.py:send():378] send: header +2024-05-23 12:36:51,389 DEBUG SenderThread:3618 [sender.py:send():378] send: run +2024-05-23 12:36:51,643 INFO SenderThread:3618 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files +2024-05-23 12:36:51,644 INFO SenderThread:3618 [sender.py:_start_run_threads():1123] run started: o53g5hgs with start time 1716467811.376692 +2024-05-23 12:36:51,647 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 12:36:51,648 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: check_version +2024-05-23 12:36:51,767 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 12:36:51,769 DEBUG HandlerThread:3618 [system_info.py:__init__():26] System info init +2024-05-23 12:36:51,769 DEBUG HandlerThread:3618 [system_info.py:__init__():41] System info init done +2024-05-23 12:36:51,769 INFO HandlerThread:3618 [system_monitor.py:start():194] Starting system monitor +2024-05-23 12:36:51,769 INFO SystemMonitor:3618 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 12:36:51,769 INFO HandlerThread:3618 [system_monitor.py:probe():214] Collecting system info +2024-05-23 12:36:51,776 INFO SystemMonitor:3618 [interfaces.py:start():188] Started cpu 
monitoring +2024-05-23 12:36:51,777 INFO SystemMonitor:3618 [interfaces.py:start():188] Started disk monitoring +2024-05-23 12:36:51,782 INFO SystemMonitor:3618 [interfaces.py:start():188] Started memory monitoring +2024-05-23 12:36:51,783 INFO SystemMonitor:3618 [interfaces.py:start():188] Started network monitoring +2024-05-23 12:36:51,841 DEBUG HandlerThread:3618 [system_info.py:probe():150] Probing system +2024-05-23 12:36:51,845 DEBUG HandlerThread:3618 [system_info.py:_probe_git():135] Probing git +2024-05-23 12:36:51,855 ERROR HandlerThread:3618 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 12:36:51,855 DEBUG HandlerThread:3618 [system_info.py:_probe_git():143] Probing git done +2024-05-23 12:36:51,855 DEBUG HandlerThread:3618 [system_info.py:probe():198] Probing system done +2024-05-23 12:36:51,855 DEBUG HandlerThread:3618 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T12:36:51.842032', 'startedAt': '2024-05-23T12:36:51.351886', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.50000625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 209.58238983154297}}, 'memory': {'total': 1007.4379425048828}} +2024-05-23 12:36:51,855 INFO HandlerThread:3618 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 12:36:51,855 INFO HandlerThread:3618 [system_monitor.py:probe():227] Publishing system info +2024-05-23 12:36:51,859 INFO HandlerThread:3618 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 12:36:51,864 
DEBUG SenderThread:3618 [sender.py:send():378] send: files +2024-05-23 12:36:51,864 INFO SenderThread:3618 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 12:36:52,044 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 12:36:52,044 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: python_packages +2024-05-23 12:36:52,045 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 12:36:52,047 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: stop_status +2024-05-23 12:36:52,147 DEBUG SenderThread:3618 [sender.py:send():378] send: telemetry +2024-05-23 12:36:52,440 INFO wandb-upload_0:3618 [upload_job.py:push():130] Uploaded file /tmp/tmp4ybptewtwandb/fv0nplvu-wandb-metadata.json +2024-05-23 12:36:52,646 INFO Thread-12 :3618 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-metadata.json +2024-05-23 12:36:52,646 INFO Thread-12 :3618 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/output.log +2024-05-23 12:36:52,646 INFO Thread-12 :3618 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/requirements.txt +2024-05-23 12:36:54,646 INFO Thread-12 :3618 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/output.log +2024-05-23 12:36:57,150 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:37:02,495 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:37:02,651 INFO Thread-12 :3618 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/output.log +2024-05-23 12:37:02,860 DEBUG SenderThread:3618 [sender.py:send():378] send: exit +2024-05-23 12:37:02,860 INFO SenderThread:3618 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 12:37:02,860 INFO SenderThread:3618 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 12:37:02,862 INFO SenderThread:3618 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:37:02,862 INFO SenderThread:3618 [sender.py:send_exit():593] send defer +2024-05-23 12:37:02,862 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:02,862 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 12:37:02,862 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:02,862 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 12:37:02,862 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 1 +2024-05-23 12:37:02,862 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:02,862 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 12:37:02,862 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:02,862 INFO SenderThread:3618 
[sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 12:37:02,862 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 2 +2024-05-23 12:37:02,863 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:02,863 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 12:37:02,863 INFO HandlerThread:3618 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 12:37:02,863 DEBUG SystemMonitor:3618 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 12:37:02,863 DEBUG SystemMonitor:3618 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 12:37:02,863 DEBUG SystemMonitor:3618 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 12:37:02,864 INFO HandlerThread:3618 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 12:37:02,864 INFO HandlerThread:3618 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 12:37:02,864 INFO HandlerThread:3618 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 12:37:02,864 INFO HandlerThread:3618 [interfaces.py:finish():200] Joined network monitor +2024-05-23 12:37:02,864 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:02,864 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 12:37:02,864 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 3 +2024-05-23 12:37:02,864 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:02,864 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 12:37:02,864 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:02,865 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 12:37:02,865 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 4 +2024-05-23 12:37:02,865 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:02,865 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 12:37:02,865 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:02,865 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 12:37:02,865 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 5 +2024-05-23 12:37:02,865 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:02,865 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 12:37:02,865 DEBUG SenderThread:3618 [sender.py:send():378] send: summary +2024-05-23 12:37:02,866 INFO SenderThread:3618 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:37:02,866 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:02,866 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 12:37:02,866 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 6 +2024-05-23 12:37:02,867 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:02,867 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 12:37:02,867 DEBUG 
SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:02,867 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 12:37:02,871 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:37:02,962 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 7 +2024-05-23 12:37:02,962 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:02,962 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 12:37:02,962 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:02,962 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 12:37:03,653 INFO Thread-12 :3618 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/config.yaml +2024-05-23 12:37:03,653 INFO Thread-12 :3618 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-summary.json +2024-05-23 12:37:03,860 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:37:04,170 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 8 +2024-05-23 12:37:04,170 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:37:04,171 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:04,171 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 12:37:04,171 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:04,171 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 12:37:04,171 INFO SenderThread:3618 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 12:37:04,172 INFO SenderThread:3618 [job_builder.py:_get_source_type():576] no source found +2024-05-23 12:37:04,172 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 9 +2024-05-23 12:37:04,172 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:04,172 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 12:37:04,172 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:04,172 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 12:37:04,172 INFO SenderThread:3618 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 12:37:04,654 INFO SenderThread:3618 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/output.log +2024-05-23 12:37:04,655 INFO SenderThread:3618 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files +2024-05-23 12:37:04,655 INFO SenderThread:3618 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/output.log output.log +2024-05-23 12:37:04,655 INFO SenderThread:3618 [dir_watcher.py:finish():402] scan save: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/requirements.txt requirements.txt +2024-05-23 12:37:04,657 INFO SenderThread:3618 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/config.yaml config.yaml +2024-05-23 12:37:04,660 INFO SenderThread:3618 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-metadata.json wandb-metadata.json +2024-05-23 12:37:04,660 INFO SenderThread:3618 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-summary.json wandb-summary.json +2024-05-23 12:37:04,660 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 10 +2024-05-23 12:37:04,660 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:04,660 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 12:37:04,662 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:04,663 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 12:37:04,663 INFO SenderThread:3618 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:37:04,860 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:37:04,860 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:37:04,893 INFO wandb-upload_0:3618 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/output.log +2024-05-23 12:37:05,225 INFO wandb-upload_3:3618 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/wandb-summary.json +2024-05-23 12:37:05,252 INFO wandb-upload_1:3618 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/requirements.txt +2024-05-23 12:37:05,269 INFO wandb-upload_2:3618 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/files/config.yaml +2024-05-23 12:37:05,469 INFO Thread-11 (_thread_body):3618 [sender.py:transition_state():613] send defer: 11 +2024-05-23 12:37:05,469 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:05,469 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 12:37:05,469 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:05,470 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 12:37:05,470 INFO SenderThread:3618 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 12:37:05,470 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 12 +2024-05-23 12:37:05,470 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:05,470 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 12:37:05,470 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:05,470 INFO SenderThread:3618 
[sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 12:37:05,470 INFO SenderThread:3618 [file_stream.py:finish():601] file stream finish called +2024-05-23 12:37:05,860 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:37:05,872 INFO SenderThread:3618 [file_stream.py:finish():605] file stream finish is done +2024-05-23 12:37:05,872 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 13 +2024-05-23 12:37:05,872 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:37:05,872 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:05,872 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 12:37:05,873 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:05,873 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 12:37:05,873 INFO SenderThread:3618 [sender.py:transition_state():613] send defer: 14 +2024-05-23 12:37:05,873 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:37:05,873 DEBUG SenderThread:3618 [sender.py:send():378] send: final +2024-05-23 12:37:05,873 INFO HandlerThread:3618 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 12:37:05,873 DEBUG SenderThread:3618 [sender.py:send():378] send: footer +2024-05-23 12:37:05,873 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: defer +2024-05-23 12:37:05,873 INFO SenderThread:3618 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 12:37:05,874 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:37:05,874 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:37:05,874 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 12:37:05,874 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 12:37:05,874 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 12:37:05,874 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 12:37:05,875 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:37:05,875 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:37:05,875 DEBUG SenderThread:3618 [sender.py:send_request():405] send_request: server_info +2024-05-23 12:37:05,929 INFO MainThread:3618 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 12:37:05,929 INFO MainThread:3618 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 12:37:05,929 INFO MainThread:3618 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 12:37:05,929 DEBUG HandlerThread:3618 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 12:37:05,929 INFO HandlerThread:3618 [handler.py:finish():882] shutting down handler +2024-05-23 12:37:06,875 INFO WriterThread:3618 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/run-o53g5hgs.wandb +2024-05-23 12:37:06,929 INFO SenderThread:3618 [sender.py:finish():1545] shutting down sender +2024-05-23 12:37:06,929 INFO 
SenderThread:3618 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:37:06,929 INFO SenderThread:3618 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..05a2b74eac447d753d193cffda2b3b8105c96dea --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 12:36:51,371 INFO MainThread:3463 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 12:36:51,371 INFO MainThread:3463 [wandb_setup.py:_flush():76] Configure stats pid to 3463 +2024-05-23 12:36:51,371 INFO MainThread:3463 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 12:36:51,371 INFO MainThread:3463 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 12:36:51,371 INFO MainThread:3463 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 12:36:51,371 INFO MainThread:3463 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 12:36:51,371 WARNING MainThread:3463 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 12:36:51,371 INFO MainThread:3463 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 12:36:51,371 INFO MainThread:3463 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 12:36:51,371 INFO MainThread:3463 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/logs/debug.log +2024-05-23 12:36:51,372 INFO MainThread:3463 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/logs/debug-internal.log +2024-05-23 12:36:51,372 INFO MainThread:3463 [wandb_init.py:init():560] calling init triggers +2024-05-23 12:36:51,372 INFO MainThread:3463 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 12:36:51,372 INFO MainThread:3463 [wandb_init.py:init():610] starting backend +2024-05-23 12:36:51,372 INFO MainThread:3463 [wandb_init.py:init():614] setting up manager +2024-05-23 12:36:51,375 INFO MainThread:3463 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 12:36:51,376 INFO MainThread:3463 [wandb_init.py:init():622] backend started and connected +2024-05-23 12:36:51,380 INFO MainThread:3463 [wandb_init.py:init():711] updated telemetry +2024-05-23 12:36:51,388 INFO MainThread:3463 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 12:36:51,647 INFO MainThread:3463 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 12:36:51,761 INFO MainThread:3463 [wandb_run.py:_on_init():2405] got version response +2024-05-23 12:36:51,761 INFO MainThread:3463 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 12:36:52,045 INFO MainThread:3463 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 12:36:52,045 INFO MainThread:3463 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 12:36:52,045 INFO 
MainThread:3463 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 12:36:52,045 INFO MainThread:3463 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 12:36:52,049 INFO MainThread:3463 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 12:37:06,930 WARNING MsgRouterThr:3463 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/run-o53g5hgs.wandb b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/run-o53g5hgs.wandb new file mode 100644 index 0000000000000000000000000000000000000000..36ecea61f604b01ae8f87392c5401bee1e6ce0a7 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_123651-o53g5hgs/run-o53g5hgs.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b6de5c0faf8ba831427e5a0bb5b96f5aa304d99a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716469363 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/output.log b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..90fa0ba189a84d96eb4c35ebc6c476288efb44a6 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:13:02:44,231 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:13:02:53,641 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:13:02:53,642 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:13:02:53,643 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'} +2024-05-23:13:02:55,935 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module> + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in
create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 
+pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..70061575c1e1b3c9d04626a5c41d7ea490539b17 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T13:02:44.021854", + "startedAt": "2024-05-23T13:02:43.482903", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.5, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, 
+ "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, 
+ { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.62197494506836 + } + }, + "memory": { + "total": 1007.4379539489746 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..018b4068e81a342faa2a3b0691dee6965106bcc3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 12}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..ffbda243336588f33fbdac1be7b4f22187243c84 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/logs/debug-internal.log @@ -0,0 +1,185 @@ +2024-05-23 13:02:43,504 INFO StreamThr :815 [internal.py:wandb_internal():85] W&B internal server running at pid: 815, started at: 2024-05-23 13:02:43.502191 +2024-05-23 13:02:43,508 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: status +2024-05-23 13:02:43,509 INFO WriterThread:815 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/run-vlbhvlfm.wandb +2024-05-23 13:02:43,511 DEBUG SenderThread:815 [sender.py:send():378] send: header +2024-05-23 13:02:43,514 
DEBUG SenderThread:815 [sender.py:send():378] send: run +2024-05-23 13:02:43,824 INFO SenderThread:815 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files +2024-05-23 13:02:43,824 INFO SenderThread:815 [sender.py:_start_run_threads():1123] run started: vlbhvlfm with start time 1716469363.501902 +2024-05-23 13:02:43,825 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 13:02:43,825 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: check_version +2024-05-23 13:02:43,944 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 13:02:43,947 DEBUG HandlerThread:815 [system_info.py:__init__():26] System info init +2024-05-23 13:02:43,947 DEBUG HandlerThread:815 [system_info.py:__init__():41] System info init done +2024-05-23 13:02:43,947 INFO HandlerThread:815 [system_monitor.py:start():194] Starting system monitor +2024-05-23 13:02:43,947 INFO SystemMonitor:815 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 13:02:43,947 INFO HandlerThread:815 [system_monitor.py:probe():214] Collecting system info +2024-05-23 13:02:43,954 INFO SystemMonitor:815 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 13:02:43,954 INFO SystemMonitor:815 [interfaces.py:start():188] Started disk monitoring +2024-05-23 13:02:43,960 INFO SystemMonitor:815 [interfaces.py:start():188] Started memory monitoring +2024-05-23 13:02:43,961 INFO SystemMonitor:815 [interfaces.py:start():188] Started network monitoring +2024-05-23 13:02:44,021 DEBUG HandlerThread:815 [system_info.py:probe():150] Probing system +2024-05-23 13:02:44,024 DEBUG HandlerThread:815 [system_info.py:_probe_git():135] Probing git +2024-05-23 13:02:44,034 ERROR HandlerThread:815 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 13:02:44,034 DEBUG HandlerThread:815 [system_info.py:_probe_git():143] Probing git done +2024-05-23 13:02:44,034 DEBUG HandlerThread:815 [system_info.py:probe():198] Probing system done +2024-05-23 13:02:44,034 DEBUG HandlerThread:815 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T13:02:44.021854', 'startedAt': '2024-05-23T13:02:43.482903', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.5, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.62197494506836}}, 'memory': {'total': 1007.4379539489746}} +2024-05-23 13:02:44,035 INFO HandlerThread:815 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 13:02:44,035 INFO HandlerThread:815 [system_monitor.py:probe():227] Publishing system info +2024-05-23 13:02:44,038 INFO HandlerThread:815 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 13:02:44,044 DEBUG SenderThread:815 [sender.py:send():378] send: files +2024-05-23 13:02:44,044 INFO SenderThread:815 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 13:02:44,223 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 13:02:44,224 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: python_packages +2024-05-23 13:02:44,224 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 13:02:44,227 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: stop_status +2024-05-23 13:02:44,334 DEBUG SenderThread:815 [sender.py:send():378] send: telemetry +2024-05-23 13:02:44,634 INFO wandb-upload_0:815 [upload_job.py:push():130] Uploaded file /tmp/tmpao_7zwc_wandb/d3oq04in-wandb-metadata.json +2024-05-23 13:02:44,825 INFO Thread-12 :815 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-metadata.json +2024-05-23 13:02:44,825 INFO Thread-12 :815 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/requirements.txt +2024-05-23 13:02:44,825 INFO Thread-12 :815 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/output.log +2024-05-23 13:02:46,825 INFO Thread-12 :815 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/output.log +2024-05-23 13:02:49,336 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:02:54,643 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:02:54,831 INFO Thread-12 :815 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/output.log +2024-05-23 13:02:55,951 DEBUG SenderThread:815 [sender.py:send():378] send: exit 
+2024-05-23 13:02:55,951 INFO SenderThread:815 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 13:02:55,952 INFO SenderThread:815 [sender.py:send_exit():587] handling runtime: 12 +2024-05-23 13:02:55,953 INFO SenderThread:815 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:02:55,954 INFO SenderThread:815 [sender.py:send_exit():593] send defer +2024-05-23 13:02:55,954 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:55,954 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 13:02:55,954 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:55,954 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 13:02:55,954 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 1 +2024-05-23 13:02:55,954 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:55,954 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 13:02:55,954 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:55,954 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 13:02:55,954 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 2 +2024-05-23 13:02:55,954 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:55,954 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 13:02:55,954 INFO HandlerThread:815 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 13:02:55,955 INFO HandlerThread:815 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 13:02:55,955 INFO HandlerThread:815 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 13:02:55,955 INFO HandlerThread:815 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 13:02:55,955 INFO HandlerThread:815 [interfaces.py:finish():200] Joined network monitor +2024-05-23 13:02:55,955 DEBUG SystemMonitor:815 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 13:02:55,955 DEBUG SystemMonitor:815 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 13:02:55,955 DEBUG SystemMonitor:815 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 13:02:55,958 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:55,959 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 13:02:55,959 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 3 +2024-05-23 13:02:55,959 DEBUG SenderThread:815 [sender.py:send():378] send: stats +2024-05-23 13:02:55,960 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:55,960 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 13:02:55,960 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:55,960 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 13:02:55,960 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 4 +2024-05-23 13:02:55,960 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:55,960 INFO HandlerThread:815 
[handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 13:02:55,960 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:55,960 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 13:02:55,960 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 5 +2024-05-23 13:02:55,960 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:55,960 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 13:02:55,960 DEBUG SenderThread:815 [sender.py:send():378] send: summary +2024-05-23 13:02:55,961 INFO SenderThread:815 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:02:55,961 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:55,961 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 13:02:55,962 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 6 +2024-05-23 13:02:55,962 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:55,962 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 13:02:55,962 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:55,962 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 13:02:55,967 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:02:56,032 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 7 +2024-05-23 13:02:56,032 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:56,032 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 13:02:56,032 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:56,032 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 13:02:56,834 INFO Thread-12 :815 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/config.yaml +2024-05-23 13:02:56,834 INFO Thread-12 :815 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-summary.json +2024-05-23 13:02:56,952 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:02:58,359 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 8 +2024-05-23 13:02:58,359 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:02:58,359 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:58,359 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 13:02:58,359 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:58,359 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 13:02:58,359 INFO SenderThread:815 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 13:02:58,360 INFO SenderThread:815 [job_builder.py:_get_source_type():576] no source found +2024-05-23 13:02:58,360 INFO SenderThread:815 
[sender.py:transition_state():613] send defer: 9 +2024-05-23 13:02:58,360 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:58,360 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 13:02:58,360 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:58,360 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 13:02:58,360 INFO SenderThread:815 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 13:02:58,835 INFO SenderThread:815 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/output.log +2024-05-23 13:02:58,836 INFO SenderThread:815 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files +2024-05-23 13:02:58,836 INFO SenderThread:815 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-summary.json wandb-summary.json +2024-05-23 13:02:58,836 INFO SenderThread:815 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/requirements.txt requirements.txt +2024-05-23 13:02:58,838 INFO SenderThread:815 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/output.log output.log +2024-05-23 13:02:58,839 INFO SenderThread:815 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-metadata.json wandb-metadata.json +2024-05-23 13:02:58,839 INFO SenderThread:815 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/config.yaml config.yaml +2024-05-23 13:02:58,839 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 10 +2024-05-23 13:02:58,839 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:58,839 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 13:02:58,839 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:58,839 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 13:02:58,839 INFO SenderThread:815 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:02:58,952 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:02:58,952 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:02:59,095 INFO wandb-upload_0:815 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/wandb-summary.json +2024-05-23 13:02:59,394 INFO wandb-upload_1:815 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/requirements.txt +2024-05-23 13:02:59,433 INFO wandb-upload_2:815 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/output.log +2024-05-23 13:02:59,490 INFO wandb-upload_3:815 [upload_job.py:push():130] 
Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/files/config.yaml +2024-05-23 13:02:59,690 INFO Thread-11 (_thread_body):815 [sender.py:transition_state():613] send defer: 11 +2024-05-23 13:02:59,690 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:59,690 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 13:02:59,691 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:59,691 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 13:02:59,691 INFO SenderThread:815 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 13:02:59,691 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 12 +2024-05-23 13:02:59,691 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:59,691 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 13:02:59,691 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:59,691 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 13:02:59,691 INFO SenderThread:815 [file_stream.py:finish():601] file stream finish called +2024-05-23 13:02:59,953 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:02:59,957 INFO SenderThread:815 [file_stream.py:finish():605] file stream finish is done +2024-05-23 13:02:59,957 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 13 +2024-05-23 13:02:59,958 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:02:59,958 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:59,958 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 13:02:59,958 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:59,958 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 13:02:59,958 INFO SenderThread:815 [sender.py:transition_state():613] send defer: 14 +2024-05-23 13:02:59,958 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:02:59,958 INFO HandlerThread:815 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 13:02:59,958 DEBUG SenderThread:815 [sender.py:send():378] send: final +2024-05-23 13:02:59,958 DEBUG SenderThread:815 [sender.py:send():378] send: footer +2024-05-23 13:02:59,958 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: defer +2024-05-23 13:02:59,958 INFO SenderThread:815 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 13:02:59,959 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:02:59,959 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:02:59,959 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 13:02:59,959 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 13:02:59,959 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 13:02:59,960 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: internal_messages 
+2024-05-23 13:02:59,960 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:02:59,960 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:02:59,960 DEBUG SenderThread:815 [sender.py:send_request():405] send_request: server_info +2024-05-23 13:03:00,015 INFO MainThread:815 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 13:03:00,015 INFO MainThread:815 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 13:03:00,015 INFO MainThread:815 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 13:03:00,016 DEBUG HandlerThread:815 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 13:03:00,016 INFO HandlerThread:815 [handler.py:finish():882] shutting down handler +2024-05-23 13:03:00,960 INFO WriterThread:815 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/run-vlbhvlfm.wandb +2024-05-23 13:03:01,015 INFO SenderThread:815 [sender.py:finish():1545] shutting down sender +2024-05-23 13:03:01,015 INFO SenderThread:815 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:03:01,015 INFO SenderThread:815 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..855e334071f65aaf7f08634c8ae0a7b8d59ef3ff --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_setup.py:_flush():76] Configure stats pid to 659 +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 13:02:43,497 WARNING MainThread:659 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/logs/debug.log +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/logs/debug-internal.log +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_init.py:init():560] calling init triggers +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 13:02:43,497 INFO MainThread:659 
[wandb_init.py:init():610] starting backend +2024-05-23 13:02:43,497 INFO MainThread:659 [wandb_init.py:init():614] setting up manager +2024-05-23 13:02:43,500 INFO MainThread:659 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 13:02:43,501 INFO MainThread:659 [wandb_init.py:init():622] backend started and connected +2024-05-23 13:02:43,505 INFO MainThread:659 [wandb_init.py:init():711] updated telemetry +2024-05-23 13:02:43,513 INFO MainThread:659 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 13:02:43,824 INFO MainThread:659 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 13:02:43,938 INFO MainThread:659 [wandb_run.py:_on_init():2405] got version response +2024-05-23 13:02:43,939 INFO MainThread:659 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 13:02:44,225 INFO MainThread:659 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 13:02:44,225 INFO MainThread:659 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 13:02:44,225 INFO MainThread:659 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 13:02:44,225 INFO MainThread:659 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 13:02:44,229 INFO MainThread:659 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 13:03:01,017 WARNING MsgRouterThr:659 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/run-vlbhvlfm.wandb b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/run-vlbhvlfm.wandb new file mode 100644 index 0000000000000000000000000000000000000000..72844cae9f53435c57cbc85031a64a05c5ad588b Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_130243-vlbhvlfm/run-vlbhvlfm.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240529_130528-qqvuwacj/files/config.yaml b/lm-evaluation-harness/wandb/run-20240529_130528-qqvuwacj/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..456006babe49ae6c689f44080555cbb33032052a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130528-qqvuwacj/files/config.yaml @@ -0,0 +1,283 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716987928 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 2 + - 23 + - 62 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 +task_configs: + desc: null + value: + arc_easy: + task: arc_easy + group: + - ai2_arc + dataset_path: allenai/ai2_arc + dataset_name: ARC-Easy + training_split: train + validation_split: validation + test_split: test + doc_to_text: 'Question: {{question}} + + Answer:' + doc_to_target: '{{choices.label.index(answerKey)}}' + doc_to_choice: '{{choices.text}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: 'Question: {{question}} + + Answer:' + metadata: + version: 1.0 + boolq: + 
task: boolq + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: boolq + training_split: train + validation_split: validation + doc_to_text: '{{passage}} + + Question: {{question}}? + + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: passage + metadata: + version: 2.0 + copa: + task: copa + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: copa + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n # Drop the period\n connector =\ + \ {\n \"cause\": \"because\",\n \"effect\": \"therefore\",\n\ + \ }[doc[\"question\"]]\n return doc[\"premise\"].strip()[:-1] + f\"\ + \ {connector}\"\n" + doc_to_target: "def doc_to_target(doc):\n correct_choice = doc[\"choice1\"\ + ] if doc[\"label\"] == 0 else doc[\"choice2\"]\n # Connect the sentences\n\ + \ return \" \" + convert_choice(correct_choice)\n" + doc_to_choice: "def doc_to_choice(doc):\n return [\" \" + convert_choice(doc[\"\ + choice1\"]), \" \" + convert_choice(doc[\"choice2\"])]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + mrpc: + task: mrpc + group: glue + dataset_path: glue + dataset_name: mrpc + training_split: train + validation_split: validation + doc_to_text: 'Sentence 1: {{sentence1}} + + Sentence 2: {{sentence2}} + + Question: Do both sentences mean the same thing? + + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + - metric: f1 + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + piqa: + task: piqa + dataset_path: piqa + training_split: train + validation_split: validation + doc_to_text: 'Question: {{goal}} + + Answer:' + doc_to_target: label + doc_to_choice: '{{[sol1, sol2]}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: goal + metadata: + version: 1.0 + sst2: + task: sst2 + group: glue + dataset_path: glue + dataset_name: sst2 + training_split: train + validation_split: validation + doc_to_text: '{{sentence}} + + Question: Is this sentence positive or negative? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - negative + - positive + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + winogrande: + task: winogrande + dataset_path: winogrande + dataset_name: winogrande_xl + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n\ + \ return answer_to_num[doc[\"answer\"]]\n" + doc_to_target: "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"\ + _\") + 1\n return doc[\"sentence\"][idx:].strip()\n" + doc_to_choice: "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"\ + _\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"\ + sentence\"][:idx] + opt for opt in options]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: sentence + metadata: + version: 1.0 +cli_configs: + desc: null + value: + model: hf + model_args: pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint/llamav2-3b/hf/global_step30000,tokenizer=huggyllama/llama-13b + batch_size: auto + batch_sizes: + - 64 + device: null + use_cache: null + limit: null + bootstrap_iters: 100000 + gen_kwargs: null diff --git a/lm-evaluation-harness/wandb/run-20240529_130528-qqvuwacj/files/media/table/evaluation/eval_results_1_fbb22263b1e18b2e7bbe.table.json b/lm-evaluation-harness/wandb/run-20240529_130528-qqvuwacj/files/media/table/evaluation/eval_results_1_fbb22263b1e18b2e7bbe.table.json new file mode 100644 index 0000000000000000000000000000000000000000..5919f2846639aaf2776ecb299ff32888fcdf4d94 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130528-qqvuwacj/files/media/table/evaluation/eval_results_1_fbb22263b1e18b2e7bbe.table.json @@ -0,0 +1 @@ +{"columns": ["Tasks", "Version", "Filter", "num_fewshot", "Metric", "Value", "Stderr"], "data": [["winogrande", 1.0, "none", 0, "acc", "0.47592738752959746", "0.0140"], ["sst2", 1.0, "none", 0, "acc", "0.5126146788990825", "0.0169"], ["piqa", 1.0, "none", 0, "acc", "0.5282916213275299", "0.0116"], ["piqa", 1.0, "none", 0, "acc_norm", "0.500544069640914", "0.0117"], ["mrpc", 1.0, "none", 0, "acc", "0.49754901960784315", "0.0248"], ["mrpc", 1.0, "none", 0, "f1", "0.594059405940594", "0.0259"], ["copa", 1.0, "none", 0, "acc", "0.65", "0.0479"], ["boolq", 2.0, "none", 0, "acc", "0.5064220183486239", "0.0087"], ["arc_easy", 1.0, "none", 0, "acc", "0.26136363636363635", "0.0090"], ["arc_easy", 1.0, "none", 0, "acc_norm", "0.26304713804713803", "0.0090"]]} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240529_130528-qqvuwacj/files/output.log b/lm-evaluation-harness/wandb/run-20240529_130528-qqvuwacj/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..56e6bd0f5d32bfb9b3e57fbac15a18ba8d16ff8b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130528-qqvuwacj/files/output.log @@ -0,0 +1,489 @@ + +2024-05-29:13:05:29,030 INFO [__main__.py:251] Verbosity set to INFO +2024-05-29:13:05:38,564 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-05-29:13:05:38,565 INFO [evaluator.py:131] Setting random seed to 0 | Setting 
numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-29:13:05:38,566 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint/llamav2-3b/hf/global_step30000', 'tokenizer': 'huggyllama/llama-13b'} +2024-05-29:13:05:40,885 INFO [huggingface.py:164] Using device 'cuda' +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Downloading readme: 100%|██████████| 9.00k/9.00k [00:00<00:00, 17.2MB/s] +Downloading data: 100%|██████████| 331k/331k [00:00<00:00, 4.33MB/s] +Downloading data: 100%|██████████| 346k/346k [00:00<00:00, 4.71MB/s] +Downloading data: 100%|██████████| 86.1k/86.1k [00:00<00:00, 1.15MB/s] +Generating train split: 100%|██████████| 2251/2251 [00:00<00:00, 50306.80 examples/s] +Generating test split: 100%|██████████| 2376/2376 [00:00<00:00, 330920.35 examples/s] +Generating validation split: 100%|██████████| 570/570 [00:00<00:00, 191413.39 examples/s] +2024-05-29:13:06:11,859 WARNING [task.py:763] [Task: boolq] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:06:11,860 WARNING [task.py:775] [Task: boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for super_glue contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/super_glue +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 30.7k/30.7k [00:00<00:00, 39.0MB/s] +Downloading readme: 100%|██████████| 18.2k/18.2k [00:00<00:00, 31.3MB/s] +Downloading data: 100%|██████████| 4.12M/4.12M [00:00<00:00, 36.7MB/s] +Generating train split: 100%|██████████| 9427/9427 [00:00<00:00, 21893.00 examples/s] +Generating validation split: 100%|██████████| 3270/3270 [00:00<00:00, 22241.42 examples/s] +Generating test split: 100%|██████████| 3245/3245 [00:00<00:00, 23197.16 examples/s] +2024-05-29:13:06:15,552 WARNING [task.py:763] [Task: copa] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:06:15,553 WARNING [task.py:775] [Task: copa] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 44.0k/44.0k [00:00<00:00, 1.23MB/s] +Generating train split: 100%|██████████| 400/400 [00:00<00:00, 16289.98 examples/s] +Generating validation split: 100%|██████████| 100/100 [00:00<00:00, 12671.23 examples/s] +Generating test split: 100%|██████████| 500/500 [00:00<00:00, 17242.91 examples/s] +2024-05-29:13:06:17,881 WARNING [task.py:763] [Task: mrpc] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:06:17,881 WARNING [task.py:775] [Task: mrpc] metric acc is defined, but higher_is_better is not. 
using default higher_is_better=True +2024-05-29:13:06:17,882 WARNING [task.py:763] [Task: mrpc] metric f1 is defined, but aggregation is not. using default aggregation=f1 +2024-05-29:13:06:17,882 WARNING [task.py:775] [Task: mrpc] metric f1 is defined, but higher_is_better is not. using default higher_is_better=True +Downloading readme: 100%|██████████| 35.3k/35.3k [00:00<00:00, 45.2MB/s] +Downloading data: 100%|██████████| 649k/649k [00:00<00:00, 4.43MB/s] +Downloading data: 100%|██████████| 75.7k/75.7k [00:00<00:00, 515kB/s] +Downloading data: 100%|██████████| 308k/308k [00:00<00:00, 2.07MB/s] +Generating train split: 100%|██████████| 3668/3668 [00:00<00:00, 409385.50 examples/s] +Generating validation split: 100%|██████████| 408/408 [00:00<00:00, 177555.10 examples/s] +Generating test split: 100%|██████████| 1725/1725 [00:00<00:00, 357982.01 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for piqa contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/piqa +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 5.36k/5.36k [00:00<00:00, 12.6MB/s] +Downloading readme: 100%|██████████| 8.41k/8.41k [00:00<00:00, 17.6MB/s] +Downloading data: 100%|██████████| 1.82M/1.82M [00:00<00:00, 4.16MB/s] +Downloading data: 100%|██████████| 815k/815k [00:00<00:00, 22.0MB/s] +Generating train split: 100%|██████████| 16113/16113 [00:00<00:00, 23664.84 examples/s] +Generating test split: 100%|██████████| 3084/3084 [00:00<00:00, 24456.31 examples/s] +Generating validation split: 100%|██████████| 1838/1838 [00:00<00:00, 22623.81 examples/s] +2024-05-29:13:06:28,855 WARNING [task.py:763] [Task: sst2] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:06:28,855 WARNING [task.py:775] [Task: sst2] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 3.11M/3.11M [00:00<00:00, 20.1MB/s] +Downloading data: 100%|██████████| 72.8k/72.8k [00:00<00:00, 509kB/s] +Downloading data: 100%|██████████| 148k/148k [00:00<00:00, 1.02MB/s] +Generating train split: 100%|██████████| 67349/67349 [00:00<00:00, 1422904.82 examples/s] +Generating validation split: 100%|██████████| 872/872 [00:00<00:00, 400595.08 examples/s] +Generating test split: 100%|██████████| 1821/1821 [00:00<00:00, 542459.35 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for winogrande contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/winogrande +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. 
+ warnings.warn( +Downloading builder script: 100%|██████████| 5.65k/5.65k [00:00<00:00, 17.8MB/s] +Downloading readme: 100%|██████████| 9.97k/9.97k [00:00<00:00, 20.6MB/s] +Downloading data: 100%|██████████| 3.40M/3.40M [00:00<00:00, 6.94MB/s] +Generating train split: 100%|██████████| 40398/40398 [00:01<00:00, 24326.81 examples/s] +Generating test split: 100%|██████████| 1767/1767 [00:00<00:00, 24409.26 examples/s] +Generating validation split: 100%|██████████| 1267/1267 [00:00<00:00, 23774.23 examples/s] +2024-05-29:13:06:41,393 INFO [task.py:395] Building contexts for winogrande on rank 0... +100%|██████████| 1267/1267 [00:00<00:00, 69435.59it/s] +2024-05-29:13:06:41,475 INFO [task.py:395] Building contexts for sst2 on rank 0... +100%|██████████| 872/872 [00:00<00:00, 2580.49it/s] +2024-05-29:13:06:41,843 INFO [task.py:395] Building contexts for piqa on rank 0... +100%|██████████| 1838/1838 [00:01<00:00, 1085.42it/s] +2024-05-29:13:06:43,612 INFO [task.py:395] Building contexts for mrpc on rank 0... +100%|██████████| 408/408 [00:00<00:00, 1880.82it/s] +2024-05-29:13:06:43,848 INFO [task.py:395] Building contexts for copa on rank 0... +100%|██████████| 100/100 [00:00<00:00, 60707.83it/s] +2024-05-29:13:06:43,857 INFO [task.py:395] Building contexts for boolq on rank 0... +100%|██████████| 3270/3270 [00:01<00:00, 1991.77it/s] +2024-05-29:13:06:45,633 INFO [task.py:395] Building contexts for arc_easy on rank 0... +100%|██████████| 2376/2376 [00:02<00:00, 1073.21it/s] +2024-05-29:13:06:47,992 INFO [evaluator.py:379] Running loglikelihood requests +Running loglikelihood requests: 0%| | 0/25011 [00:00>> from transformers import MambaConfig, MambaModel + + >>> # Initializing a Mamba configuration + >>> configuration = MambaConfig() + + >>> # Initializing a model (with random weights) from the configuration + >>> model = MambaModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "mamba" + + def __init__( + self, + vocab_size=50280, + hidden_size=768, + state_size=16, + num_hidden_layers=32, + layer_norm_epsilon=1e-5, + pad_token_id=0, + bos_token_id=0, + eos_token_id=0, + expand=2, + conv_kernel=4, + use_bias=False, + use_conv_bias=True, + hidden_act="silu", + initializer_range=0.1, + residual_in_fp32=True, + time_step_rank="auto", + time_step_scale=1.0, + time_step_min=0.001, + time_step_max=0.1, + time_step_init_scheme="random", + time_step_floor=1e-4, + rescale_prenorm_residual=False, + use_cache=True, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.state_size = state_size + self.num_hidden_layers = num_hidden_layers + self.layer_norm_epsilon = layer_norm_epsilon + self.conv_kernel = conv_kernel + self.expand = expand + self.intermediate_size = int(expand * self.hidden_size) + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + self.pad_token_id = pad_token_id + self.use_bias = use_bias + self.use_conv_bias = use_conv_bias + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == "auto" else time_step_rank + self.time_step_scale = time_step_scale + self.time_step_min = time_step_min + self.time_step_max = time_step_max + self.time_step_init_scheme = time_step_init_scheme + self.time_step_floor = time_step_floor + self.rescale_prenorm_residual = rescale_prenorm_residual + self.residual_in_fp32 = residual_in_fp32 + self.use_cache = use_cache + + 
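+        # With the defaults above (hidden_size=768, expand=2, time_step_rank="auto"), the
+        # derived values work out to intermediate_size = int(2 * 768) = 1536 and
+        # time_step_rank = math.ceil(768 / 16) = 48.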
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mamba/convert_mamba_ssm_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/mamba/convert_mamba_ssm_checkpoint_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..0cf7dcc0edafab9a7d0b7d0824063d2acf5d0783 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mamba/convert_mamba_ssm_checkpoint_to_pytorch.py @@ -0,0 +1,153 @@ +# coding=utf-8 +# Copyright 2024 state-spaces/mamba org and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This script can be used to convert checkpoints provided in the `mamba_ssm` library into the format provided in HuggingFace `transformers`. It depends on the `mamba_ssm` package to be installed.""" + +import argparse +import json +import math +from typing import Tuple + +import torch + +from transformers import AutoTokenizer, MambaConfig, MambaForCausalLM +from transformers.utils import logging +from transformers.utils.import_utils import is_mamba_ssm_available + + +if is_mamba_ssm_available(): + from mamba_ssm.models.config_mamba import MambaConfig as MambaConfigSSM + from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel + + def convert_ssm_config_to_hf_config(config_ssm: MambaConfigSSM) -> MambaConfig: + """Convert a MambaConfig from mamba_ssm to a MambaConfig from transformers.""" + hf_config = MambaConfig() + # Set config hidden size, num hidden layers, and vocab size directly from the original config + hf_config.hidden_size = config_ssm.d_model + hf_config.intermediate_size = config_ssm.d_model * 2 + hf_config.time_step_rank = math.ceil(config_ssm.d_model / 16) + + hf_config.num_hidden_layers = config_ssm.n_layer + vocab_size = config_ssm.vocab_size + pad_vocab_size_multiple = config_ssm.pad_vocab_size_multiple + if (vocab_size % pad_vocab_size_multiple) != 0: + vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple) + hf_config.vocab_size = vocab_size + return hf_config + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +def convert_mamba_ssm_checkpoint_to_huggingface_model( + original_state_dict: dict, original_ssm_config_dict: dict +) -> Tuple[MambaForCausalLM, AutoTokenizer]: + if not is_mamba_ssm_available(): + raise ImportError( + "Calling convert_mamba_ssm_checkpoint_to_huggingface_model requires the mamba_ssm library to be installed. Please install it with `pip install mamba_ssm`." + ) + original_ssm_config = MambaConfigSSM(**original_ssm_config_dict) + + # Convert mamba_ssm config to huggingface MambaConfig + hf_config = convert_ssm_config_to_hf_config(original_ssm_config) + + # No weights need to be renamed between the two models. + converted_state_dict = original_state_dict + + # Load reshaped state dict into a huggingface model. 
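+    # ("Reshaped" is the identity here: per the note above, the `mamba_ssm` checkpoint
+    # already uses the parameter layout `MambaForCausalLM` expects, i.e. `backbone.*`
+    # block weights plus `lm_head.weight`, so `load_state_dict` accepts it unchanged.)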
+ hf_model = MambaForCausalLM(hf_config) + tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") + hf_model.load_state_dict(converted_state_dict) + return (hf_model, tokenizer) + + +def validate_converted_model( + original_state_dict: dict, original_ssm_config_dict: dict, hf_model: MambaForCausalLM, tokenizer: AutoTokenizer +) -> None: + """Validate the converted model returns the same output as the original model.""" + torch_device = "cuda" + + original_config = MambaConfigSSM(**original_ssm_config_dict) + original_model = MambaLMHeadModel(original_config).to(torch_device) + original_model.load_state_dict(original_state_dict) + + hf_model = hf_model.to(torch_device) + input_ids = tokenizer("Hey how are you doing?", return_tensors="pt")["input_ids"].to(torch_device) + # Assert model logits are close + with torch.no_grad(): + original_model_logits = original_model(input_ids).logits + hf_model_logits = hf_model(input_ids).logits + if not torch.allclose(original_model_logits, hf_model_logits, atol=1e-3): + raise ValueError("The converted model did not return the same logits as the original model.") + + logger.info("Model conversion validated successfully.") + + +def convert_mamba_checkpoint_file_to_huggingface_model_file( + mamba_checkpoint_path: str, config_json_file: str, output_dir: str +) -> None: + if not is_mamba_ssm_available(): + raise ImportError( + "Calling convert_mamba_checkpoint_file_to_huggingface_model_file requires the mamba_ssm library to be installed. Please install it with `pip install mamba_ssm`." + ) + if not torch.cuda.is_available(): + raise ValueError( + "This script is to be run with a CUDA device, as the original mamba_ssm model does not support cpu." + ) + logger.info(f"Loading model from {mamba_checkpoint_path} based on config from {config_json_file}") + # Load weights and config from paths + original_state_dict = torch.load(mamba_checkpoint_path, map_location="cpu") + with open(config_json_file, "r", encoding="utf-8") as json_file: + original_ssm_config_dict = json.load(json_file) + + # Convert the model + hf_model, tokenizer = convert_mamba_ssm_checkpoint_to_huggingface_model( + original_state_dict, original_ssm_config_dict + ) + + # Validate the conversion + validate_converted_model(original_state_dict, original_ssm_config_dict, hf_model, tokenizer) + + logger.info(f"Model converted successfully. Saving model to {output_dir}") + + # Save new model to pytorch_dump_path + hf_model.save_pretrained(output_dir) + tokenizer.save_pretrained(output_dir) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "-i", + "--mamba_checkpoint_file", + type=str, + required=True, + help="Path to a `pytorch_model.bin` mamba_ssm checkpoint file to be converted.", + ) + parser.add_argument( + "-c", + "--config_json_file", + type=str, + required=True, + help="Path to a `config.json` file corresponding to a MambaConfig of the original mamba_ssm model.", + ) + parser.add_argument( + "-o", "--output_dir", type=str, required=True, help="Path to directory to save the converted output model to." 
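+        # Example invocation (hypothetical paths; requires `mamba_ssm` and a CUDA device):
+        #   python convert_mamba_ssm_checkpoint_to_pytorch.py \
+        #       -i ./mamba/pytorch_model.bin -c ./mamba/config.json -o ./mamba-hf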
+ ) + args = parser.parse_args() + + convert_mamba_checkpoint_file_to_huggingface_model_file( + args.mamba_checkpoint_file, args.config_json_file, args.output_dir + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mamba/modeling_mamba.py b/venv/lib/python3.10/site-packages/transformers/models/mamba/modeling_mamba.py new file mode 100644 index 0000000000000000000000000000000000000000..8f19c361269e2763a631e29e3204032e22193fc9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mamba/modeling_mamba.py @@ -0,0 +1,709 @@ +# coding=utf-8 +# Copyright 2024 state-spaces/mamba org and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch MAMBA model.""" + +import math +from dataclasses import dataclass +from typing import Any, Dict, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, +) +from ...utils.import_utils import is_causal_conv1d_available, is_mamba_ssm_available +from .configuration_mamba import MambaConfig + + +logger = logging.get_logger(__name__) + +if is_mamba_ssm_available(): + from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, selective_scan_fn + from mamba_ssm.ops.triton.selective_state_update import selective_state_update +else: + selective_state_update, selective_scan_fn, mamba_inner_fn = None, None, None + +if is_causal_conv1d_available(): + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +else: + causal_conv1d_update, causal_conv1d_fn = None, None + +is_fast_path_available = all( + (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn) +) + +_CHECKPOINT_FOR_DOC = "state-spaces/mamba-130m-hf" +_CONFIG_FOR_DOC = "MambaConfig" + + +from ..deprecated._archive_maps import MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +class MambaCache: + """ + Arguments: + config: MambaConfig + batch_size: int + dtype: torch.dtype + device: torch.device + + Attributes: + seqlen_offset: int + dtype: torch.dtype + conv_states: Dict[int, torch.Tensor] # layer_idx -> [batch_size, intermediate_size, conv_kernel_size] + ssm_states: Dict[int, torch.Tensor] # layer_idx -> [batch_size, intermediate_size, ssm_state_size] + """ + + def __init__( + self, config: MambaConfig, batch_size: int, dtype: torch.dtype = torch.float16, device: Optional[str] = None + ): + self.seqlen_offset = 0 + self.dtype = dtype + intermediate_size = config.intermediate_size + ssm_state_size = config.state_size + conv_kernel_size = config.conv_kernel + + self.conv_states = { + i: torch.zeros(batch_size, intermediate_size, conv_kernel_size, device=device, dtype=dtype) + for i in range(config.num_hidden_layers) + } + self.ssm_states = { + i: torch.zeros(batch_size, 
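+            # Cache shapes with the default MambaConfig (intermediate_size=1536,
+            # conv_kernel=4, state_size=16): each layer's conv_states entry is
+            # [batch, 1536, 4] (a rolling input window for the causal conv) and each
+            # ssm_states entry is [batch, 1536, 16].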
intermediate_size, ssm_state_size, device=device, dtype=dtype)
+            for i in range(config.num_hidden_layers)
+        }
+
+
+class MambaMixer(nn.Module):
+    """
+    Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
+    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
+    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
+    and is why Mamba is called **selective** state spaces)
+    """
+
+    def __init__(self, config: MambaConfig, layer_idx: int):
+        super().__init__()
+        self.hidden_size = config.hidden_size
+        self.ssm_state_size = config.state_size
+        self.conv_kernel_size = config.conv_kernel
+        self.intermediate_size = config.intermediate_size
+        self.time_step_rank = int(config.time_step_rank)
+        self.layer_idx = layer_idx
+        self.use_conv_bias = config.use_conv_bias
+        self.conv1d = nn.Conv1d(
+            in_channels=self.intermediate_size,
+            out_channels=self.intermediate_size,
+            bias=config.use_conv_bias,
+            kernel_size=config.conv_kernel,
+            groups=self.intermediate_size,
+            padding=config.conv_kernel - 1,
+        )
+
+        self.activation = config.hidden_act
+        self.act = ACT2FN[config.hidden_act]
+
+        # projection of the input hidden states
+        self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=config.use_bias)
+        # selective projection used to make dt, B and C input dependent
+        self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False)
+        # time step projection (discretization)
+        self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True)
+
+        # S4D real initialization. These are not discretized!
+        # The core idea is to load them, compute the discrete states, then write the updated state; this keeps the memory bounded
+        A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
+        A = A.expand(self.intermediate_size, -1).contiguous()
+
+        self.A_log = nn.Parameter(torch.log(A))
+        self.D = nn.Parameter(torch.ones(self.intermediate_size))
+        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
+        self.use_bias = config.use_bias
+
+        if not is_fast_path_available:
+            logger.warning_once(
+                "The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
+                " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
+                " https://github.com/Dao-AILab/causal-conv1d"
+            )
+
+    def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[MambaCache] = None):
+        # 1. Gated MLP's linear projection
+        projected_states = self.in_proj(hidden_states).transpose(1, 2)
+
+        if self.training and cache_params is None:  # Doesn't support outputting the states -> used for training
+            contextualized_states = mamba_inner_fn(
+                projected_states,
+                self.conv1d.weight,
+                self.conv1d.bias if self.use_conv_bias else None,
+                self.x_proj.weight,
+                self.dt_proj.weight,
+                self.out_proj.weight,
+                self.out_proj.bias.float() if self.use_bias else None,
+                -torch.exp(self.A_log.float()),
+                None,  # input-dependent B
+                None,  # input-dependent C
+                self.D.float(),
+                delta_bias=self.dt_proj.bias.float(),
+                delta_softplus=True,
+            )
+
+        else:
+            hidden_states, gate = projected_states.chunk(2, dim=1)
+
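+            # Shape note (a sketch assuming the default config: hidden_size=768, expand=2):
+            # `in_proj` maps [batch, seq_len, 768] to [batch, seq_len, 3072]; after the
+            # transpose and `chunk`, `hidden_states` and `gate` are each [batch, 1536, seq_len].
+            # The gate is consumed inside the fused kernels below; it multiplies the scan
+            # output, mirroring `scan_output * self.act(gate)` on the slow path.
+
+            # 2. 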
Convolution sequence transformation + conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)) + if cache_params is not None and cache_params.seqlen_offset > 0: + hidden_states = causal_conv1d_update( + hidden_states.squeeze(-1), + cache_params.conv_states[self.layer_idx], + conv_weights, + self.conv1d.bias, + self.activation, + ) + hidden_states = hidden_states.unsqueeze(-1) + else: + if cache_params is not None: + conv_states = nn.functional.pad( + hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0) + ) + cache_params.conv_states[self.layer_idx].copy_(conv_states) + hidden_states = causal_conv1d_fn( + hidden_states, conv_weights, self.conv1d.bias, activation=self.activation + ) + + # 3. State Space Model sequence transformation + # 3.a. input varying initialization of time_step, B and C + ssm_parameters = self.x_proj(hidden_states.transpose(1, 2)) + time_step, B, C = torch.split( + ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1 + ) + discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2) + + A = -torch.exp(self.A_log.float()) + # 3.c perform the recurrence y ← SSM(A, B, C)(x) + time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, "bias") else None + if cache_params is not None and cache_params.seqlen_offset > 0: + scan_outputs = selective_state_update( + cache_params.ssm_states[self.layer_idx], + hidden_states[..., 0], + discrete_time_step[..., 0], + A, + B[:, 0], + C[:, 0], + self.D, + gate[..., 0], + time_proj_bias, + dt_softplus=True, + ).unsqueeze(-1) + else: + scan_outputs, ssm_state = selective_scan_fn( + hidden_states, + discrete_time_step, + A, + B.transpose(1, 2), + C.transpose(1, 2), + self.D.float(), + gate, + time_proj_bias, + delta_softplus=True, + return_last_state=True, + ) + if ssm_state is not None and cache_params is not None: + cache_params.ssm_states[self.layer_idx].copy_(ssm_state) + + # 4. Final linear projection + contextualized_states = self.out_proj(scan_outputs.transpose(1, 2)) + return contextualized_states + + # fmt: off + def slow_forward(self, input_states, cache_params: Optional[MambaCache]=None): + batch_size, seq_len, _ = input_states.shape + dtype = input_states.dtype + # 1. Gated MLP's linear projection + projected_states = self.in_proj(input_states).transpose(1, 2) # [batch, 2 * intermediate_size, seq_len] + hidden_states, gate = projected_states.chunk(2, dim=1) + + # 2. 
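Convolution sequence transformation
+        # The convolution below is depthwise (groups == in_channels == intermediate_size) and
+        # causal: `padding=conv_kernel - 1` pads the sequence, and the `[..., :seq_len]` slice
+        # keeps only the first seq_len outputs, so position t never sees inputs beyond t.
+        # During single-token decoding (`seqlen_offset > 0`), the cached window is rolled by
+        # one and the same result is computed as a dot product with the kernel weights.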
+        if cache_params is not None:
+            ssm_state = cache_params.ssm_states[self.layer_idx].clone()
+            if cache_params.seqlen_offset > 0:
+                conv_state = cache_params.conv_states[self.layer_idx]  # [batch, intermediate_size, conv_kernel_size]
+                conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
+                conv_state[:, :, -1] = hidden_states[:, :, 0]
+                cache_params.conv_states[self.layer_idx].copy_(conv_state)
+                hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
+                if self.use_conv_bias:
+                    hidden_states += self.conv1d.bias
+                hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1)  # [batch, intermediate_size, 1] : decoding
+            else:
+                conv_state = nn.functional.pad(
+                    hidden_states,
+                    (self.conv_kernel_size - hidden_states.shape[-1], 0)
+                )
+                cache_params.conv_states[self.layer_idx].copy_(conv_state)
+                hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])  # [batch, intermediate_size, seq_len]
+        else:
+            ssm_state = torch.zeros(
+                (batch_size, self.intermediate_size, self.ssm_state_size),
+                device=hidden_states.device, dtype=dtype
+            )
+            hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])  # [batch, intermediate_size, seq_len]
+
+        # 3. State Space Model sequence transformation
+        # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
+        ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
+        time_step, B, C = torch.split(
+            ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
+        )
+        discrete_time_step = self.dt_proj(time_step)  # [batch, seq_len, intermediate_size]
+        discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2)  # [batch, intermediate_size, seq_len]
+
+        # 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
+        A = -torch.exp(self.A_log.float())  # [intermediate_size, ssm_state_size]
+        discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None])  # [batch, intermediate_size, seq_len, ssm_state_size]
+        discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float()  # [batch, intermediate_size, seq_len, ssm_state_size]
+        deltaB_u = discrete_B * hidden_states[:, :, :, None].float()
+
+        # 3.c perform the recurrence y ← SSM(A, B, C)(x)
+        scan_outputs = []
+        for i in range(seq_len):
+            ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]  # [batch, intermediate_size, ssm_state_size]
+            scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1))  # [batch, intermediate_size, 1]
+            scan_outputs.append(scan_output[:, :, 0])
+        scan_output = torch.stack(scan_outputs, dim=-1)  # [batch, intermediate_size, seq_len]
+        scan_output = scan_output + (hidden_states * self.D[None, :, None])
+        scan_output = (scan_output * self.act(gate))
+
+        if cache_params is not None:
+            cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
+
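+        # A sketch of what step 3 above computes per channel (the loop is the reference
+        # implementation of the fused `selective_scan_fn` kernel used on the fast path;
+        # note B is discretized with the simplified Euler rule this code uses, not full ZOH):
+        #     h_t = exp(∆_t A) * h_{t-1} + (∆_t * B_t) * x_t      # discrete_A, deltaB_u
+        #     y_t = C_t · h_t + D * x_t                           # recurrence plus D skip
+        # after which y is gated elementwise by `self.act(gate)`.
+
+        # 4. 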
Final linear projection + contextualized_states = self.out_proj(scan_output.transpose(1, 2)) # [batch, seq_len, hidden_size] + return contextualized_states + # fmt: on + + def forward(self, hidden_states, cache_params: Optional[MambaCache] = None): + if is_fast_path_available and "cuda" in self.x_proj.weight.device.type: + return self.cuda_kernels_forward(hidden_states, cache_params) + return self.slow_forward(hidden_states, cache_params) + + +class MambaRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + MambaRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +class MambaBlock(nn.Module): + def __init__(self, config, layer_idx): + super().__init__() + self.config = config + self.layer_idx = layer_idx + self.residual_in_fp32 = config.residual_in_fp32 + self.norm = MambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) + self.mixer = MambaMixer(config, layer_idx=layer_idx) + + def forward(self, hidden_states, cache_params: Optional[MambaCache] = None): + residual = hidden_states + hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype)) + if self.residual_in_fp32: + residual = residual.to(torch.float32) + + hidden_states = self.mixer(hidden_states, cache_params=cache_params) + hidden_states = residual + hidden_states + return hidden_states + + +class MambaPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = MambaConfig + base_model_prefix = "backbone" + _no_split_modules = ["MambaBlock"] + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, MambaMixer): + module.A_log._no_weight_decay = True + module.D._no_weight_decay = True + + dt_init_std = self.config.time_step_rank**-0.5 * self.config.time_step_scale + if self.config.time_step_init_scheme == "constant": + nn.init.constant_(module.dt_proj.weight, dt_init_std) + elif self.config.time_step_init_scheme == "random": + nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std) + + dt = torch.exp( + torch.rand(self.config.intermediate_size) + * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + + math.log(self.config.time_step_min) + ).clamp(min=self.config.time_step_floor) + # # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + with torch.no_grad(): + module.dt_proj.bias.copy_(inv_dt) + module.dt_proj.bias._no_reinit = True + + if isinstance(module, nn.Linear): + if module.bias is not None: + if not getattr(module.bias, "_no_reinit", False): + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Embedding): + nn.init.normal_(module.weight, std=self.config.initializer_range) + + if self.config.rescale_prenorm_residual: + # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: + # > A modified initialization which accounts for the accumulation on the residual path with model depth. 
Scale + # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. + # > -- GPT-2 :: https://openai.com/blog/better-language-models/ + # + # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py + for name, p in module.named_parameters(): + if name in ["out_proj.weight"]: + # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block + # Following Pytorch init, except scale by 1/sqrt(2 * n_layer) + # We need to reinit p since this code could be called multiple times + # Having just p *= scale would repeatedly scale it down + nn.init.kaiming_uniform_(p, a=math.sqrt(5)) + with torch.no_grad(): + p /= math.sqrt(self.config.num_layers) + + +@dataclass +class MambaOutput(ModelOutput): + """ + Class for the MAMBA model outputs. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + cache_params (`MambaCache`): + The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to + avoid providing the old `input_ids`. + + Includes both the State space model state matrices after the selective scan, and the Convolutional states + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + last_hidden_state: Optional[torch.FloatTensor] = None + cache_params: Optional[MambaCache] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MambaCausalLMOutput(ModelOutput): + """ + Base class for causal language model (or autoregressive) outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss (for next-token prediction). + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + cache_params (`MambaCache`): + The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to + avoid providing the old `input_ids`. + + Includes both the State space model state matrices after the selective scan, and the Convolutional states + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + loss: Optional[torch.FloatTensor] = None + logits: Optional[torch.FloatTensor] = None + cache_params: Optional[MambaCache] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + + +MAMBA_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+    etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
+
+    Parameters:
+        config ([`MambaConfig`]): Model configuration class with all the parameters of the model.
+            Initializing with a config file does not load the weights associated with the model, only the
+            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MAMBA_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
+            Indices of input sequence tokens in the vocabulary.
+
+            If `cache_params.seqlen_offset>0`, only `input_ids` that do not have their past calculated should be passed as
+            `input_ids`.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+            model's internal embedding lookup matrix.
+        cache_params (`MambaCache`, *optional*):
+            If passed along, the model uses the previous state in all the blocks (which will give the output for the
+            `input_ids` provided as if the model adds `state_input_ids + input_ids` as context).
+        use_cache (`bool`, *optional*):
+            If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+    "The bare MAMBA Model transformer outputting raw hidden-states without any specific head on top.",
+    MAMBA_START_DOCSTRING,
+)
+class MambaModel(MambaPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+
+        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
+        self.layers = nn.ModuleList([MambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
+
+        self.gradient_checkpointing = False
+        self.norm_f = MambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
+        # Initialize weights and apply final processing
+        self._register_load_state_dict_pre_hook(self.load_hook)
+        self.post_init()
+
+    def load_hook(self, state_dict, prefix, *args):
+        for k in state_dict:
+            if "embedding." 
in k: + state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k) + break + + def get_input_embeddings(self): + return self.embeddings + + def set_input_embeddings(self, new_embeddings): + self.embeddings = new_embeddings + + @add_start_docstrings_to_model_forward(MAMBA_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MambaOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.LongTensor] = None, + cache_params: Optional[MambaCache] = None, + use_cache: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, # `attention_mask` is passed by the tokenizer and we don't want it + ) -> Union[Tuple, MambaOutput]: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" + ) + + if inputs_embeds is None: + inputs_embeds = self.embeddings(input_ids) + + if self.gradient_checkpointing and self.training and use_cache: + use_cache = False + + if cache_params is None and use_cache: + cache_params = MambaCache( + self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype + ) + + hidden_states = inputs_embeds + all_hidden_states = () if output_hidden_states else None + for mixer_block in self.layers: + if self.gradient_checkpointing and self.training: + hidden_states = self._gradient_checkpointing_func(mixer_block.__call__, hidden_states, cache_params) + else: + hidden_states = mixer_block(hidden_states, cache_params=cache_params) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if use_cache: + cache_params.seqlen_offset += inputs_embeds.shape[1] + + hidden_states = self.norm_f(hidden_states) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None) + + return MambaOutput( + last_hidden_state=hidden_states, + cache_params=cache_params if use_cache else None, + hidden_states=all_hidden_states, + ) + + +@add_start_docstrings( + """ + The MAMBA Model transformer with a language modeling head on top (linear layer with weights tied to the input + embeddings). 
+ """, + MAMBA_START_DOCSTRING, +) +class MambaForCausalLM(MambaPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.backbone = MambaModel(config) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def get_input_embeddings(self): + return self.backbone.get_input_embeddings() + + def set_input_embeddings(self, new_embeddings): + return self.backbone.set_input_embeddings(new_embeddings) + + def _update_model_kwargs_for_generation( + self, outputs: ModelOutput, model_kwargs: Dict[str, Any], **kwargs + ) -> Dict[str, Any]: + model_kwargs["cache_params"] = outputs.get("cache_params", None) + return model_kwargs + + def prepare_inputs_for_generation( + self, input_ids, cache_params: Optional[MambaCache] = None, inputs_embeds=None, attention_mask=None, **kwargs + ): + # only last token for inputs_ids if the state is passed along. + if cache_params is not None: + input_ids = input_ids[:, -1].unsqueeze(-1) + + if inputs_embeds is not None and cache_params is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs["cache_params"] = cache_params + return model_inputs + + @add_start_docstrings_to_model_forward(MAMBA_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MambaCausalLMOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + cache_params: Optional[MambaCache] = None, + labels: Optional[torch.LongTensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + use_cache: Optional[bool] = None, + **kwargs, # for now we need this for generation + ) -> Union[Tuple, MambaCausalLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set + `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` + are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + mamba_outputs = self.backbone( + input_ids, + cache_params=cache_params, + inputs_embeds=inputs_embeds, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + use_cache=use_cache, + ) + hidden_states = mamba_outputs[0] + + logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float() + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(logits.device) + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + if not return_dict: + output = (logits,) + mamba_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return MambaCausalLMOutput( + loss=loss, + logits=logits, + cache_params=mamba_outputs.cache_params, + hidden_states=mamba_outputs.hidden_states, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/rag/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b238c6290832e8ab12de08cb5defb8f6924ad71c --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/rag/__init__.py @@ -0,0 +1,82 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
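+# This module follows the lazy-import pattern used across `transformers`: the
+# `_import_structure` mapping below lists each submodule's public symbols, and at the
+# bottom of the file `sys.modules[__name__]` is replaced with a `_LazyModule`, so the
+# framework-specific RAG implementations (PyTorch vs. TensorFlow) are only imported
+# when one of their symbols is actually accessed, e.g.:
+#
+#     from transformers.models.rag import RagRetriever  # imports retrieval_rag lazily
+#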
+ +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available + + +_import_structure = { + "configuration_rag": ["RagConfig"], + "retrieval_rag": ["RagRetriever"], + "tokenization_rag": ["RagTokenizer"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_rag"] = [ + "RagModel", + "RagPreTrainedModel", + "RagSequenceForGeneration", + "RagTokenForGeneration", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_rag"] = [ + "TFRagModel", + "TFRagPreTrainedModel", + "TFRagSequenceForGeneration", + "TFRagTokenForGeneration", + ] + + +if TYPE_CHECKING: + from .configuration_rag import RagConfig + from .retrieval_rag import RagRetriever + from .tokenization_rag import RagTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_rag import ( + TFRagModel, + TFRagPreTrainedModel, + TFRagSequenceForGeneration, + TFRagTokenForGeneration, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e5c92d1839f1b3c18ea2675507cfa323ed94d1e Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/configuration_rag.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/configuration_rag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e7a73b2c3c0dad9385dee68e1c994367434fbe1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/configuration_rag.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_rag.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_rag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5bbb47cbbb06231b2ba499fbdd72f6eb33d940d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_rag.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_tf_rag.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_tf_rag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfeed16d797f34e7983995c137c418c301c2b110 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_tf_rag.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f1e67c61775941bbab9b521977cb7bb466c6928
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b23f1a5e32e92b597867081d37a2a1117d99a06
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/configuration_rag.py b/venv/lib/python3.10/site-packages/transformers/models/rag/configuration_rag.py
new file mode 100644
index 0000000000000000000000000000000000000000..2229e485db4ed20c5480c29f369a667aa2390943
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/rag/configuration_rag.py
@@ -0,0 +1,182 @@
+# coding=utf-8
+# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" RAG model configuration"""
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import add_start_docstrings
+
+
+RAG_CONFIG_DOC = r"""
+    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
+    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        title_sep (`str`, *optional*, defaults to `" / "`):
+            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
+        doc_sep (`str`, *optional*, defaults to `" // "`):
+            Separator inserted between the text of the retrieved document and the original input when calling
+            [`RagRetriever`].
+        n_docs (`int`, *optional*, defaults to 5):
+            Number of documents to retrieve.
+        max_combined_length (`int`, *optional*, defaults to 300):
+            Max length of contextualized input returned by [`~RagRetriever.__call__`].
+        retrieval_vector_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the document embeddings indexed by [`RagRetriever`].
+        retrieval_batch_size (`int`, *optional*, defaults to 8):
+            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
+            by [`RagRetriever`].
+        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
+            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
+            using `datasets.list_datasets()`).
+        dataset_split (`str`, *optional*, defaults to `"train"`):
+            Which split of the `dataset` to load.
+        index_name (`str`, *optional*, defaults to `"compressed"`):
+            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
+            `"compressed"`.
+        index_path (`str`, *optional*):
+            The path to the serialized faiss index on disk.
+        passages_path (`str`, *optional*):
+            A path to text passages compatible with the faiss index. Required if using
+            [`~models.rag.retrieval_rag.LegacyIndex`].
+        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
+            Whether to load a "dummy" variant of the dataset specified by `dataset`.
+        label_smoothing (`float`, *optional*, defaults to 0.0):
+            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
+            in the loss calculation. If set to 0, no label smoothing is performed.
+        do_marginalize (`bool`, *optional*, defaults to `False`):
+            If `True`, the logits are marginalized over all documents by making use of
+            `torch.nn.functional.log_softmax`.
+        reduce_loss (`bool`, *optional*, defaults to `False`):
+            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
+        do_deduplication (`bool`, *optional*, defaults to `True`):
+            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
+            set to `False` if used while training with a distributed backend.
+        exclude_bos_score (`bool`, *optional*, defaults to `False`):
+            Whether or not to disregard the BOS token when computing the loss.
+        output_retrieved (`bool`, *optional*, defaults to `False`):
+            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
+            `context_attention_mask` are returned. See returned tensors for more detail.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models).
+        forced_eos_token_id (`int`, *optional*):
+            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+            `eos_token_id`.
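+
+    Example (an illustrative composition; RAG pairs a question encoder such as DPR with a
+    seq2seq generator such as BART, as in the pretrained `facebook/rag-token-nq` checkpoint):
+
+    ```python
+    >>> from transformers import BartConfig, DPRConfig, RagConfig
+
+    >>> # Build a RagConfig from the two sub-model configurations
+    >>> question_encoder_config = DPRConfig()
+    >>> generator_config = BartConfig()
+    >>> config = RagConfig.from_question_encoder_generator_configs(
+    ...     question_encoder_config, generator_config, n_docs=5, index_name="exact"
+    ... )
+    ```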
+""" + + +@add_start_docstrings(RAG_CONFIG_DOC) +class RagConfig(PretrainedConfig): + model_type = "rag" + is_composition = True + + def __init__( + self, + vocab_size=None, + is_encoder_decoder=True, + prefix=None, + bos_token_id=None, + pad_token_id=None, + eos_token_id=None, + decoder_start_token_id=None, + title_sep=" / ", + doc_sep=" // ", + n_docs=5, + max_combined_length=300, + retrieval_vector_size=768, + retrieval_batch_size=8, + dataset="wiki_dpr", + dataset_split="train", + index_name="compressed", + index_path=None, + passages_path=None, + use_dummy_dataset=False, + reduce_loss=False, + label_smoothing=0.0, + do_deduplication=True, + exclude_bos_score=False, + do_marginalize=False, + output_retrieved=False, + use_cache=True, + forced_eos_token_id=None, + dataset_revision=None, + **kwargs, + ): + super().__init__( + bos_token_id=bos_token_id, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + decoder_start_token_id=decoder_start_token_id, + forced_eos_token_id=forced_eos_token_id, + is_encoder_decoder=is_encoder_decoder, + prefix=prefix, + vocab_size=vocab_size, + **kwargs, + ) + assert ( + "question_encoder" in kwargs and "generator" in kwargs + ), "Config has to be initialized with question_encoder and generator config" + question_encoder_config = kwargs.pop("question_encoder") + question_encoder_model_type = question_encoder_config.pop("model_type") + decoder_config = kwargs.pop("generator") + decoder_model_type = decoder_config.pop("model_type") + + from ..auto.configuration_auto import AutoConfig + + self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config) + self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config) + + self.reduce_loss = reduce_loss + self.label_smoothing = label_smoothing + self.exclude_bos_score = exclude_bos_score + self.do_marginalize = do_marginalize + + self.title_sep = title_sep + self.doc_sep = doc_sep + self.n_docs = n_docs + self.max_combined_length = max_combined_length + + self.dataset = dataset + self.dataset_split = dataset_split + self.index_name = index_name + + self.retrieval_vector_size = retrieval_vector_size + self.retrieval_batch_size = retrieval_batch_size + self.passages_path = passages_path + self.index_path = index_path + self.use_dummy_dataset = use_dummy_dataset + self.dataset_revision = dataset_revision + + self.output_retrieved = output_retrieved + + self.do_deduplication = do_deduplication + + self.use_cache = use_cache + + if self.forced_eos_token_id is None: + self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None) + + @classmethod + def from_question_encoder_generator_configs( + cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs + ) -> PretrainedConfig: + r""" + Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and + decoder model configuration. 
+ + Returns: + [`EncoderDecoderConfig`]: An instance of a configuration object + """ + return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py b/venv/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py new file mode 100644 index 0000000000000000000000000000000000000000..80dec5bc3dba586f2295753822066d57845f1326 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/rag/modeling_rag.py @@ -0,0 +1,1628 @@ +# coding=utf-8 +# Copyright 2020, The RAG Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""RAG model implementation.""" + +import copy +from dataclasses import dataclass +from typing import Callable, List, Optional, Tuple, Union + +import torch +from torch import nn + +from ...configuration_utils import PretrainedConfig +from ...generation import BeamSearchScorer, GenerationConfig, LogitsProcessorList, StoppingCriteriaList +from ...modeling_outputs import ModelOutput +from ...modeling_utils import PreTrainedModel +from ...utils import add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_rag import RagConfig +from .retrieval_rag import RagRetriever + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "RagConfig" + + +@dataclass +class RetrievAugLMMarginOutput(ModelOutput): + """ + Base class for retriever augmented marginalized models outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head. The score is possibly marginalized over all documents for + each vocabulary token. + doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): + Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and + `question_encoder_last_hidden_state`. + past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, + num_heads, sequence_length, embed_size_per_head)`). + + Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used + (see `past_key_values` input) to speed up sequential decoding. + retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): + Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute + the `doc_scores`. + retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): + The indexes of the embedded documents retrieved by the retriever. 
+ context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. + context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the + retriever. + question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden states at the output of the last layer of the question encoder pooled output of the + model. + question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. + question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the question encoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the generator encoder of the model. + generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. + generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. + generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. 
+ + Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + doc_scores: torch.FloatTensor = None + past_key_values: Optional[List[torch.FloatTensor]] = None + retrieved_doc_embeds: Optional[torch.FloatTensor] = None + retrieved_doc_ids: Optional[torch.LongTensor] = None + context_input_ids: Optional[torch.LongTensor] = None + context_attention_mask: Optional[torch.LongTensor] = None + question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None + question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None + generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class RetrievAugLMOutput(ModelOutput): + """ + Args: + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head. The score is possibly marginalized over all documents for + each vocabulary token. + doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): + Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and + `question_encoder_last_hidden_state`. + past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, + num_heads, sequence_length, embed_size_per_head)`). + + Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used + (see `past_key_values` input) to speed up sequential decoding. + retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): + Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute + the `doc_scores`. + retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): + The indexes of the embedded documents retrieved by the retriever. + context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. 
+ context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the + retriever. + question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden states at the output of the last layer of the question encoder pooled output of the + model. + question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. + question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the question encoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the generator encoder of the model. + generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. + generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. + generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. 
+        generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
+            weighted average in the cross-attention heads.
+    """
+
+    logits: torch.FloatTensor = None
+    doc_scores: torch.FloatTensor = None
+    past_key_values: Optional[List[torch.FloatTensor]] = None
+    retrieved_doc_embeds: Optional[torch.FloatTensor] = None
+    retrieved_doc_ids: Optional[torch.LongTensor] = None
+    context_input_ids: Optional[torch.LongTensor] = None
+    context_attention_mask: Optional[torch.LongTensor] = None
+    question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+    question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
+    generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+    generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+class RagPreTrainedModel(PreTrainedModel):
+    r"""
+    RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP
+    Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
+
+    RAG is a retriever-augmented model that encapsulates three components: a question encoder, a dataset retriever and
+    a generator. The encoder and generator are trainable, while the retriever is just an indexed dataset.
+
+    """
+
+    config_class = RagConfig
+    base_model_prefix = "rag"
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        # At the moment fast initialization is not supported
+        # for composite models
+        kwargs["_fast_init"] = False
+        return super().from_pretrained(*args, **kwargs)
+
+    @classmethod
+    def from_pretrained_question_encoder_generator(
+        cls,
+        question_encoder_pretrained_model_name_or_path: str = None,
+        generator_pretrained_model_name_or_path: str = None,
+        retriever: RagRetriever = None,
+        **kwargs,
+    ) -> PreTrainedModel:
+        r"""
+        Instantiates a question encoder and a generator from one or two base classes of the library from pretrained
+        model checkpoints.
+
+        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
+        the model, you need to first set it back in training mode with `model.train()`.
+
+        Params:
+            question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+                Information necessary to initiate the question encoder. Can be either:
+
+                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+                    - A path to a *directory* containing model weights saved using
+                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+                    - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
+                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
+                      `config` argument. 
This loading path is slower than converting the TensorFlow checkpoint in a
+                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+
+            generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+                Information necessary to initiate the generator. Can be either:
+
+                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+                    - A path to a *directory* containing model weights saved using
+                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+                    - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
+                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
+                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
+                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+
+            model_args (remaining positional arguments, *optional*):
+                All remaining positional arguments will be passed to the underlying model's `__init__` method.
+            retriever ([`RagRetriever`], *optional*):
+                The retriever to use.
+            kwargs (remaining dictionary of keyword arguments, *optional*):
+                Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
+                `output_attentions=True`).
+
+                - To update the question_encoder configuration, use the prefix *question_encoder_* for each
+                  configuration parameter.
+                - To update the generator configuration, use the prefix *generator_* for each configuration parameter.
+                - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+                Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+        Example:
+
+        ```python
+        >>> from transformers import RagModel
+
+        >>> # initialize a RAG from two pretrained models.
+        >>> model = RagModel.from_pretrained_question_encoder_generator(
+        ...     "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
+        ... )
+        >>> # saving model after fine-tuning
+        >>> model.save_pretrained("./rag")
+        >>> # load fine-tuned model
+        >>> model = RagModel.from_pretrained("./rag")
+        ```"""
+
+        kwargs_question_encoder = {
+            argument[len("question_encoder_") :]: value
+            for argument, value in kwargs.items()
+            if argument.startswith("question_encoder_")
+        }
+
+        kwargs_generator = {
+            argument[len("generator_") :]: value
+            for argument, value in kwargs.items()
+            if argument.startswith("generator_")
+        }
+
+        # remove question_encoder, generator kwargs from kwargs
+        for key in kwargs_question_encoder.keys():
+            del kwargs["question_encoder_" + key]
+        for key in kwargs_generator.keys():
+            del kwargs["generator_" + key]
+
+        # Load and initialize the question_encoder and generator
+        # The distinction between question_encoder and generator at the model level is made
+        # by the value of the flag `is_generator` that we need to set correctly.
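+        # Editor's illustration of the prefix routing above (a hypothetical call, not from the original source):
+        #   from_pretrained_question_encoder_generator(
+        #       "dpr-ckpt", "bart-ckpt", question_encoder_output_attentions=True, generator_max_length=128, n_docs=8
+        #   )
+        #   -> kwargs_question_encoder == {"output_attentions": True}
+        #   -> kwargs_generator == {"max_length": 128}
+        #   -> kwargs == {"n_docs": 8}  # later consumed by RagConfig.from_question_encoder_generator_configs below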
+        question_encoder = kwargs_question_encoder.pop("model", None)
+        if question_encoder is None:
+            assert question_encoder_pretrained_model_name_or_path is not None, (
+                "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to"
+                " be defined"
+            )
+            from ..auto.modeling_auto import AutoModel
+
+            if "config" not in kwargs_question_encoder:
+                from ..auto.configuration_auto import AutoConfig
+
+                question_encoder_config, kwargs_question_encoder = AutoConfig.from_pretrained(
+                    question_encoder_pretrained_model_name_or_path,
+                    **kwargs_question_encoder,
+                    return_unused_kwargs=True,
+                )
+                kwargs_question_encoder["config"] = question_encoder_config
+
+            question_encoder = AutoModel.from_pretrained(
+                question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder
+            )
+
+        generator = kwargs_generator.pop("model", None)
+        if generator is None:
+            assert generator_pretrained_model_name_or_path is not None, (
+                "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has"
+                " to be defined"
+            )
+            from ..auto.modeling_auto import AutoModelForSeq2SeqLM
+
+            if "config" not in kwargs_generator:
+                from ..auto.configuration_auto import AutoConfig
+
+                generator_config, kwargs_generator = AutoConfig.from_pretrained(
+                    generator_pretrained_model_name_or_path, **kwargs_generator, return_unused_kwargs=True
+                )
+
+                kwargs_generator["config"] = generator_config
+
+            generator = AutoModelForSeq2SeqLM.from_pretrained(
+                generator_pretrained_model_name_or_path, **kwargs_generator
+            )
+
+        # instantiate config with corresponding kwargs
+        config = kwargs.get("config", None)
+        if config is None:
+            config = RagConfig.from_question_encoder_generator_configs(
+                question_encoder.config, generator.config, **kwargs
+            )
+
+        return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
+
+
+RAG_START_DOCSTRING = r"""
+
+    RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward
+    pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context
+    documents. The documents are then prepended to the input, and such contextualized inputs are passed to the
+    generator.
+
+    The question encoder can be any *autoencoding* model, preferably [`DPRQuestionEncoder`], and the generator can be
+    any *seq2seq* model, preferably [`BartForConditionalGeneration`].
+
+    The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the
+    outputs of a retriever in multiple steps---see examples for more details. The model is compatible with any
+    *autoencoding* model as the `question_encoder` and any *seq2seq* model with language model head as the `generator`.
+    It has been tested with [`DPRQuestionEncoder`] as the `question_encoder` and [`BartForConditionalGeneration`] or
+    [`T5ForConditionalGeneration`] as the `generator`.
+
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+    etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
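+
+    Example (an editor's illustrative sketch, not part of the original docstring; it mirrors the released
+    "facebook/rag-token-nq" checkpoint and uses the dummy index so it runs without the full `wiki_dpr` download):
+
+    ```python
+    >>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq")
+    >>> retriever = RagRetriever.from_pretrained(
+    ...     "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
+    ... )
+    >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
+
+    >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
+    >>> generated = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
+    >>> answers = tokenizer.batch_decode(generated, skip_special_tokens=True)
+    ```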
+
+
+    Args:
+        config ([`RagConfig`]):
+            Model configuration class with all the parameters of the model. Initializing with a config file does not
+            load the weights associated with the model, only the configuration. Check out the
+            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+        question_encoder ([`PreTrainedModel`]):
+            An encoder model compatible with the faiss index encapsulated by the `retriever`.
+        generator ([`PreTrainedModel`]):
+            A seq2seq model used as the generator in the RAG architecture.
+        retriever ([`RagRetriever`]):
+            A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
+"""
+
+
+RAG_FORWARD_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
+            which generator to use; it also specifies a compatible generator tokenizer. Use that tokenizer class to
+            obtain the indices.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+            Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
+            *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
+            sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
+            generator's encoder.
+
+            Used by the [`RagModel`] model during decoding.
+        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Provide for generation tasks. `None` by default, construct as per instructions for the generator model
+            you're using with your RAG instance.
+        decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+            be used by default.
+        past_key_values (`tuple(tuple(torch.FloatTensor))`):
+            Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and
+            `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are used
+            in the [`RagTokenForGeneration`] model during decoding.
+        doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
+            Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+            `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
+            has to be provided to the forward pass. `doc_scores` can be computed via
+            `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
+        context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+            Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+            retriever. If the model was not initialized with a `retriever`, `context_input_ids` has to be provided to
+            the forward pass. 
`context_input_ids` are returned by [`~RagRetriever.__call__`].
+        context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+            retriever. If the model is not initialized with a `retriever`, `context_attention_mask` has to be
+            provided to the forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
+        use_cache (`bool`, *optional*, defaults to `True`):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`).
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        output_retrieved (`bool`, *optional*):
+            Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
+            `context_attention_mask`. See returned tensors for more detail.
+        n_docs (`int`, *optional*, defaults to `config.n_docs`):
+            Number of documents to retrieve and/or number of documents for which to generate an answer.
+"""
+
+
+@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
+class RagModel(RagPreTrainedModel):
+    def __init__(
+        self,
+        config: Optional[PretrainedConfig] = None,
+        question_encoder: Optional[PreTrainedModel] = None,
+        generator: Optional[PreTrainedModel] = None,
+        retriever: Optional[RagRetriever] = None,  # or maybe just use a `set_retriever(...)` method
+        **kwargs,
+    ):
+        assert config is not None or (
+            question_encoder is not None and generator is not None
+        ), "Either a configuration or a question_encoder and a generator has to be provided."
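+        # Editor's note (illustrative, hypothetical variable names): both construction paths yield a composite config:
+        #   RagModel(config=rag_config, retriever=retriever)                                  # sub-models built from config
+        #   RagModel(question_encoder=dpr_model, generator=bart_model, retriever=retriever)   # config derived below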
+ + if config is None: + config = RagConfig.from_question_encoder_generator_configs( + question_encoder.config, generator.config, **kwargs + ) + else: + assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}" + super().__init__(config) + if question_encoder is None: + from ..auto.modeling_auto import AutoModel + + question_encoder = AutoModel.from_config(config.question_encoder) + + if generator is None: + from ..auto.modeling_auto import AutoModelForSeq2SeqLM + + generator = AutoModelForSeq2SeqLM.from_config(config.generator) + + self.retriever = retriever + if self.retriever is not None: + assert isinstance( + retriever, RagRetriever + ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`" + self.retriever = retriever + + self.question_encoder = question_encoder + self.generator = generator + + self.ctx_encoder = None + self.context_encoder_training = False + + @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=RetrievAugLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.BoolTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + doc_scores: Optional[torch.FloatTensor] = None, + context_input_ids: Optional[torch.LongTensor] = None, + context_attention_mask: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_retrieved: Optional[bool] = None, + n_docs: Optional[int] = None, + ) -> Union[Tuple[torch.Tensor], RetrievAugLMOutput]: + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, RagRetriever, RagModel + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base") + >>> retriever = RagRetriever.from_pretrained( + ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True + ... 
) + >>> # initialize with RagRetriever to do everything in one forward call + >>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever) + + >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") + >>> outputs = model(input_ids=inputs["input_ids"]) + ```""" + n_docs = n_docs if n_docs is not None else self.config.n_docs + use_cache = use_cache if use_cache is not None else self.config.use_cache + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved + + # whether retriever has to be used + has_to_retrieve = ( + self.retriever is not None + and (context_input_ids is None or context_attention_mask is None or doc_scores is None) + and encoder_outputs is None + ) + # encoder_outputs are pre-computed during RAG-token generation + if encoder_outputs is None: + if has_to_retrieve: + question_enc_outputs = self.question_encoder( + input_ids, attention_mask=attention_mask, return_dict=True + ) + question_encoder_last_hidden_state = question_enc_outputs[0] # hidden states of question encoder + + retriever_outputs = self.retriever( + input_ids, + question_encoder_last_hidden_state.cpu().detach().to(torch.float32).numpy(), + prefix=self.generator.config.prefix, + n_docs=n_docs, + return_tensors="pt", + ) + if self.context_encoder_training: + ( + context_input_ids, + context_attention_mask, + retrieved_doc_embeds, + retrived_doc_input_ids, + retrived_doc_attention_mask, + retrieved_doc_ids, + ) = ( + retriever_outputs["context_input_ids"], + retriever_outputs["context_attention_mask"], + retriever_outputs["retrieved_doc_embeds"], + retriever_outputs["tokenized_doc_ids"], + retriever_outputs["tokenized_doc_attention_mask"], + retriever_outputs["doc_ids"], + ) + + context_input_ids = context_input_ids.to(input_ids) + context_attention_mask = context_attention_mask.to(input_ids) + + retrived_doc_input_ids = retrived_doc_input_ids.to(input_ids) + retrived_doc_attention_mask = retrived_doc_attention_mask.to(input_ids) + retrieved_doc_embeds = self.ctx_encoder( + retrived_doc_input_ids, attention_mask=retrived_doc_attention_mask, return_dict=True + ).pooler_output + retrieved_doc_embeds = retrieved_doc_embeds.view( + -1, n_docs, question_encoder_last_hidden_state.shape[1] + ) # reshaping + + # compute doc_scores involving ctx_encoder + doc_scores = torch.bmm( + question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2) + ).squeeze(1) + + else: + context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = ( + retriever_outputs["context_input_ids"], + retriever_outputs["context_attention_mask"], + retriever_outputs["retrieved_doc_embeds"], + retriever_outputs["doc_ids"], + ) + + # set to correct device + retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state) + context_input_ids = context_input_ids.to(input_ids) + context_attention_mask = context_attention_mask.to(input_ids) + + # compute doc_scores + doc_scores = torch.bmm( + question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2) + ).squeeze(1) + else: + assert context_input_ids is not None, ( + "Make sure that `context_input_ids` are passed, if no `retriever` is set. 
Alternatively, you can"
+                " set a retriever using the `set_retriever(...)` function."
+            )
+            assert context_attention_mask is not None, (
+                "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you"
+                " can set a retriever using the `set_retriever(...)` function."
+            )
+            assert doc_scores is not None, (
+                "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a"
+                " retriever using the `set_retriever(...)` function."
+            )
+
+        assert (
+            doc_scores is not None
+        ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function."
+
+        assert (doc_scores.shape[1] % n_docs) == 0, (
+            f"The second dimension of `doc_scores` should be a multiple of `n_docs`={n_docs}, but is"
+            f" {doc_scores.shape[1]}."
+        )
+
+        # Replicate the decoder inputs once per retrieved document
+        if decoder_input_ids is not None:
+            decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)
+
+        if decoder_attention_mask is not None:
+            decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0)
+
+        gen_outputs = self.generator(
+            input_ids=context_input_ids,
+            attention_mask=context_attention_mask,
+            encoder_outputs=encoder_outputs,
+            decoder_input_ids=decoder_input_ids,
+            decoder_attention_mask=decoder_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            return_dict=True,
+        )
+
+        if not has_to_retrieve:
+            question_encoder_last_hidden_state = None
+            question_enc_hidden_states = None
+            question_enc_attentions = None
+            retrieved_doc_embeds = None
+            retrieved_doc_ids = None
+        else:
+            question_enc_hidden_states = question_enc_outputs.hidden_states
+            question_enc_attentions = question_enc_outputs.attentions
+
+        if not has_to_retrieve or not output_retrieved:
+            # don't output retrieved docs
+            context_input_ids = None
+            context_attention_mask = None
+            retrieved_doc_embeds = None
+            retrieved_doc_ids = None
+
+        return RetrievAugLMOutput(
+            logits=gen_outputs.logits,
+            doc_scores=doc_scores,
+            past_key_values=gen_outputs.past_key_values,
+            context_input_ids=context_input_ids,
+            context_attention_mask=context_attention_mask,
+            retrieved_doc_embeds=retrieved_doc_embeds,
+            retrieved_doc_ids=retrieved_doc_ids,
+            question_encoder_last_hidden_state=question_encoder_last_hidden_state,
+            question_enc_hidden_states=question_enc_hidden_states,
+            question_enc_attentions=question_enc_attentions,
+            generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,
+            generator_enc_hidden_states=gen_outputs.encoder_hidden_states,
+            generator_enc_attentions=gen_outputs.encoder_attentions,
+            generator_dec_hidden_states=gen_outputs.decoder_hidden_states,
+            generator_dec_attentions=gen_outputs.decoder_attentions,
+            generator_cross_attentions=gen_outputs.cross_attentions,
+        )
+
+
+@add_start_docstrings_to_model_forward(
+    """
+    A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
+    """,
+    RAG_START_DOCSTRING,
+)
+class RagSequenceForGeneration(RagPreTrainedModel):
+    def __init__(
+        self,
+        config: Optional[PretrainedConfig] = None,
+        question_encoder: Optional[PreTrainedModel] = None,
+        generator: Optional[PreTrainedModel] = None,
+        retriever: Optional[RagRetriever] = None,
+        **kwargs,
+    ):
+        assert config is not None or (
+            question_encoder is not None and generator is not None
+        ), "Either a configuration or an encoder and a generator has to be provided."
+
+        if config is None:
+            config = RagConfig.from_question_encoder_generator_configs(
+                question_encoder.config, generator.config, **kwargs
+            )
+        super().__init__(config)
+
+        # instantiate model
+        self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
+
+    def set_retriever(self, retriever: RagRetriever):
+        self.rag.retriever = retriever
+
+    def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
+        self.rag.context_encoder_training = True
+        self.rag.ctx_encoder = ctx_encoder
+
+    @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+    @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+        decoder_input_ids: Optional[torch.LongTensor] = None,
+        decoder_attention_mask: Optional[torch.BoolTensor] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+        context_input_ids: Optional[torch.LongTensor] = None,
+        context_attention_mask: Optional[torch.LongTensor] = None,
+        doc_scores: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        output_retrieved: Optional[bool] = None,
+        exclude_bos_score: Optional[bool] = None,
+        reduce_loss: Optional[bool] = None,
+        labels: Optional[torch.LongTensor] = None,
+        n_docs: Optional[int] = None,
+        **kwargs,  # needs kwargs for generation
+    ) -> RetrievAugLMMarginOutput:
+        r"""
+        exclude_bos_score (`bool`, *optional*):
+            Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
+            the loss.
+        reduce_loss (`bool`, *optional*):
+            Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
+            operation.
+        kwargs (`Dict[str, Any]`, *optional*, defaults to `{}`):
+            Legacy dictionary, required so that the model can use the *generate()* function.
+
+        Returns:
+
+        Example:
+
+        ```python
+        >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration
+        >>> import torch
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
+        >>> retriever = RagRetriever.from_pretrained(
+        ...     "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
+        ... )
+        >>> # initialize with RagRetriever to do everything in one forward call
+        >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
+
+        >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
+        >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
+        >>> input_ids = inputs["input_ids"]
+        >>> labels = targets["input_ids"]
+        >>> outputs = model(input_ids=input_ids, labels=labels)
+
+        >>> # or use retriever separately
+        >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True)
+        >>> # 1. Encode
+        >>> question_hidden_states = model.question_encoder(input_ids)[0]
+        >>> # 2. Retrieve
+        >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
+        >>> doc_scores = torch.bmm(
+        ...     question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
+        ... ).squeeze(1)
+        >>> # 3. Forward to generator
+        >>> outputs = model(
+        ...     
context_input_ids=docs_dict["context_input_ids"], + ... context_attention_mask=docs_dict["context_attention_mask"], + ... doc_scores=doc_scores, + ... decoder_input_ids=labels, + ... ) + ```""" + n_docs = n_docs if n_docs is not None else self.config.n_docs + exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score + reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss + + if labels is not None: + if decoder_input_ids is None: + decoder_input_ids = labels + use_cache = False + + outputs = self.rag( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_outputs=encoder_outputs, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + context_input_ids=context_input_ids, + context_attention_mask=context_attention_mask, + doc_scores=doc_scores, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + output_retrieved=output_retrieved, + n_docs=n_docs, + ) + + loss = None + if labels is not None: + loss = self.get_nll( + outputs.logits, + outputs.doc_scores, + decoder_input_ids, + reduce_loss=reduce_loss, + epsilon=self.config.label_smoothing, + exclude_bos_score=exclude_bos_score, + n_docs=n_docs, + ) + + return RetrievAugLMMarginOutput( + loss=loss, + logits=outputs.logits, + doc_scores=outputs.doc_scores, + past_key_values=outputs.past_key_values, + context_input_ids=outputs.context_input_ids, + context_attention_mask=outputs.context_attention_mask, + retrieved_doc_embeds=outputs.retrieved_doc_embeds, + retrieved_doc_ids=outputs.retrieved_doc_ids, + question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, + question_enc_hidden_states=outputs.question_enc_hidden_states, + question_enc_attentions=outputs.question_enc_attentions, + generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, + generator_enc_hidden_states=outputs.generator_enc_hidden_states, + generator_enc_attentions=outputs.generator_enc_attentions, + generator_dec_hidden_states=outputs.generator_dec_hidden_states, + generator_dec_attentions=outputs.generator_dec_attentions, + generator_cross_attentions=outputs.generator_cross_attentions, + ) + + @property + def retriever(self): + return self.rag.retriever + + @property + def generator(self): + return self.rag.generator + + @property + def question_encoder(self): + return self.rag.question_encoder + + @torch.no_grad() + def generate( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + context_input_ids: Optional[torch.LongTensor] = None, + context_attention_mask: Optional[torch.LongTensor] = None, + doc_scores: Optional[torch.FloatTensor] = None, + do_deduplication: Optional[bool] = None, # defaults to True + num_return_sequences: Optional[int] = None, # defaults to 1 + num_beams: Optional[int] = None, # defaults to 1 + n_docs: Optional[int] = None, + **model_kwargs, + ) -> torch.LongTensor: + """ + Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`]` documentation + for more information on how to set other generate input parameters. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + The sequence used as a prompt for the generation. If `input_ids` is not passed, then + `context_input_ids` has to be provided. 
+            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+                - 1 for tokens that are **not masked**,
+                - 0 for tokens that are **masked**.
+
+                [What are attention masks?](../glossary#attention-mask)
+            context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+                Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+                retriever.
+            context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+                Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+                retriever.
+
+                If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and
+                `context_attention_mask` have to be provided to the forward pass. They are returned by
+                [`~RagRetriever.__call__`].
+            doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
+                Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+                `question_encoder_last_hidden_state`.
+
+                If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be
+                provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`].
+            do_deduplication (`bool`, *optional*):
+                Whether or not to deduplicate the generations from different context documents for a given input. Has
+                to be set to `False` if used while training with a distributed backend.
+            num_return_sequences (`int`, *optional*, defaults to 1):
+                The number of independently computed returned sequences for each element in the batch. Note that this
+                is not the value we pass to the `generator`'s [`~generation.GenerationMixin.generate`] function,
+                where we set `num_return_sequences` to `num_beams`.
+            num_beams (`int`, *optional*, defaults to 1):
+                Number of beams for beam search. 1 means no beam search.
+            n_docs (`int`, *optional*, defaults to `config.n_docs`):
+                Number of documents to retrieve and/or number of documents for which to generate an answer.
+            kwargs (`Dict[str, Any]`, *optional*):
+                Additional kwargs will be passed to [`~generation.GenerationMixin.generate`].
+
+        Return:
+            `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
+            sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches
+            finished early due to the `eos_token_id`.
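+
+        Example (an illustrative sketch added in editing, not part of the original docstring; it uses the dummy
+        index so it runs without downloading the full `wiki_dpr` index):
+
+        ```python
+        >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
+        >>> retriever = RagRetriever.from_pretrained(
+        ...     "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
+        ... )
+        >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
+
+        >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
+        >>> # "thorough" decoding: candidates are generated per retrieved document, then re-scored
+        >>> generated = model.generate(input_ids=inputs["input_ids"], num_beams=2, num_return_sequences=1)
+        >>> answers = tokenizer.batch_decode(generated, skip_special_tokens=True)
+        ```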
+ """ + + n_docs = n_docs if n_docs is not None else self.config.n_docs + do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication + num_doc_return_sequences = ( + num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences + ) + num_beams = num_beams if num_beams is not None else self.config.num_beams + + assert ( + input_ids is not None or context_input_ids is not None + ), " At least one of input_ids or context_input_ids must be given" + + if self.retriever is not None and context_input_ids is None: + question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] + context_input_ids = self.retriever( + input_ids, + question_hidden_states.cpu().detach().to(torch.float32).numpy(), + prefix=self.generator.config.prefix, + n_docs=n_docs, + return_tensors="pt", + )["context_input_ids"] + + # set to correct device + context_input_ids = context_input_ids.to(input_ids) + + hypos = [] + model_kwargs["num_beams"] = num_beams + model_kwargs["num_return_sequences"] = num_beams + model_kwargs["attention_mask"] = None + + batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs + + for index in range(batch_size): + # first, generate beams from documents: + generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len) + + output_sequences = self.generator.generate( + generator_input_ids, + **model_kwargs, + ) # n_docs * n_beam, tgt_len + if do_deduplication: + # do_deduplication, max_output_len + output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values())) + + num_candidates = output_sequences.shape[ + 0 + ] # after deduplication, this number can be less than n_docs*n_beam + + # then, run model forwards to get nll scores: + if input_ids is not None: + new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1) + outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True) + else: # input_ids is None, need context_input_ids/mask and doc_scores + assert context_attention_mask is not None, ( + "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you" + " can set a retriever using the `set_retriever(...)` function." + ) + assert doc_scores is not None, ( + "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a" + " retriever using the `set_retriever(...)` function." 
+ ) + + individual_input_ids = generator_input_ids.repeat( + num_candidates, 1 + ) # (num_candidates*n_docs, max_len) + + individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs] + individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1) + + individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs] + individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1) # [num_candidates, n_docs] + + outputs = self( + context_input_ids=individual_input_ids, + context_attention_mask=individual_attention_mask, + doc_scores=individual_doc_scores, + labels=output_sequences, + exclude_bos_score=True, + ) + + top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1] + + # add hypothesis + hypos.append(output_sequences[top_cand_inds]) + + return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id) + + def get_nll( + self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None + ): + # shift tokens left + target = torch.cat( + [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1 + ) + + n_docs = n_docs if n_docs is not None else self.config.n_docs + + # bos_token_id is None for T5 + bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id + use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all() + + def _mask_pads(ll, smooth_obj): + pad_mask = target.eq(self.config.generator.pad_token_id) + if pad_mask.any(): + ll.masked_fill_(pad_mask, 0.0) + smooth_obj.masked_fill_(pad_mask, 0.0) + return ll.squeeze(-1), smooth_obj.squeeze(-1) + + # seq_logits dim = (batch*n_docs, tgt_len , #vocabs) + seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view( + seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1) + ) # batch_size x n_docs x tgt_len x #vocab_size + doc_logprobs = nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1) + + # RAG-sequence marginalization + first_token_scores = seq_logprobs[:, :, :1, :] + second_token_scores = seq_logprobs[:, :, 1:2, :] + remainder = seq_logprobs[:, :, 2:, :] + rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2) + + # calculate loss + target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1) + assert target.dim() == rag_logprobs.dim() + + ll = rag_logprobs.gather(dim=-1, index=target) + smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits + + ll, smooth_obj = _mask_pads(ll, smooth_obj) + + # sum over tokens, exclude bos while scoring + ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2) + smooth_obj = smooth_obj.sum(2) + ll = ll.logsumexp(1) # logsumexp over docs + smooth_obj = smooth_obj.logsumexp(1) + + nll_loss = -ll + smooth_loss = -smooth_obj + + if reduce_loss: + nll_loss = nll_loss.sum() + smooth_loss = smooth_loss.sum() + + eps_i = epsilon / rag_logprobs.size(-1) + loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss + return loss + + @staticmethod + def _cat_and_pad(tensors, pad_token_id): + output = ( + tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id) + ) + ind = 0 + for t in tensors: + output[ind : ind + t.shape[0], : t.shape[1]] = t + ind += t.shape[0] + return output + + +@add_start_docstrings_to_model_forward( + """ + A RAG-token model implementation. 
It performs RAG-token specific marginalization in the forward pass. + """, + RAG_START_DOCSTRING, +) +class RagTokenForGeneration(RagPreTrainedModel): + def __init__( + self, + config: Optional[PretrainedConfig] = None, + question_encoder: Optional[PreTrainedModel] = None, + generator: Optional[PreTrainedModel] = None, + retriever: Optional[RagRetriever] = None, + **kwargs, + ): + assert config is not None or ( + question_encoder is not None and generator is not None + ), "Either a configuration or an encoder and a generator has to be provided." + + if config is None: + config = RagConfig.from_question_encoder_generator_configs( + question_encoder.config, generator.config, **kwargs + ) + + super().__init__(config) + + # instantiate model + self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever) + + def set_retriever(self, retriever: RagRetriever): + self.rag.retriever = retriever + + def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel): + self.rag.context_encoder_training = True + self.rag.ctx_encoder = ctx_encoder + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + use_cache=None, + encoder_outputs=None, + doc_scores=None, + n_docs=None, + **kwargs, + ): + if past_key_values is not None: + # if past is defined use only last decoder_input_ids + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "input_ids": None, + "encoder_outputs": encoder_outputs, + "doc_scores": doc_scores, + "context_attention_mask": attention_mask, + "decoder_input_ids": decoder_input_ids, + "past_key_values": past_key_values, + "use_cache": use_cache, + "do_marginalize": True, + "n_docs": n_docs, + } + + @property + def retriever(self): + return self.rag.retriever + + @property + def generator(self): + return self.rag.generator + + @property + def question_encoder(self): + return self.rag.question_encoder + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + """Reorders cache for generation. 
BART-inspired but we need to take care of the extra dimension for docs""" + + def _reorder_stacked(hidden_states, new_order): + n_docs = hidden_states.shape[0] // new_order.shape[0] + hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:]) + hidden_states = hidden_states.index_select(0, new_order) + result = hidden_states.view(-1, *hidden_states.shape[2:]) + return result + + reordered_past = () + for layer_past in past_key_values: + # get the correct batch idx from decoder layer's batch dim for cross and self-attn + reordered_past += ( + tuple(_reorder_stacked(past_state, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + + return reordered_past + + def marginalize(self, seq_logits, doc_scores, n_docs=None): + n_docs = n_docs if n_docs is not None else self.config.n_docs + + # RAG-token marginalization + seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view( + seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1) + ) + doc_logprobs = torch.log_softmax(doc_scores, dim=1) + log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1) + return torch.logsumexp(log_prob_sum, dim=1) + + @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.BoolTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + context_input_ids: Optional[torch.LongTensor] = None, + context_attention_mask: Optional[torch.LongTensor] = None, + doc_scores: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_retrieved: Optional[bool] = None, + do_marginalize: Optional[bool] = None, + reduce_loss: Optional[bool] = None, + labels: Optional[torch.LongTensor] = None, + n_docs: Optional[int] = None, + **kwargs, # needs kwargs for generation + ) -> RetrievAugLMMarginOutput: + r""" + do_marginalize (`bool`, *optional*): + If `True`, the logits are marginalized over all documents by making use of + `torch.nn.functional.log_softmax`. + reduce_loss (`bool`, *optional*): + Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum` + operation. + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Legacy dictionary, which is required so that model can use *generate()* function. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq") + >>> retriever = RagRetriever.from_pretrained( + ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True + ... 
) + >>> # initialize with RagRetriever to do everything in one forward call + >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever) + + >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt") + >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt") + >>> input_ids = inputs["input_ids"] + >>> labels = targets["input_ids"] + >>> outputs = model(input_ids=input_ids, labels=labels) + + >>> # or use retriever separately + >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True) + >>> # 1. Encode + >>> question_hidden_states = model.question_encoder(input_ids)[0] + >>> # 2. Retrieve + >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt") + >>> doc_scores = torch.bmm( + ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2) + ... ).squeeze(1) + >>> # 3. Forward to generator + >>> outputs = model( + ... context_input_ids=docs_dict["context_input_ids"], + ... context_attention_mask=docs_dict["context_attention_mask"], + ... doc_scores=doc_scores, + ... decoder_input_ids=labels, + ... ) + + >>> # or directly generate + >>> generated = model.generate( + ... context_input_ids=docs_dict["context_input_ids"], + ... context_attention_mask=docs_dict["context_attention_mask"], + ... doc_scores=doc_scores, + ... ) + >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) + ```""" + n_docs = n_docs if n_docs is not None else self.config.n_docs + do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize + reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss + + if labels is not None: + if decoder_input_ids is None: + decoder_input_ids = labels + use_cache = False + + outputs = self.rag( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_outputs=encoder_outputs, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + context_input_ids=context_input_ids, + context_attention_mask=context_attention_mask, + doc_scores=doc_scores, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + output_retrieved=output_retrieved, + n_docs=n_docs, + ) + + loss = None + logits = outputs.logits + if labels is not None: + assert decoder_input_ids is not None + loss = self.get_nll( + outputs.logits, + outputs.doc_scores, + labels, + reduce_loss=reduce_loss, + epsilon=self.config.label_smoothing, + n_docs=n_docs, + ) + + if do_marginalize: + logits = self.marginalize(logits, outputs.doc_scores, n_docs) + + return RetrievAugLMMarginOutput( + loss=loss, + logits=logits, + doc_scores=outputs.doc_scores, + past_key_values=outputs.past_key_values, + context_input_ids=outputs.context_input_ids, + context_attention_mask=outputs.context_attention_mask, + retrieved_doc_embeds=outputs.retrieved_doc_embeds, + retrieved_doc_ids=outputs.retrieved_doc_ids, + question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, + question_enc_hidden_states=outputs.question_enc_hidden_states, + question_enc_attentions=outputs.question_enc_attentions, + generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, + generator_enc_hidden_states=outputs.generator_enc_hidden_states, + generator_enc_attentions=outputs.generator_enc_attentions, + 
generator_dec_hidden_states=outputs.generator_dec_hidden_states, + generator_dec_attentions=outputs.generator_dec_attentions, + generator_cross_attentions=outputs.generator_cross_attentions, + ) + + @torch.no_grad() + def generate( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + context_input_ids: Optional[torch.LongTensor] = None, + context_attention_mask: Optional[torch.LongTensor] = None, + doc_scores: Optional[torch.FloatTensor] = None, + n_docs: Optional[int] = None, + generation_config: Optional[GenerationConfig] = None, + prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]] = None, + logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(), + stopping_criteria: Optional[StoppingCriteriaList] = StoppingCriteriaList(), + **kwargs, + ) -> torch.LongTensor: + """ + Implements RAG token decoding. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + The sequence used as a prompt for the generation. If `input_ids` is not passed, then + `context_input_ids` has to be provided. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the + retriever. + + If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the + forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. + context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the + retriever. + + If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the + forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`]. + doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`): + Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and + `question_encoder_last_hidden_state`. + + If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the forward + pass. `doc_scores` can be computed via `question_encoder_last_hidden_state` and + `retrieved_doc_embeds`, see examples for more information. + n_docs (`int`, *optional*, defaults to `config.n_docs`): + Number of documents to retrieve and/or number of documents for which to generate an answer. + generation_config (`~generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them. If + `generation_config` is not provided, the default will be used, which has the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration.
Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): + If provided, this function constrains the beam search to allowed tokens only at each step. If not + provided, no constraint is applied. This function takes 2 arguments: the previously generated tokens + `input_ids` and the batch ID `batch_id`. It has to return a list with the allowed tokens for the next + generation step conditioned on the previously generated tokens `input_ids` and the batch ID + `batch_id`. This argument is useful for constrained generation conditioned on the prefix, as described + in [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904). + logits_processor (`LogitsProcessorList`, *optional*): + Custom logits processors that complement the default logits processors built from arguments and a + model's config. If a logits processor is passed that is already created with the arguments or a model's + config, an error is thrown. + stopping_criteria (`StoppingCriteriaList`, *optional*): + Custom stopping criteria that complement the default stopping criteria built from arguments and a + model's config. If a stopping criterion is passed that is already created with the arguments or a + model's config, an error is thrown. + kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be + forwarded to the `forward` function of the model. + + Return: + `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated + sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches + finished early due to the `eos_token_id`. + """ + # Handle `generation_config` and kwargs that might update it + if generation_config is None: + generation_config = self.generation_config + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs + + # set default parameters + n_docs = n_docs if n_docs is not None else self.config.n_docs + + # retrieve docs + if self.retriever is not None and context_input_ids is None: + question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] + out = self.retriever( + input_ids, + question_hidden_states.cpu().detach().to(torch.float32).numpy(), + prefix=self.generator.config.prefix, + n_docs=n_docs, + return_tensors="pt", + ) + context_input_ids, context_attention_mask, retrieved_doc_embeds = ( + out["context_input_ids"], + out["context_attention_mask"], + out["retrieved_doc_embeds"], + ) + + # set to correct device + retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states) + context_input_ids = context_input_ids.to(input_ids) + context_attention_mask = context_attention_mask.to(input_ids) + + # compute doc_scores + doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze( + 1 + ) + + assert (context_input_ids.shape[0] % n_docs) == 0, ( + f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" + f" {context_input_ids.shape[0]}."
+ ) + + # batch_size + batch_size = context_input_ids.shape[0] // n_docs + + encoder = self.rag.generator.get_encoder() + encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True) + + input_ids = torch.full( + (batch_size * generation_config.num_beams, 1), + generation_config.decoder_start_token_id, + dtype=torch.long, + device=next(self.parameters()).device, + ) + input_ids_seq_length = input_ids.shape[-1] + last_hidden_state = encoder_outputs["last_hidden_state"] + + def extend_enc_output(tensor, num_beams=None): + # split into `batch_size`, `num_beams`, `num_docs` + tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:]) + # repeat same last hidden states over `num_beams` dimension + tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:]) + # merge `batch_size`, `num_beams`, `num_docs` dims again + return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:]) + + # correctly extend last_hidden_state and attention mask + context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams) + encoder_outputs["last_hidden_state"] = extend_enc_output( + last_hidden_state, num_beams=generation_config.num_beams + ) + + doc_scores = doc_scores.repeat_interleave(generation_config.num_beams, dim=0) + + # define start_len & additional parameters + model_kwargs["doc_scores"] = doc_scores + model_kwargs["encoder_outputs"] = encoder_outputs + model_kwargs["attention_mask"] = context_attention_mask + model_kwargs["n_docs"] = n_docs + + pre_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_seq_length, + encoder_input_ids=context_input_ids, + prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, + logits_processor=logits_processor, + ) + + if generation_config.num_beams == 1: + if generation_config.num_return_sequences > 1: + raise ValueError( + f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" + " greedy search." 
+ ) + return self._greedy_search( + input_ids, + logits_processor=pre_processor, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + **model_kwargs, + ) + elif generation_config.num_beams > 1: + if generation_config.num_return_sequences > generation_config.num_beams: + raise ValueError("`num_return_sequences` has to be smaller than or equal to `num_beams`.") + beam_scorer = BeamSearchScorer( + batch_size=batch_size, + num_beams=generation_config.num_beams, + device=self.device, + length_penalty=generation_config.length_penalty, + do_early_stopping=generation_config.early_stopping, + num_beam_hyps_to_keep=generation_config.num_return_sequences, + max_length=generation_config.max_length, + ) + return self._beam_search( + input_ids, + beam_scorer, + logits_processor=pre_processor, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + **model_kwargs, + ) + else: + raise ValueError( + f"`num_beams` has to be a strictly positive integer (≥ 1), but is {generation_config.num_beams}" + ) + + def get_input_embeddings(self): + return self.rag.generator.get_input_embeddings() + + def get_output_embeddings(self): + return self.rag.generator.get_output_embeddings() + + def set_output_embeddings(self, new_embeddings): + return self.rag.generator.set_output_embeddings(new_embeddings) + + def shift_tokens_right(self, input_ids, start_token_id=None): + """Shift input ids one token to the right, and pad with start_token_id""" + if start_token_id is None: + start_token_id = self.config.decoder_start_token_id + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + shifted_input_ids[:, 0] = start_token_id + return shifted_input_ids + + def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None): + n_docs = n_docs if n_docs is not None else self.config.n_docs + # shift tokens left + target = torch.cat( + [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1 + ) + + def _mask_pads(ll, smooth_obj): + pad_mask = target.eq(self.config.generator.pad_token_id) + if pad_mask.any(): + ll.masked_fill_(pad_mask, 0.0) + smooth_obj.masked_fill_(pad_mask, 0.0) + return ll.squeeze(-1), smooth_obj.squeeze(-1) + + rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs) + + target = target.unsqueeze(-1) + assert target.dim() == rag_logprobs.dim() + + ll = rag_logprobs.gather(dim=-1, index=target) + smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits + ll, smooth_obj = _mask_pads(ll, smooth_obj) + ll = ll.sum(1) # sum over tokens + smooth_obj = smooth_obj.sum(1) + + nll_loss = -ll + smooth_loss = -smooth_obj + + if reduce_loss: + nll_loss = nll_loss.sum() + smooth_loss = smooth_loss.sum() + + eps_i = epsilon / rag_logprobs.size(-1) + loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss + return loss diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/modeling_tf_rag.py b/venv/lib/python3.10/site-packages/transformers/models/rag/modeling_tf_rag.py new file mode 100644 index 0000000000000000000000000000000000000000..9d8ed6504975286516f7dac65d03022158f4249c --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/rag/modeling_tf_rag.py @@ -0,0 +1,1771 @@ +# coding=utf-8 +# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""TFRAG model implementation.""" + + +from __future__ import annotations + +import copy +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...configuration_utils import PretrainedConfig +from ...generation import TFLogitsProcessorList +from ...modeling_tf_utils import ( + TFCausalLanguageModelingLoss, + TFModelInputType, + TFPreTrainedModel, + keras, + shape_list, + unpack_inputs, +) +from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_rag import RagConfig +from .retrieval_rag import RagRetriever + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "RagConfig" + + +@dataclass +class TFRetrievAugLMMarginOutput(ModelOutput): + """ + Base class for retriever augmented marginalized models outputs. + + Args: + loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss. + logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head. The score is possibly marginalized over all documents for + each vocabulary token. + past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, + sequence_length, embed_size_per_head)`). + + Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used + (see `past_key_values` input) to speed up sequential decoding. + doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): + Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and + `question_encoder_last_hidden_state`. + retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): + Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute + the `doc_scores`. + retrieved_doc_ids (`tf.Tensor` (int32) of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): + The indexes of the embedded documents retrieved by the retriever. + context_input_ids (`tf.Tensor`(int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. + context_attention_mask (`tf.Tensor` (int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the + retriever. 
+ question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden states at the output of the last layer of the question encoder pooled output of the + model. + question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. + question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the question encoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the generator encoder of the model. + generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. + generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. + generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + """ + + loss: tf.Tensor | None = None + logits: tf.Tensor = None + past_key_values: List[tf.Tensor] | None = None + doc_scores: tf.Tensor | None = None + retrieved_doc_embeds: tf.Tensor | None = None + retrieved_doc_ids: tf.Tensor | None = None + context_input_ids: tf.Tensor | None = None + context_attention_mask: tf.Tensor | None = None + question_encoder_last_hidden_state: tf.Tensor | None = None + question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None + question_enc_attentions: Tuple[tf.Tensor, ...] 
| None = None + generator_enc_last_hidden_state: tf.Tensor | None = None + generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None + generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None + generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None + generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None + + +@dataclass +class TFRetrievAugLMOutput(ModelOutput): + """ + Args: + logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head. The score is possibly marginalized over all documents for + each vocabulary token. + past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, + sequence_length, embed_size_per_head)`). + + Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used + (see `past_key_values` input) to speed up sequential decoding. + doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): + Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and + `question_encoder_last_hidden_state`. + retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): + Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute + the `doc_scores`. + retrieved_doc_ids (`tf.Tensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): + The indexes of the embedded documents retrieved by the retriever. + context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. + context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the + retriever. + question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden states at the output of the last layer of the question encoder pooled output of the + model. + question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. + question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the question encoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the generator encoder of the model. 
+ generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. + generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. + generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + """ + + logits: tf.Tensor = None + past_key_values: List[tf.Tensor] | None = None + doc_scores: tf.Tensor | None = None + retrieved_doc_embeds: tf.Tensor | None = None + retrieved_doc_ids: tf.Tensor | None = None + context_input_ids: tf.Tensor | None = None + context_attention_mask: tf.Tensor | None = None + question_encoder_last_hidden_state: tf.Tensor | None = None + question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None + question_enc_attentions: Tuple[tf.Tensor, ...] | None = None + generator_enc_last_hidden_state: tf.Tensor | None = None + generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None + generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None + generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None + generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None + + +class TFRagPreTrainedModel(TFPreTrainedModel): + r""" + RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP + Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al. + + RAG is a retrieval-augmented model and encapsulates three components: a question encoder, a dataset retriever and + a generator. The encoder and generator are trainable, while the retriever is just an indexed dataset. + + """ + + config_class = RagConfig + base_model_prefix = "rag" + _keys_to_ignore_on_load_missing = [r"position_ids"] + + @classmethod + def from_pretrained_question_encoder_generator( + cls, + question_encoder_pretrained_model_name_or_path: str = None, + generator_pretrained_model_name_or_path: str = None, + retriever: RagRetriever = None, + *model_args, + **kwargs, + ) -> TFPreTrainedModel: + r""" + Instantiates a question encoder and a generator from one or two base classes of the library from pretrained + model checkpoints.
+ + Params: + question_encoder_pretrained_model_name_or_path (`str`, *optional*): + Information necessary to initiate the question encoder. Can be either: + + - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g., + `google-bert/bert-base-uncased`. + - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g., + `dbmdz/bert-base-german-cased`. + - A path to a *directory* containing model weights saved using + [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case, + `question_encoder_from_pt` should be set to `True`. + + generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): + Information necessary to initiate the generator. Can be either: + + - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g., + `google-t5/t5-small`. + - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g., + `facebook/bart-base`. + - A path to a *directory* containing model weights saved using + [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. + - A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case, + `generator_from_pt` should be set to `True`. + + model_args (remaining positional arguments, *optional*): + All remaining positional arguments will be passed to the underlying model's `__init__` method. + retriever ([`RagRetriever`], *optional*): + The retriever to use. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., + `output_attentions=True`). + + - To update the question_encoder configuration, use the prefix *question_encoder_* for each + configuration parameter. + - To update the generator configuration, use the prefix *generator_* for each configuration parameter. + - To update the parent model configuration, do not use a prefix for each configuration parameter. + + Behaves differently depending on whether a `config` is provided or automatically loaded. + + Example: + + ```python + >>> from transformers import RagRetriever, TFRagModel + + >>> # initialize a RAG from two pretrained models. + >>> model = TFRagModel.from_pretrained_question_encoder_generator( + ... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small" + ... ) + >>> # alternatively, initialize from pytorch pretrained models can also be done + >>> model = TFRagModel.from_pretrained_question_encoder_generator( + ... "facebook/dpr-question_encoder-single-nq-base", + ... "facebook/bart-base", + ... generator_from_pt=True, + ... question_encoder_from_pt=True, + ... ) + + >>> # saving model after fine-tuning + >>> model.save_pretrained("./rag") + + >>> # load retriever + >>> retriever = RagRetriever.from_pretrained( + ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True + ... 
) + >>> # load fine-tuned model with retriever + >>> model = TFRagModel.from_pretrained("./rag", retriever=retriever) + ```""" + + kwargs_question_encoder = { + argument[len("question_encoder_") :]: value + for argument, value in kwargs.items() + if argument.startswith("question_encoder_") + } + + kwargs_generator = { + argument[len("generator_") :]: value + for argument, value in kwargs.items() + if argument.startswith("generator_") + } + + # remove question_encoder, generator kwargs from kwargs + for key in kwargs_question_encoder.keys(): + del kwargs["question_encoder_" + key] + for key in kwargs_generator.keys(): + del kwargs["generator_" + key] + + # Load and initialize the question_encoder and generator + # The distinction between question_encoder and generator at the model level is made + # by the value of the flag `is_generator` that we need to set correctly. + question_encoder = kwargs_question_encoder.pop("model", None) + if question_encoder is None: + assert question_encoder_pretrained_model_name_or_path is not None, ( + "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to" + " be defined" + ) + + from ..auto.modeling_tf_auto import TFAutoModel + + if "config" not in kwargs_question_encoder: + from ..auto.configuration_auto import AutoConfig + + question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path) + kwargs_question_encoder["config"] = question_encoder_config + + question_encoder = TFAutoModel.from_pretrained( + question_encoder_pretrained_model_name_or_path, + name="question_encoder", + load_weight_prefix=cls.load_weight_prefix, + *model_args, + **kwargs_question_encoder, + ) + + generator = kwargs_generator.pop("generator", None) + if generator is None: + assert generator_pretrained_model_name_or_path is not None, ( + "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has" + " to be defined" + ) + + from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM + + if "config" not in kwargs_generator: + from ..auto.configuration_auto import AutoConfig + + generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path) + kwargs_generator["config"] = generator_config + + generator = TFAutoModelForSeq2SeqLM.from_pretrained( + generator_pretrained_model_name_or_path, + name="generator", + load_weight_prefix=cls.load_weight_prefix, + **kwargs_generator, + ) + + # instantiate config with corresponding kwargs + config = kwargs.get("config", None) + if config is None: + config = RagConfig.from_question_encoder_generator_configs( + question_encoder.config, generator.config, **kwargs + ) + + return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever) + + +RAG_START_DOCSTRING = r""" + + RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. + During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract + relevant context documents. The documents are then prepended to the input. Such contextualized inputs are passed + to the generator. + + The question encoder can be any *autoencoding* model, preferably [`TFDPRQuestionEncoder`], and the generator can be + any *seq2seq* model, preferably [`TFBartForConditionalGeneration`].
+ + The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the + outputs of a retriever in multiple steps---see examples for more details. The model is compatible with any + *autoencoding* model as the `question_encoder` and any *seq2seq* model with language model head as the `generator`. + It has been tested with [`TFDPRQuestionEncoder`] as the `question_encoder` and [`TFBartForConditionalGeneration`] + as the `generator`. + + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a TensorFlow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) + subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to + general usage and behavior. + + The model is in a developing state, as it currently supports eager mode only, and may not be exportable in + SavedModel format. + + Args: + config ([`RagConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. + question_encoder ([`TFPreTrainedModel`]): + An encoder model compatible with the faiss index encapsulated by the `retriever`. + generator ([`TFPreTrainedModel`]): + A seq2seq model used as the generator in the RAG architecture. + retriever ([`RagRetriever`]): + A retriever class encapsulating a faiss index queried to obtain context documents for current inputs. +""" + + +RAG_FORWARD_INPUTS_DOCSTRING = r""" + Args: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies + which generator to use; it also specifies a compatible generator tokenizer. Use that tokenizer class to + obtain the indices. + attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_outputs (`tuple(tuple(tf.Tensor))`, *optional*): + Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`, + *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs * + sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the + generator's encoder. + + Used by the [`TFRagModel`] model during decoding. + decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Provide for generation tasks. `None` by default, construct as per instructions for the generator model + you're using with your RAG instance. + decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default.
+ past_key_values (`tuple(tuple(tf.Tensor))`): + Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and + `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are used + in the [`TFRagTokenForGeneration`] model during decoding. + doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): + Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and + `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores` + has to be provided to the forward pass. `doc_scores` can be computed via + `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information. + context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the + retriever. + + If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the + forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. + context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): + Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the + retriever. + + If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the + forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`]. + use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + output_retrieved (`bool`, *optional*): + Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and + `context_attention_mask`. See returned tensors for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`TFRetrievAugLMOutput`] instead of a plain tuple. + n_docs (`int`, *optional*, defaults to `config.n_docs`): + Number of documents to retrieve and/or number of documents for which to generate an answer. +""" + + +@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING) +class TFRagModel(TFRagPreTrainedModel): + load_weight_prefix = "tf_rag_model_1" + + def __init__( + self, + config: Optional[PretrainedConfig] = None, + question_encoder: Optional[TFPreTrainedModel] = None, + generator: Optional[TFPreTrainedModel] = None, + retriever: Optional[RagRetriever] = None, + load_weight_prefix: Optional[str] = None, + **kwargs, + ): + assert config is not None or ( + question_encoder is not None and generator is not None + ), "Either a configuration or a question_encoder and a generator has to be provided."
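+ # When only the sub-models are passed, a composite `RagConfig` is derived from their configs below; it + # records both sub-configurations so the assembled model can later be saved and reloaded as one checkpoint.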
+ + if config is None: + config = RagConfig.from_question_encoder_generator_configs( + question_encoder.config, generator.config, **kwargs + ) + else: + assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}" + super().__init__(config, **kwargs) + + if question_encoder is None: + from ..auto.modeling_tf_auto import TFAutoModel + + question_encoder = TFAutoModel.from_config(config.question_encoder, name="question_encoder") + + if generator is None: + from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM + + load_weight_prefix = load_weight_prefix if load_weight_prefix is not None else self.load_weight_prefix + generator = TFAutoModelForSeq2SeqLM.from_config( + config.generator, name="generator", load_weight_prefix=load_weight_prefix + "/generator" + ) + + self.retriever = retriever + if self.retriever is not None: + assert isinstance( + retriever, RagRetriever + ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`" + self.retriever = retriever + + self.question_encoder = question_encoder + self.generator = generator + + def set_retriever(self, retriever: RagRetriever): + self.retriever = retriever + + @unpack_inputs + @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFRetrievAugLMOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + encoder_outputs: np.ndarray | tf.Tensor | None = None, + decoder_input_ids: np.ndarray | tf.Tensor | None = None, + decoder_attention_mask: np.ndarray | tf.Tensor | None = None, + past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None, + doc_scores: np.ndarray | tf.Tensor | None = None, + context_input_ids: np.ndarray | tf.Tensor | None = None, + context_attention_mask: np.ndarray | tf.Tensor | None = None, + use_cache: bool | None = None, + output_attentions: bool | None = None, + output_hidden_states: bool | None = None, + output_retrieved: bool | None = None, + n_docs: int | None = None, + return_dict: bool | None = None, + training: bool = False, + **kwargs, + ) -> TFRetrievAugLMOutput: + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, RagRetriever, TFRagModel + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base") + >>> retriever = RagRetriever.from_pretrained( + ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True + ... ) + >>> # initialize with RagRetriever to do everything in one forward call + >>> model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True) + + >>> input_dict = tokenizer.prepare_seq2seq_batch( + ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" + ... 
) + >>> input_ids = input_dict["input_ids"] + >>> outputs = model(input_ids) + ```""" + assert ( + "decoder_cached_states" not in kwargs + ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py + + # aliasing to minimize code changing + n_docs = n_docs if n_docs is not None else self.config.n_docs + + # whether retriever has to be used + has_to_retrieve = ( + self.retriever is not None + and (context_input_ids is None or context_attention_mask is None or doc_scores is None) + and encoder_outputs is None + ) + + # encoder_outputs are pre-computed during RAG-token generation + if encoder_outputs is None: + if has_to_retrieve: + question_enc_outputs = self.question_encoder( + input_ids, attention_mask=attention_mask, return_dict=True, training=training + ) + # see https://github.com/huggingface/transformers/blob/main/src/transformers/models/dpr/modeling_tf_dpr.py#L91 + question_encoder_last_hidden_state = question_enc_outputs[ + 0 + ] # hidden states of question encoder => pooler_output + + retriever_outputs = self.retriever( + input_ids, + question_encoder_last_hidden_state.numpy(), + prefix=self.generator.config.prefix, + n_docs=n_docs, + return_tensors="tf", + ) + context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = ( + retriever_outputs["context_input_ids"], + retriever_outputs["context_attention_mask"], + retriever_outputs["retrieved_doc_embeds"], + retriever_outputs["doc_ids"], + ) + + context_input_ids = tf.cast(context_input_ids, tf.int32) + context_attention_mask = tf.cast(context_attention_mask, tf.int32) + retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) + retrieved_doc_ids = tf.cast(retrieved_doc_ids, tf.int32) + + # compute doc_scores + doc_scores = tf.squeeze( + tf.matmul( + tf.expand_dims(question_encoder_last_hidden_state, axis=1), + retrieved_doc_embeds, + transpose_b=True, + ), + axis=1, + ) + + else: + assert context_input_ids is not None, ( + "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can" + " set a retriever using the `set_retriever(...)` function." + ) + assert context_attention_mask is not None, ( + "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you" + " can set a retriever using the `set_retriever(...)` function." + ) + assert doc_scores is not None, ( + "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a" + " retriever using the `set_retriever(...)` function." + ) + + assert ( + doc_scores is not None + ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function." + + assert (doc_scores.shape[1] % n_docs) == 0, ( + f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" + f" {context_input_ids.shape[0]}." 
) + + # duplicate decoder inputs once per retrieved document + if decoder_input_ids is not None: + decoder_input_ids = tf.repeat(decoder_input_ids, n_docs, axis=0) + + if decoder_attention_mask is not None: + decoder_attention_mask = tf.repeat(decoder_attention_mask, n_docs, axis=0) + + gen_outputs = self.generator( + context_input_ids, + attention_mask=context_attention_mask, + encoder_outputs=encoder_outputs, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + return_dict=True, + training=training, + ) + + if not has_to_retrieve: + question_encoder_last_hidden_state = None + question_enc_hidden_states = None + question_enc_attentions = None + retrieved_doc_embeds = None + retrieved_doc_ids = None + else: + question_enc_hidden_states = question_enc_outputs.hidden_states + question_enc_attentions = question_enc_outputs.attentions + + if not has_to_retrieve or not output_retrieved: + # don't output retrieved docs + context_input_ids = None + context_attention_mask = None + retrieved_doc_embeds = None + retrieved_doc_ids = None + + return TFRetrievAugLMOutput( + logits=gen_outputs.logits, + doc_scores=doc_scores, + past_key_values=gen_outputs.past_key_values, + context_input_ids=context_input_ids, + context_attention_mask=context_attention_mask, + retrieved_doc_embeds=retrieved_doc_embeds, + retrieved_doc_ids=retrieved_doc_ids, + question_encoder_last_hidden_state=question_encoder_last_hidden_state, + question_enc_hidden_states=question_enc_hidden_states, + question_enc_attentions=question_enc_attentions, + generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state, + generator_enc_hidden_states=gen_outputs.encoder_hidden_states, + generator_enc_attentions=gen_outputs.encoder_attentions, + generator_dec_hidden_states=gen_outputs.decoder_hidden_states, + generator_dec_attentions=gen_outputs.decoder_attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + with tf.name_scope(self.generator.name): + self.generator.build(None) + with tf.name_scope(self.question_encoder.name): + self.question_encoder.build(None) + + +@add_start_docstrings_to_model_forward( + """ + A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass. + """, + RAG_START_DOCSTRING, +) +class TFRagTokenForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss): + load_weight_prefix = "tf_rag_token_for_generation_1/rag" + + def __init__( + self, + config: Optional[PretrainedConfig] = None, + question_encoder: Optional[TFPreTrainedModel] = None, + generator: Optional[TFPreTrainedModel] = None, + retriever: Optional[RagRetriever] = None, + **kwargs, + ): + assert config is not None or ( + question_encoder is not None and generator is not None + ), "Either a configuration or an encoder and a generator has to be provided."
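+ # Same contract as `TFRagModel`: derive the composite `RagConfig` from the sub-model configs if none is given.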
+ + if config is None: + config = RagConfig.from_question_encoder_generator_configs( + question_encoder.config, generator.config, **kwargs + ) + + super().__init__(config) + + # instantiate model + self.rag = TFRagModel( + config=config, + question_encoder=question_encoder, + generator=generator, + retriever=retriever, + load_weight_prefix=self.load_weight_prefix, + name="rag", + ) + + def set_retriever(self, retriever: RagRetriever): + self.rag.retriever = retriever + + # Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_bart.py + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + use_cache=None, + encoder_outputs=None, + doc_scores=None, + n_docs=None, + **kwargs, + ): + if past_key_values is not None: + # if past is defined use only last decoder_input_ids + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "input_ids": None, + "encoder_outputs": encoder_outputs, + "doc_scores": doc_scores, + "context_attention_mask": attention_mask, + "decoder_input_ids": decoder_input_ids, + "past_key_values": past_key_values, + "use_cache": use_cache, + "do_marginalize": True, + "n_docs": n_docs, + } + + @property + def retriever(self): + return self.rag.retriever + + @property + def generator(self): + return self.rag.generator + + @property + def question_encoder(self): + return self.rag.question_encoder + + @staticmethod + def _gather_beams(nested, beam_indices, batch_axis=0): + """ + RAG-specific `_gather_beams`: gathers the beam slices indexed by `beam_indices` into a new beam array. If the + nested tensor has a shape mismatch with the beam indices, then it means it is the cache. In that case, it + isolates and takes care of the extra dimension for `n_docs`.
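+ + For example, a cache tensor whose leading dimension carries the extra `n_docs` factor is reshaped to + `(batch_size, num_beams, n_docs, ...)`, gathered along the beam axis with `tf.gather`, and then reshaped back + to the layout expected by beam search.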
+ """ + + def gather_fn(tensor): + is_rag_cache = tensor.shape[0] != beam_indices.shape[0] + if is_rag_cache: + n_docs = tensor.shape[0] // beam_indices.shape[0] + batch_size = beam_indices.shape[0] + # reshapes into (batch size, num beams, n_docs, ...), the cache format expected by RAG + tensor = tf.reshape(tensor, (batch_size, -1, n_docs, *tensor.shape[2:])) + + gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1) + + if is_rag_cache: + # reshapes back into the shape expected by beam search + gathered_tensor = tf.reshape(gathered_tensor, (batch_size * n_docs, -1, *gathered_tensor.shape[3:])) + + return gathered_tensor + + return tf.nest.map_structure(gather_fn, nested) + + def marginalize(self, seq_logits, doc_scores, n_docs=None): + n_docs = n_docs if n_docs is not None else self.config.n_docs + + # RAG-token marginalization + seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1) + seq_logprobs = tf.reshape(seq_logprobs, [seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]]) + doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1) + doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) + doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # twice + log_prob_sum = seq_logprobs + doc_logprobs + return tf.reduce_logsumexp(log_prob_sum, axis=1) + + @unpack_inputs + @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + decoder_input_ids: np.ndarray | tf.Tensor | None = None, + decoder_attention_mask: np.ndarray | tf.Tensor | None = None, + encoder_outputs: np.ndarray | tf.Tensor | None = None, + past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None, + doc_scores: np.ndarray | tf.Tensor | None = None, + context_input_ids: np.ndarray | tf.Tensor | None = None, + context_attention_mask: np.ndarray | tf.Tensor | None = None, + use_cache: bool | None = None, + output_attentions: bool | None = None, + output_hidden_states: bool | None = None, + output_retrieved: bool | None = None, + n_docs: int | None = None, + do_marginalize: bool | None = None, + labels: np.ndarray | tf.Tensor | None = None, + reduce_loss: bool | None = None, + return_dict: bool | None = None, + training: bool = False, + **kwargs, # needs kwargs for generation + ) -> TFRetrievAugLMMarginOutput: + r""" + do_marginalize (`bool`, *optional*): + If `True`, the logits are marginalized over all documents by making use of + `torch.nn.functional.log_softmax`. + labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the cross entropy classification loss according to Rag-Token model formulation See + https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Token formulation. Indices should be + in `[0, ..., config.vocab_size - 1]`. + reduce_loss (`bool`, *optional*): + Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum` + operation. + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Legacy dictionary, which is required so that model can use *generate()* function. 
+ + Returns: + + Example: + + ```python + >>> import tensorflow as tf + >>> from transformers import AutoTokenizer, RagRetriever, TFRagTokenForGeneration + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq") + >>> retriever = RagRetriever.from_pretrained( + ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True + ... ) + >>> # initialize with RagRetriever to do everything in one forward call + >>> model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True) + + >>> input_dict = tokenizer.prepare_seq2seq_batch( + ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" + ... ) + >>> outputs = model(input_dict, output_retrieved=True) + + >>> # or use retriever separately + >>> # 1. Encode + >>> input_ids = input_dict["input_ids"] + >>> question_hidden_states = model.question_encoder(input_ids)[0] + >>> # 2. Retrieve + >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") + >>> doc_scores = tf.squeeze( + ... tf.matmul( + ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True + ... ), + ... axis=1, + ... ) + >>> # 3. Forward to generator + >>> outputs = model( + ... inputs=None, + ... context_input_ids=docs_dict["context_input_ids"], + ... context_attention_mask=docs_dict["context_attention_mask"], + ... doc_scores=doc_scores, + ... decoder_input_ids=input_dict["labels"], + ... ) + + >>> # or directly generate + >>> generated = model.generate( + ... context_input_ids=docs_dict["context_input_ids"], + ... context_attention_mask=docs_dict["context_attention_mask"], + ... doc_scores=doc_scores, + ... ) + >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) + ```""" + + assert ( + "decoder_cached_states" not in kwargs + ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py + + do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize + reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss + + if labels is not None: + if decoder_input_ids is None: + decoder_input_ids = labels + use_cache = False + + outputs = self.rag( + input_ids, + attention_mask=attention_mask, + encoder_outputs=encoder_outputs, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + context_input_ids=context_input_ids, + context_attention_mask=context_attention_mask, + doc_scores=doc_scores, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + output_retrieved=output_retrieved, + n_docs=n_docs, + training=training, + ) + + loss = None + logits = outputs.logits + if labels is not None: + assert decoder_input_ids is not None + loss = self.get_nll( + outputs.logits, + outputs.doc_scores, + labels, + reduce_loss=reduce_loss, + epsilon=self.config.label_smoothing, + n_docs=n_docs, + ) + + if do_marginalize: + logits = self.marginalize(logits, outputs.doc_scores, n_docs) + + return TFRetrievAugLMMarginOutput( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + doc_scores=outputs.doc_scores, + context_input_ids=outputs.context_input_ids, + context_attention_mask=outputs.context_attention_mask, + retrieved_doc_embeds=outputs.retrieved_doc_embeds, + retrieved_doc_ids=outputs.retrieved_doc_ids, +
question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
+            question_enc_hidden_states=outputs.question_enc_hidden_states,
+            question_enc_attentions=outputs.question_enc_attentions,
+            generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
+            generator_enc_hidden_states=outputs.generator_enc_hidden_states,
+            generator_enc_attentions=outputs.generator_enc_attentions,
+            generator_dec_hidden_states=outputs.generator_dec_hidden_states,
+            generator_dec_attentions=outputs.generator_dec_attentions,
+        )
+
+    def generate(
+        self,
+        input_ids: TFModelInputType | None = None,
+        attention_mask: tf.Tensor | None = None,
+        context_input_ids=None,
+        context_attention_mask=None,
+        doc_scores=None,
+        n_docs=None,
+        generation_config=None,
+        logits_processor=TFLogitsProcessorList(),
+        **kwargs,
+    ):
+        """
+        Implements TF RAG token decoding.
+
+        Args:
+            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                The sequence used as a prompt for the generation. If `input_ids` is not passed, then
+                `context_input_ids` has to be provided.
+            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+                - 1 for tokens that are **not masked**,
+                - 0 for tokens that are **masked**.
+
+                [What are attention masks?](../glossary#attention-mask)
+            context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+                Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+                retriever.
+
+                If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
+                forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
+            context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+                Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+                retriever.
+
+                If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the
+                forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
+            doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+                Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
+                `question_encoder_last_hidden_state`.
+
+                If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the forward
+                pass. `doc_scores` can be computed from the `retrieved_doc_embeds` returned by
+                [`~RagRetriever.__call__`].
+            n_docs (`int`, *optional*, defaults to `config.n_docs`):
+                Number of documents to retrieve and/or number of documents for which to generate an answer.
+            generation_config (`~generation.GenerationConfig`, *optional*):
+                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+                passed to generate matching the attributes of `generation_config` will override them. If
+                `generation_config` is not provided, the default will be used, which has the following loading
+                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+                default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`TFLogitsProcessorList`, *optional*): + Custom logits processors that complement the default logits processors built from arguments and a + model's config. If a logit processor is passed that is already created with the arguments or a model's + config an error is thrown. + kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be + forwarded to the `forward` function of the model. + + Return: + `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The + second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early + due to the `eos_token_id`. + """ + # Handle `generation_config` and kwargs that might update it + if generation_config is None: + generation_config = self.generation_config + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs + + # set default parameters + n_docs = n_docs if n_docs is not None else self.config.n_docs + + # retrieve docs + if self.retriever is not None and context_input_ids is None: + question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] + out = self.retriever( + input_ids, + question_hidden_states.numpy().astype(np.float32), + prefix=self.generator.config.prefix, + n_docs=n_docs, + return_tensors="tf", + ) + context_input_ids, context_attention_mask, retrieved_doc_embeds = ( + out["context_input_ids"], + out["context_attention_mask"], + out["retrieved_doc_embeds"], + ) + + context_input_ids = tf.cast(context_input_ids, tf.int32) + context_attention_mask = tf.cast(context_attention_mask, tf.int32) + retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) + + # compute doc_scores + doc_scores = tf.matmul( + tf.expand_dims(question_hidden_states, axis=1), retrieved_doc_embeds, transpose_b=True + ) + doc_scores = tf.squeeze(doc_scores, axis=1) + + assert (context_input_ids.shape[0] % n_docs) == 0, ( + f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" + f" {context_input_ids.shape[0]}." 
+ ) + + batch_size = context_input_ids.shape[0] // n_docs + + encoder = self.rag.generator.get_encoder() + encoder_outputs = encoder( + input_ids=context_input_ids, + attention_mask=context_attention_mask, + output_attentions=generation_config.output_attentions, + output_hidden_states=generation_config.output_hidden_states, + return_dict=True, + ) + + decoder_input_ids = tf.fill( + (batch_size * generation_config.num_beams, 1), + tf.cast(generation_config.decoder_start_token_id, tf.int32), + ) + last_hidden_state = encoder_outputs["last_hidden_state"] + + def extend_enc_output(tensor, num_beams=None): + """ + Broadcast tensor with `num_beams` replica, with correct order Input: tensor of shape (batch_size*n_docs , + d) Output: tensor of shape (batch_size*num_beams*n_docs , d) + """ + + # expand batch_size & num_beam dimensions + d_shape_list = tensor.shape[1:] + + # split n_docs dimensions + new_shape = (batch_size, 1, n_docs) + d_shape_list + tensor = tf.reshape(tensor, new_shape) + + # repeat same last hidden states over `num_beams` dimension + new_shape = (batch_size, num_beams, n_docs) + d_shape_list + tensor = tf.broadcast_to(tensor, new_shape) + + # merge `batch_size`, `num_beams`, `num_docs` dims again + new_shape = (batch_size * num_beams * n_docs,) + d_shape_list + return tf.reshape(tensor, new_shape) + + # correctly extend last_hidden_state and attention mask + context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams) + encoder_outputs["last_hidden_state"] = extend_enc_output( + last_hidden_state, num_beams=generation_config.num_beams + ) + + doc_scores = tf.repeat(doc_scores, generation_config.num_beams, axis=0) + + # define start_len & additional parameters + model_kwargs["doc_scores"] = doc_scores + model_kwargs["encoder_outputs"] = encoder_outputs + model_kwargs["attention_mask"] = context_attention_mask + model_kwargs["n_docs"] = n_docs + + pre_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=tf.shape(decoder_input_ids)[-1], + logits_processor=logits_processor, + ) + + if generation_config.num_beams == 1: + return self.greedy_search( + input_ids=decoder_input_ids, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + logits_processor=pre_processor, + output_attentions=generation_config.output_attentions, + output_hidden_states=generation_config.output_hidden_states, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + **model_kwargs, + ) + elif generation_config.num_beams > 1: + if generation_config.num_beams < generation_config.num_return_sequences: + raise ValueError( + "Beam search decoding cannot return more sequences than it has beams. 
Please set num_beams >="
+                    f" num_return_sequences, got {generation_config.num_beams} and"
+                    f" {generation_config.num_return_sequences} (respectively)"
+                )
+
+            def unflatten_beam_dim(tensor):
+                """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
+                shape = shape_list(tensor)
+                return tf.reshape(tensor, [-1, generation_config.num_beams] + shape[1:])
+
+            decoder_input_ids = unflatten_beam_dim(decoder_input_ids)
+            model_kwargs["attention_mask"] = unflatten_beam_dim(model_kwargs["attention_mask"])
+            model_kwargs["encoder_outputs"]["last_hidden_state"] = unflatten_beam_dim(
+                model_kwargs["encoder_outputs"]["last_hidden_state"]
+            )
+
+            return self.beam_search(
+                input_ids=decoder_input_ids,
+                max_length=generation_config.max_length,
+                pad_token_id=generation_config.pad_token_id,
+                eos_token_id=generation_config.eos_token_id,
+                logits_processor=pre_processor,
+                output_attentions=generation_config.output_attentions,
+                output_hidden_states=generation_config.output_hidden_states,
+                output_scores=generation_config.output_scores,
+                return_dict_in_generate=generation_config.return_dict_in_generate,
+                **model_kwargs,
+            )
+        else:
+            raise ValueError(
+                f"`num_beams` has to be a strictly positive integer (>= 1), but is {generation_config.num_beams}"
+            )
+
+    def get_input_embeddings(self):
+        return self.rag.generator.get_input_embeddings()
+
+    def get_output_embeddings(self):
+        return self.rag.generator.get_output_embeddings()
+
+    # Adapted from tf_t5's & tf_bart's _shift_right
+    def shift_tokens_right(self, input_ids, start_token_id=None):
+        """Shift input ids one token to the right, and pad with start_token_id"""
+
+        if start_token_id is None:
+            start_token_id = self.generator.config.decoder_start_token_id
+            assert start_token_id is not None, (
+                "self.generator.config.decoder_start_token_id has to be defined. In Rag we commonly use Bart as"
+                " generator, see Bart docs for more information"
+            )
+
+        pad_token_id = self.generator.config.pad_token_id
+        assert pad_token_id is not None, "self.generator.config.pad_token_id has to be defined."
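+        # Illustrative note (a hedged example, not from the upstream source): with
+        # start_token_id=2, pad_token_id=1 and labels [[5, 8, -100]], the lines below
+        # drop the last label and prepend the start token to get [[2, 5, 8]]; any
+        # surviving -100 sentinel would then be mapped to pad_token_id.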
+
+        start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.cast(start_token_id, input_ids.dtype))
+        shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
+
+        # replace possible -100 values in labels by `pad_token_id`
+        shifted_input_ids = tf.where(
+            shifted_input_ids == -100,
+            tf.fill(shape_list(shifted_input_ids), tf.cast(pad_token_id, input_ids.dtype)),
+            shifted_input_ids,
+        )
+
+        # verify that `labels` has only positive values and -100
+        assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, shifted_input_ids.dtype))
+
+        # Make sure the assertion op is called by wrapping the result in an identity no-op
+        with tf.control_dependencies([assert_gte0]):
+            shifted_input_ids = tf.identity(shifted_input_ids)
+
+        return shifted_input_ids
+
+    # nll stands for 'negative log likelihood'
+    def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
+        n_docs = n_docs if n_docs is not None else self.config.n_docs
+        # shift tokens left (ported from the original PyTorch version)
+
+        target = tf.concat(
+            [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))],
+            axis=1,
+        )
+        rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
+        loss = self.hf_compute_loss(target, rag_logprobs, from_logits=True, reduce_loss=reduce_loss)
+
+        return loss
+
+    # Adapted from modeling_tf_bart, plus smooth_loss to match the PyTorch version
+    def hf_compute_loss(self, labels, y_pred, smooth_epsilon=0.0, from_logits=True, reduce_loss=False):
+        """CrossEntropyLoss that ignores pad tokens"""
+        # Matt: As written, this loss is not XLA-compatible, but it's doing some very weird things
+        # and I don't feel comfortable converting it.
+        loss_fn = keras.losses.SparseCategoricalCrossentropy(
+            from_logits=True,
+            reduction=keras.losses.Reduction.SUM,
+        )
+
+        if from_logits is False:  # convert probabilities to logits
+            eps = 1e-9
+            y_pred = tf.clip_by_value(y_pred, clip_value_min=eps, clip_value_max=1 - eps)
+            y_pred = tf.math.log(y_pred)
+
+        logits = y_pred
+        melted_labels = tf.reshape(labels, (-1,))
+        active_loss = tf.not_equal(melted_labels, self.config.generator.pad_token_id)
+
+        reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, logits.shape[2])), active_loss)
+        labels = tf.boolean_mask(melted_labels, active_loss)
+        nll_loss = loss_fn(labels, reduced_logits)
+
+        smooth_loss = -tf.reduce_sum(reduced_logits, axis=-1)
+        smooth_loss = tf.reduce_sum(smooth_loss)  # sum and squeeze like torch
+        eps_i = smooth_epsilon / reduced_logits.shape[-1]
+
+        loss = (1.0 - smooth_epsilon) * nll_loss + eps_i * smooth_loss
+
+        return loss
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "rag", None) is not None:
+            with tf.name_scope(self.rag.name):
+                self.rag.build(None)
+
+
+@add_start_docstrings_to_model_forward(
+    """
+    A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
+ """, + RAG_START_DOCSTRING, +) +class TFRagSequenceForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss): + load_weight_prefix = "tf_rag_sequence_for_generation_1/rag" + + def __init__( + self, + config: Optional[PretrainedConfig] = None, + question_encoder: Optional[TFPreTrainedModel] = None, + generator: Optional[TFPreTrainedModel] = None, + retriever: Optional[RagRetriever] = None, + **kwargs, + ): + assert config is not None or ( + question_encoder is not None and generator is not None + ), "Either a configuration or an encoder and a generator has to be provided." + + if config is None: + config = RagConfig.from_question_encoder_generator_configs( + question_encoder.config, generator.config, **kwargs + ) + + super().__init__(config) + + # instantiate model + self.rag = TFRagModel( + config=config, + question_encoder=question_encoder, + generator=generator, + retriever=retriever, + load_weight_prefix=self.load_weight_prefix, + name="rag", + ) + + def set_retriever(self, retriever: RagRetriever): + self.rag.retriever = retriever + + @property + def retriever(self): + return self.rag.retriever + + @property + def generator(self): + return self.rag.generator + + @property + def question_encoder(self): + return self.rag.question_encoder + + @unpack_inputs + @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + decoder_input_ids: np.ndarray | tf.Tensor | None = None, + decoder_attention_mask: np.ndarray | tf.Tensor | None = None, + encoder_outputs: np.ndarray | tf.Tensor | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + doc_scores: np.ndarray | tf.Tensor | None = None, + context_input_ids: np.ndarray | tf.Tensor | None = None, + context_attention_mask: np.ndarray | tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_retrieved: Optional[bool] = None, + n_docs: Optional[int] = None, + exclude_bos_score: Optional[bool] = None, + labels: np.ndarray | tf.Tensor | None = None, + reduce_loss: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + **kwargs, # needs kwargs for generation + ) -> Union[Tuple[tf.Tensor], TFRetrievAugLMMarginOutput]: + r""" + exclude_bos_score (`bool`, *optional*): + Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing + the loss. + labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the cross entropy classification loss according to Rag-Sequence model formulation See + https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Sequence formulation. Indices should + be in `[0, ..., config.vocab_size - 1]`. + reduce_loss (`bool`, *optional*): + Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum` + operation. + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Legacy dictionary, which is required so that model can use *generate()* function. 
+ + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, RagRetriever, TFRagSequenceForGeneration + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq") + >>> retriever = RagRetriever.from_pretrained( + ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True + ... ) + >>> # initialize with RagRetriever to do everything in one forward call + >>> model = TFRagSequenceForGeneration.from_pretrained( + ... "facebook/rag-sequence-nq", retriever=retriever, from_pt=True + ... ) + + >>> input_dict = tokenizer.prepare_seq2seq_batch( + ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" + ... ) + >>> outputs = model(input_dict, output_retrieved=True) + + >>> # or use retriever separately + >>> # 1. Encode + >>> input_ids = input_dict["input_ids"] + >>> question_hidden_states = model.question_encoder(input_ids)[0] + >>> # 2. Retrieve + >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") + >>> doc_scores = tf.squeeze( + ... tf.matmul( + ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True + ... ), + ... axis=1, + ... ) + >>> # 3. Forward to generator + >>> outputs = model( + ... inputs=None, + ... context_input_ids=docs_dict["context_input_ids"], + ... context_attention_mask=docs_dict["context_attention_mask"], + ... doc_scores=doc_scores, + ... decoder_input_ids=input_dict["labels"], + ... ) + + >>> # or directly generate + >>> generated = model.generate( + ... context_input_ids=docs_dict["context_input_ids"], + ... context_attention_mask=docs_dict["context_attention_mask"], + ... doc_scores=doc_scores, + ... ) + >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) + ```""" + + assert ( + "decoder_cached_states" not in kwargs + ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py + + exclude_bos_score = exclude_bos_score if exclude_bos_score else self.config.exclude_bos_score + reduce_loss = reduce_loss if reduce_loss else self.config.reduce_loss + + if labels is not None: + if decoder_input_ids is None: + decoder_input_ids = labels + use_cache = False + + outputs = self.rag( + input_ids, + attention_mask=attention_mask, + encoder_outputs=encoder_outputs, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + context_input_ids=context_input_ids, + context_attention_mask=context_attention_mask, + doc_scores=doc_scores, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + output_retrieved=output_retrieved, + n_docs=n_docs, + training=training, + ) + + loss = None + if labels is not None: + loss = self.get_nll( + outputs.logits, + outputs.doc_scores, + labels, + reduce_loss=reduce_loss, + epsilon=self.config.label_smoothing, + n_docs=n_docs, + ) + + return TFRetrievAugLMMarginOutput( + loss=loss, + logits=outputs.logits, + doc_scores=outputs.doc_scores, + past_key_values=outputs.past_key_values, + context_input_ids=outputs.context_input_ids, + context_attention_mask=outputs.context_attention_mask, + retrieved_doc_embeds=outputs.retrieved_doc_embeds, + retrieved_doc_ids=outputs.retrieved_doc_ids, + question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, + question_enc_hidden_states=outputs.question_enc_hidden_states, + question_enc_attentions=outputs.question_enc_attentions, + 
generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, + generator_enc_hidden_states=outputs.generator_enc_hidden_states, + generator_enc_attentions=outputs.generator_enc_attentions, + generator_dec_hidden_states=outputs.generator_dec_hidden_states, + generator_dec_attentions=outputs.generator_dec_attentions, + ) + + def get_nll( + self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None + ): + # shift tokens left + target = tf.concat( + [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))], + axis=1, + ) + + # bos_token_id is None for T5 + bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id + n_docs = n_docs if n_docs is not None else self.config.n_docs + equal_bos_token_id_all = tf.reduce_all(tf.equal(target[:, 0], bos_token_id)) + use_bos = bos_token_id is not None and equal_bos_token_id_all + + def _mask_pads(ll, smooth_obj): + pad_mask = tf.equal(target, tf.cast(self.config.generator.pad_token_id, target.dtype)) + if tf.reduce_any(pad_mask): + ll = tf.where(pad_mask, 0.0, ll) + smooth_obj = tf.where(pad_mask, 0.0, smooth_obj) + return tf.squeeze(ll, axis=-1), tf.squeeze(smooth_obj, axis=-1) + + # seq_logits.shape = (batch*n_docs, tgt_len , vocabs) + seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1) + seq_logprobs = tf.reshape( + seq_logprobs, (seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]) + ) # (batch_size, n_docs, tgt_len, vocabs) + doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1) + doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) + doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # done twice to get 4-D + + # RAG-sequence marginalization + first_token_scores = seq_logprobs[:, :, :1, :] + second_token_scores = seq_logprobs[:, :, 1:2, :] + remainder = seq_logprobs[:, :, 2:, :] + rag_logprobs = tf.concat([first_token_scores, second_token_scores + doc_logprobs, remainder], axis=2) + + # calculate loss + target = tf.expand_dims(target, axis=1) # n_docs dimension + target = tf.expand_dims(target, axis=-1) # logits dimension + target = tf.repeat(target, n_docs, axis=1) + assert len(target.shape) == len(rag_logprobs.shape) + + # last-axis gathering only - use 2D-reshape-trick for Torch's style nD gathering + def torch_gather(param, id_tensor): + # 2d-gather torch equivalent: https://stackoverflow.com/questions/52129909/tensorflow-equivalent-of-torch-gather + def gather2d(target, id_tensor): + idx = tf.stack([tf.range(tf.shape(id_tensor)[0], dtype=id_tensor.dtype), id_tensor[:, 0]], axis=-1) + result = tf.gather_nd(target, idx) + return tf.expand_dims(result, axis=-1) + + target = tf.reshape(param, (-1, param.shape[-1])) # reshape 2D + target_shape = id_tensor.shape + + id_tensor = tf.reshape(id_tensor, (-1, 1)) # also 2D-index + result = gather2d(target, id_tensor) + return tf.reshape(result, target_shape) + + ll = torch_gather(rag_logprobs, id_tensor=target) + smooth_obj = tf.reduce_sum(rag_logprobs, axis=-1, keepdims=True) # total sum of all (normalised) logits + + ll, smooth_obj = _mask_pads(ll, smooth_obj) + + # sum over tokens, exclude bos while scoring + if exclude_bos_score and use_bos: + ll = tf.reduce_sum(ll[:, :, 1:], axis=2) + else: + ll = tf.reduce_sum(ll, axis=2) + + smooth_obj = tf.reduce_sum(smooth_obj, axis=2) + ll = tf.math.reduce_logsumexp(ll, axis=1) # logsumexp over docs + smooth_obj = tf.math.reduce_logsumexp(smooth_obj, axis=1) + + nll_loss = -ll + smooth_loss = -smooth_obj + + if 
reduce_loss:
+            nll_loss = tf.reduce_sum(nll_loss)
+            smooth_loss = tf.reduce_sum(smooth_loss)
+
+        eps_i = epsilon / rag_logprobs.shape[-1]
+        loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
+        return loss
+
+    def generate(
+        self,
+        input_ids: TFModelInputType | None = None,
+        attention_mask: tf.Tensor | None = None,
+        context_input_ids=None,
+        context_attention_mask=None,
+        doc_scores=None,
+        do_deduplication=None,  # defaults to True
+        num_return_sequences=None,  # defaults to 1
+        num_beams=None,  # defaults to 1
+        n_docs=None,
+        **model_kwargs,
+    ):
+        """
+        Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`] documentation
+        for more information on how to set other generate input parameters.
+
+        Args:
+            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                The sequence used as a prompt for the generation. If `input_ids` is not passed, then
+                `context_input_ids` has to be provided.
+            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for
+                tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention
+                masks?](../glossary#attention-mask)
+            context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+                Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+                retriever.
+            context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+                Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+                retriever. If the model is not initialized with a `retriever` or `input_ids` is not given,
+                `context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are
+                returned by [`~RagRetriever.__call__`].
+            doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+                Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
+                `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or
+                `input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are
+                returned by [`~RagRetriever.__call__`].
+            do_deduplication (`bool`, *optional*):
+                Whether or not to deduplicate the generations from different context documents for a given input. Has
+                to be set to `False` if used while training with a distributed backend.
+            num_return_sequences (`int`, *optional*, defaults to 1):
+                The number of independently computed returned sequences for each element in the batch. Note that this
+                is not the value we pass to the `generator`'s [`~generation.GenerationMixin.generate`] function,
+                where we set `num_return_sequences` to `num_beams`.
+            num_beams (`int`, *optional*, defaults to 1):
+                Number of beams for beam search. 1 means no beam search.
+            n_docs (`int`, *optional*, defaults to `config.n_docs`):
+                Number of documents to retrieve and/or number of documents for which to generate an answer.
+            kwargs (`Dict[str, Any]`, *optional*):
+                Additional kwargs will be passed to [`~generation.GenerationMixin.generate`].
+
+        Return:
+            `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences.
The + second dimension (sequence length) is either equal to `max_length` or shorter if all batches finished early + due to the `eos_token_id`. + """ + + n_docs = n_docs if n_docs is not None else self.config.n_docs + do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication + num_doc_return_sequences = ( + num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences + ) + num_beams = num_beams if num_beams is not None else self.config.num_beams + + assert ( + input_ids is not None or context_input_ids is not None + ), " At least one of input_ids or context_input_ids must be given" + + if self.retriever is not None and context_input_ids is None: + question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] + context_input_ids = self.retriever( + input_ids, + question_hidden_states.numpy(), + prefix=self.generator.config.prefix, + n_docs=n_docs, + return_tensors="tf", + )["context_input_ids"] + + hypos = [] + model_kwargs["num_beams"] = num_beams + model_kwargs["num_return_sequences"] = num_beams # put here so that not confused with num_doc_return_sequences + model_kwargs["attention_mask"] = None + + batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs + + for index in range(batch_size): + # first, generate beams from documents: + generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len) + + output_sequences = self.generator.generate( + generator_input_ids, + **model_kwargs, + ) # n_docs * n_beam, tgt_len + if do_deduplication: + # do_deduplication -- for TF, work on Eager mode only! + output_sequences = tf.stack(list({str(k.numpy().tolist()): k for k in output_sequences}.values())) + + num_candidates = output_sequences.shape[ + 0 + ] # after deduplication, this number can be less than n_docs*n_beam + + # then, run model forwards to get nll scores: + if input_ids is not None: + new_input_ids = tf.tile(input_ids[index : index + 1], (num_candidates, 1)) + outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True) + else: # input_ids is None, need context_input_ids/mask and doc_scores + assert context_attention_mask is not None, ( + "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you" + " can set a retriever using the `set_retriever(...)` function." + ) + assert doc_scores is not None, ( + "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a" + " retriever using the `set_retriever(...)` function." 
+ ) + + individual_input_ids = tf.tile( + generator_input_ids, (num_candidates, 1) + ) # (num_candidates*n_docs, max_len) + + individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs] + individual_attention_mask = tf.tile(individual_attention_mask, (num_candidates, 1)) + + individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs] + individual_doc_scores = tf.tile(individual_doc_scores, (num_candidates, 1)) # [num_candidates, n_docs] + + outputs = self( + input_ids=None, + context_input_ids=individual_input_ids, + context_attention_mask=individual_attention_mask, + doc_scores=individual_doc_scores, + labels=output_sequences, + exclude_bos_score=True, + ) + + top_cand_inds = tf.math.top_k((-outputs["loss"]), k=num_doc_return_sequences)[1] + + # add hypothesis + hypos.append(tf.gather(output_sequences, top_cand_inds)) + + return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id) + + @staticmethod + def _cat_and_pad(tensors, pad_token_id): + # used by generate(): tensors is a (batched) list of (candidates, len); len is varied across batch + + # Initialize padded tensor with shape ( all_candidates , max_candidate_length ), + # where all_candidates counted from all inputs + new_shape = sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors]) + output = tf.fill(new_shape, pad_token_id) + + # Normal tensor doesn't support slice assignment, so we need tf.Variable + output = tf.Variable(output) + + # Assign, and then convert back to tensor + ind = 0 + for t in tensors: + output[ind : ind + t.shape[0], : t.shape[1]].assign(t) + ind += t.shape[0] + + output = tf.convert_to_tensor(output) + return tf.cast(output, tensors[0][0][0].dtype) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "rag", None) is not None: + with tf.name_scope(self.rag.name): + self.rag.build(None) diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py b/venv/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py new file mode 100644 index 0000000000000000000000000000000000000000..a448132300d338c13351d9d4a7c1eb285c263e7d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/rag/retrieval_rag.py @@ -0,0 +1,674 @@ +# coding=utf-8 +# Copyright 2020, The RAG Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
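+# A minimal usage sketch of the retriever implemented below (a hedged illustration;
+# the 5-doc setting and "np" tensors are assumptions, not requirements):
+#
+#     out = retriever(question_input_ids, question_hidden_states, return_tensors="np", n_docs=5)
+#     # out["context_input_ids"]: (batch_size * n_docs, max_combined_length)
+#     # out["retrieved_doc_embeds"]: (batch_size, n_docs, vector_size)
+#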
+"""RAG Retriever model implementation.""" + +import os +import pickle +import time +from typing import Iterable, List, Optional, Tuple + +import numpy as np + +from ...tokenization_utils import PreTrainedTokenizer +from ...tokenization_utils_base import BatchEncoding +from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool +from .configuration_rag import RagConfig +from .tokenization_rag import RagTokenizer + + +if is_datasets_available(): + from datasets import Dataset, load_dataset, load_from_disk + +if is_faiss_available(): + import faiss + + +logger = logging.get_logger(__name__) + + +LEGACY_INDEX_PATH = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/" + + +class Index: + """ + A base class for the Indices encapsulated by the [`RagRetriever`]. + """ + + def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]: + """ + Returns a list of dictionaries, containing titles and text of the retrieved documents. + + Args: + doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`): + A tensor of document indices. + """ + raise NotImplementedError + + def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]: + """ + For each query in the batch, retrieves `n_docs` documents. + + Args: + question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`): + An array of query vectors. + n_docs (`int`): + The number of docs retrieved per query. + + Returns: + `np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of + shape `(batch_size, vector_size)`: A tensor of vector representations of retrieved documents. + """ + raise NotImplementedError + + def is_initialized(self): + """ + Returns `True` if index is already initialized. + """ + raise NotImplementedError + + def init_index(self): + """ + A function responsible for loading the index into memory. Should be called only once per training run of a RAG + model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load + the index. + """ + raise NotImplementedError + + +class LegacyIndex(Index): + """ + An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use + default faiss index parameters as specified in that repository. + + Args: + vector_size (`int`): + The dimension of indexed vectors. + index_path (`str`): + A path to a *directory* containing index files compatible with [`~models.rag.retrieval_rag.LegacyIndex`] + """ + + INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index" + PASSAGE_FILENAME = "psgs_w100.tsv.pkl" + + def __init__(self, vector_size, index_path): + self.index_id_to_db_id = [] + self.index_path = index_path + self.passages = self._load_passages() + self.vector_size = vector_size + self.index = None + self._index_initialized = False + + def _resolve_path(self, index_path, filename): + is_local = os.path.isdir(index_path) + try: + # Load from URL or cache if already cached + resolved_archive_file = cached_file(index_path, filename) + except EnvironmentError: + msg = ( + f"Can't load '{filename}'. 
Make sure that:\n\n" + f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n" + f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n" + ) + raise EnvironmentError(msg) + if is_local: + logger.info(f"loading file {resolved_archive_file}") + else: + logger.info(f"loading file {filename} from cache at {resolved_archive_file}") + return resolved_archive_file + + def _load_passages(self): + logger.info(f"Loading passages from {self.index_path}") + passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME) + if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")): + raise ValueError( + "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially " + "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or " + "that could have been tampered with. If you already verified the pickle data and decided to use it, " + "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it." + ) + with open(passages_path, "rb") as passages_file: + passages = pickle.load(passages_file) + return passages + + def _deserialize_index(self): + logger.info(f"Loading index from {self.index_path}") + resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index.dpr") + self.index = faiss.read_index(resolved_index_path) + resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + ".index_meta.dpr") + if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")): + raise ValueError( + "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially " + "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or " + "that could have been tampered with. If you already verified the pickle data and decided to use it, " + "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it." 
+            )
+        with open(resolved_meta_path, "rb") as metadata_file:
+            self.index_id_to_db_id = pickle.load(metadata_file)
+        assert (
+            len(self.index_id_to_db_id) == self.index.ntotal
+        ), "Deserialized index_id_to_db_id should match faiss index size"
+
+    def is_initialized(self):
+        return self._index_initialized
+
+    def init_index(self):
+        index = faiss.IndexHNSWFlat(self.vector_size + 1, 512)
+        index.hnsw.efSearch = 128
+        index.hnsw.efConstruction = 200
+        self.index = index
+        self._deserialize_index()
+        self._index_initialized = True
+
+    def get_doc_dicts(self, doc_ids: np.ndarray):
+        doc_list = []
+        for doc_ids_i in doc_ids:
+            ids = [str(int(doc_id)) for doc_id in doc_ids_i]
+            docs = [self.passages[doc_id] for doc_id in ids]
+            doc_list.append(docs)
+        doc_dicts = []
+        for docs in doc_list:
+            doc_dict = {}
+            doc_dict["title"] = [doc[1] for doc in docs]
+            doc_dict["text"] = [doc[0] for doc in docs]
+            doc_dicts.append(doc_dict)
+        return doc_dicts
+
+    def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
+        # the legacy DPR HNSW index stores one auxiliary dimension, so queries are padded with a zero column
+        aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1)
+        query_hnsw_vectors = np.hstack((question_hidden_states, aux_dim))
+        _, docs_ids = self.index.search(query_hnsw_vectors, n_docs)
+        vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids]
+        ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids]
+        return np.array(ids), np.array(vectors)
+
+
+class HFIndexBase(Index):
+    def __init__(self, vector_size, dataset, index_initialized=False):
+        self.vector_size = vector_size
+        self.dataset = dataset
+        self._index_initialized = index_initialized
+        self._check_dataset_format(with_index=index_initialized)
+        dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True, dtype="float32")
+
+    def _check_dataset_format(self, with_index: bool):
+        if not isinstance(self.dataset, Dataset):
+            raise ValueError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}")
+        if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0:
+            raise ValueError(
+                "Dataset should be a dataset with the following columns: "
+                "title (str), text (str) and embeddings (arrays of dimension vector_size), "
+                f"but got columns {self.dataset.column_names}"
+            )
+        if with_index and "embeddings" not in self.dataset.list_indexes():
+            raise ValueError(
+                "Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it "
+                "or `dataset.load_faiss_index` to load one from the disk."
+            )
+
+    def init_index(self):
+        raise NotImplementedError()
+
+    def is_initialized(self):
+        return self._index_initialized
+
+    def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
+        return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])]
+
+    def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
+        _, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs)
+        docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]
+        vectors = [doc["embeddings"] for doc in docs]
+        for i in range(len(vectors)):
+            if len(vectors[i]) < n_docs:
+                # pad with zero vectors when the index returns fewer than n_docs results
+                vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))])
+        return np.array(ids), np.array(vectors)  # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
+
+
+class CanonicalHFIndex(HFIndexBase):
+    """
+    A wrapper around an instance of [`~datasets.Datasets`]. If `index_path` is set to `None`, we load the pre-computed
+    index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path
+    on disk.
+
+    Args:
+        vector_size (`int`): the dimension of the passage embeddings used by the index
+        dataset_name (`str`, optional, defaults to `wiki_dpr`):
+            A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids
+            with `datasets.list_datasets()`).
+        dataset_split (`str`, optional, defaults to `train`):
+            Which split of the `dataset` to load.
+        index_name (`str`, optional):
+            The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be saved
+            under this name.
+        index_path (`str`, optional, defaults to `None`):
+            The path to the serialized faiss index on disk.
+        use_dummy_dataset (`bool`, optional, defaults to `False`):
+            If True, use the dummy configuration of the dataset for tests.
+    """
+
+    def __init__(
+        self,
+        vector_size: int,
+        dataset_name: str = "wiki_dpr",
+        dataset_split: str = "train",
+        index_name: Optional[str] = None,
+        index_path: Optional[str] = None,
+        use_dummy_dataset=False,
+        dataset_revision=None,
+    ):
+        if int(index_path is None) + int(index_name is None) != 1:
+            raise ValueError("Please provide exactly one of `index_name` or `index_path`.")
+        self.dataset_name = dataset_name
+        self.dataset_split = dataset_split
+        self.index_name = index_name
+        self.index_path = index_path
+        self.use_dummy_dataset = use_dummy_dataset
+        self.dataset_revision = dataset_revision
+        logger.info(f"Loading passages from {self.dataset_name}")
+        dataset = load_dataset(
+            self.dataset_name,
+            with_index=False,
+            split=self.dataset_split,
+            dummy=self.use_dummy_dataset,
+            revision=dataset_revision,
+        )
+        super().__init__(vector_size, dataset, index_initialized=False)
+
+    def init_index(self):
+        if self.index_path is not None:
+            logger.info(f"Loading index from {self.index_path}")
+            self.dataset.load_faiss_index("embeddings", file=self.index_path)
+        else:
+            logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}")
+            self.dataset = load_dataset(
+                self.dataset_name,
+                with_embeddings=True,
+                with_index=True,
+                split=self.dataset_split,
+                index_name=self.index_name,
+                dummy=self.use_dummy_dataset,
+                revision=self.dataset_revision,
+            )
+            self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True)
+        self._index_initialized = True
+
+
+class CustomHFIndex(HFIndexBase):
+    """
+    A wrapper around an instance of [`~datasets.Datasets`]. The dataset and the index are both loaded from the
+    indicated paths on disk.
+
+    Args:
+        vector_size (`int`): the dimension of the passage embeddings used by the index
+        dataset_path (`str`):
+            The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and
+            embeddings (arrays of dimension vector_size)
+        index_path (`str`):
+            The path to the serialized faiss index on disk.
+ """ + + def __init__(self, vector_size: int, dataset, index_path=None): + super().__init__(vector_size, dataset, index_initialized=index_path is None) + self.index_path = index_path + + @classmethod + def load_from_disk(cls, vector_size, dataset_path, index_path): + logger.info(f"Loading passages from {dataset_path}") + if dataset_path is None or index_path is None: + raise ValueError( + "Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` " + "and `dataset.get_index('embeddings').save(index_path)`." + ) + dataset = load_from_disk(dataset_path) + return cls(vector_size=vector_size, dataset=dataset, index_path=index_path) + + def init_index(self): + if not self.is_initialized(): + logger.info(f"Loading index from {self.index_path}") + self.dataset.load_faiss_index("embeddings", file=self.index_path) + self._index_initialized = True + + +class RagRetriever: + """ + Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents + contents, and it formats them to be used with a RagModel. + + Args: + config ([`RagConfig`]): + The configuration of the RAG model this Retriever is used with. Contains parameters indicating which + `Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical + one (default) from the datasets library with `config.index_name="wiki_dpr"` for example. + question_encoder_tokenizer ([`PreTrainedTokenizer`]): + The tokenizer that was used to tokenize the question. It is used to decode the question and then use the + generator_tokenizer. + generator_tokenizer ([`PreTrainedTokenizer`]): + The tokenizer used for the generator part of the RagModel. + index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration): + If specified, use this index instead of the one built using the configuration + + Examples: + + ```python + >>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact') + >>> from transformers import RagRetriever + + >>> retriever = RagRetriever.from_pretrained( + ... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed" + ... ) + + >>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py + >>> from transformers import RagRetriever + + >>> dataset = ( + ... ... + ... ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index + >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset) + + >>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py + >>> from transformers import RagRetriever + + >>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)* + >>> index_path = "path/to/my/index.faiss" # faiss index saved via *dataset.get_index("embeddings").save(...)* + >>> retriever = RagRetriever.from_pretrained( + ... "facebook/dpr-ctx_encoder-single-nq-base", + ... index_name="custom", + ... passages_path=dataset_path, + ... index_path=index_path, + ... 
) + + >>> # To load the legacy index built originally for Rag's paper + >>> from transformers import RagRetriever + + >>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy") + ```""" + + def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True): + self._init_retrieval = init_retrieval + requires_backends(self, ["datasets", "faiss"]) + super().__init__() + self.index = index or self._build_index(config) + self.generator_tokenizer = generator_tokenizer + self.question_encoder_tokenizer = question_encoder_tokenizer + + self.n_docs = config.n_docs + self.batch_size = config.retrieval_batch_size + + self.config = config + if self._init_retrieval: + self.init_retrieval() + + self.ctx_encoder_tokenizer = None + self.return_tokenized_docs = False + + @staticmethod + def _build_index(config): + if config.index_name == "legacy": + return LegacyIndex( + config.retrieval_vector_size, + config.index_path or LEGACY_INDEX_PATH, + ) + elif config.index_name == "custom": + return CustomHFIndex.load_from_disk( + vector_size=config.retrieval_vector_size, + dataset_path=config.passages_path, + index_path=config.index_path, + ) + else: + return CanonicalHFIndex( + vector_size=config.retrieval_vector_size, + dataset_name=config.dataset, + dataset_split=config.dataset_split, + index_name=config.index_name, + index_path=config.index_path, + use_dummy_dataset=config.use_dummy_dataset, + dataset_revision=config.dataset_revision, + ) + + @classmethod + def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): + requires_backends(cls, ["datasets", "faiss"]) + config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) + rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) + question_encoder_tokenizer = rag_tokenizer.question_encoder + generator_tokenizer = rag_tokenizer.generator + if indexed_dataset is not None: + config.index_name = "custom" + index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) + else: + index = cls._build_index(config) + return cls( + config, + question_encoder_tokenizer=question_encoder_tokenizer, + generator_tokenizer=generator_tokenizer, + index=index, + ) + + def save_pretrained(self, save_directory): + if isinstance(self.index, CustomHFIndex): + if self.config.index_path is None: + index_path = os.path.join(save_directory, "hf_dataset_index.faiss") + self.index.dataset.get_index("embeddings").save(index_path) + self.config.index_path = index_path + if self.config.passages_path is None: + passages_path = os.path.join(save_directory, "hf_dataset") + # datasets don't support save_to_disk with indexes right now + faiss_index = self.index.dataset._indexes.pop("embeddings") + self.index.dataset.save_to_disk(passages_path) + self.index.dataset._indexes["embeddings"] = faiss_index + self.config.passages_path = passages_path + self.config.save_pretrained(save_directory) + rag_tokenizer = RagTokenizer( + question_encoder=self.question_encoder_tokenizer, + generator=self.generator_tokenizer, + ) + rag_tokenizer.save_pretrained(save_directory) + + def init_retrieval(self): + """ + Retriever initialization function. It loads the index into memory. + """ + + logger.info("initializing retrieval") + self.index.init_index() + + def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None): + r""" + Postprocessing retrieved `docs` and combining them with `input_strings`. 
+ + Args: + docs (`dict`): + Retrieved documents. + input_strings (`str`): + Input strings decoded by `preprocess_query`. + prefix (`str`): + Prefix added at the beginning of each input, typically used with T5-based models. + + Return: + `tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible + `attention_mask`. + """ + + def cat_input_and_doc(doc_title, doc_text, input_string, prefix): + # TODO(Patrick): if we train more RAG models, I want to put the input first to take advantage of effortless truncation + # TODO(piktus): better handling of truncation + if doc_title.startswith('"'): + doc_title = doc_title[1:] + if doc_title.endswith('"'): + doc_title = doc_title[:-1] + if prefix is None: + prefix = "" + out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace( + " ", " " + ) + return out + + rag_input_strings = [ + cat_input_and_doc( + docs[i]["title"][j], + docs[i]["text"][j], + input_strings[i], + prefix, + ) + for i in range(len(docs)) + for j in range(n_docs) + ] + + contextualized_inputs = self.generator_tokenizer.batch_encode_plus( + rag_input_strings, + max_length=self.config.max_combined_length, + return_tensors=return_tensors, + padding="max_length", + truncation=True, + ) + + return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"] + + def _chunk_tensor(self, t: Iterable, chunk_size: int) -> List[Iterable]: + return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)] + + def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray]: + question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size) + ids_batched = [] + vectors_batched = [] + for question_hidden_states in question_hidden_states_batched: + start_time = time.time() + ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs) + logger.debug( + f"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}" + ) + ids_batched.extend(ids) + vectors_batched.extend(vectors) + return ( + np.array(ids_batched), + np.array(vectors_batched), + ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d) + + def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]: + """ + Retrieves documents for specified `question_hidden_states`. + + Args: + question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`): + A batch of query vectors to retrieve with. + n_docs (`int`): + The number of docs retrieved per query. + + Return: + `Tuple[np.ndarray, np.ndarray, List[dict]]`: A tuple with the following objects: + + - **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings + of the retrieved docs per query. + - **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index + - **doc_dicts** (`List[dict]`): The `retrieved_doc_embeds` examples per query. 
+ """ + + doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) + return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids) + + def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer): + # used in end2end retriever training + self.ctx_encoder_tokenizer = ctx_encoder_tokenizer + self.return_tokenized_docs = True + + def __call__( + self, + question_input_ids: List[List[int]], + question_hidden_states: np.ndarray, + prefix=None, + n_docs=None, + return_tensors=None, + ) -> BatchEncoding: + """ + Retrieves documents for specified `question_hidden_states`. + + Args: + question_input_ids (`List[List[int]]`) batch of input ids + question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`: + A batch of query vectors to retrieve with. + prefix (`str`, *optional*): + The prefix used by the generator's tokenizer. + n_docs (`int`, *optional*): + The number of docs retrieved per query. + return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"): + If set, will return tensors instead of list of python integers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return Numpy `np.ndarray` objects. + + Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + + - **context_input_ids** -- List of token ids to be fed to a model. + + [What are input IDs?](../glossary#input-ids) + + - **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model + (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). + + [What are attention masks?](../glossary#attention-mask) + + - **retrieved_doc_embeds** -- List of embeddings of the retrieved documents + - **doc_ids** -- List of ids of the retrieved documents + """ + + n_docs = n_docs if n_docs is not None else self.n_docs + prefix = prefix if prefix is not None else self.config.generator.prefix + retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs) + + input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True) + context_input_ids, context_attention_mask = self.postprocess_docs( + docs, input_strings, prefix, n_docs, return_tensors=return_tensors + ) + + if self.return_tokenized_docs: + retrieved_doc_text = [] + retrieved_doc_title = [] + + for b_idx in range(len(docs)): + for doc_idx in range(n_docs): + retrieved_doc_text.append(docs[b_idx]["text"][doc_idx]) + retrieved_doc_title.append(docs[b_idx]["title"][doc_idx]) + + tokenized_docs = self.ctx_encoder_tokenizer( + retrieved_doc_title, + retrieved_doc_text, + truncation=True, + padding="longest", + return_tensors=return_tensors, + ) + + return BatchEncoding( + { + "context_input_ids": context_input_ids, + "context_attention_mask": context_attention_mask, + "retrieved_doc_embeds": retrieved_doc_embeds, + "doc_ids": doc_ids, + "tokenized_doc_ids": tokenized_docs["input_ids"], + "tokenized_doc_attention_mask": tokenized_docs["attention_mask"], + }, + tensor_type=return_tensors, + ) + + else: + return BatchEncoding( + { + "context_input_ids": context_input_ids, + "context_attention_mask": context_attention_mask, + "retrieved_doc_embeds": retrieved_doc_embeds, + "doc_ids": doc_ids, + }, + tensor_type=return_tensors, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/tokenization_rag.py 
diff --git a/venv/lib/python3.10/site-packages/transformers/models/rag/tokenization_rag.py b/venv/lib/python3.10/site-packages/transformers/models/rag/tokenization_rag.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b6ec67e6bf879edeb2ead9045fab52507706d65
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/rag/tokenization_rag.py
@@ -0,0 +1,120 @@
+# coding=utf-8
+# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for RAG."""
+import os
+import warnings
+from typing import List, Optional
+
+from ...tokenization_utils_base import BatchEncoding
+from ...utils import logging
+from .configuration_rag import RagConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+class RagTokenizer:
+    def __init__(self, question_encoder, generator):
+        self.question_encoder = question_encoder
+        self.generator = generator
+        self.current_tokenizer = self.question_encoder
+
+    def save_pretrained(self, save_directory):
+        if os.path.isfile(save_directory):
+            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
+        os.makedirs(save_directory, exist_ok=True)
+        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
+        generator_path = os.path.join(save_directory, "generator_tokenizer")
+        self.question_encoder.save_pretrained(question_encoder_path)
+        self.generator.save_pretrained(generator_path)
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+        # dynamically import AutoTokenizer
+        from ..auto.tokenization_auto import AutoTokenizer
+
+        config = kwargs.pop("config", None)
+
+        if config is None:
+            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
+
+        question_encoder = AutoTokenizer.from_pretrained(
+            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
+        )
+        generator = AutoTokenizer.from_pretrained(
+            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
+        )
+        return cls(question_encoder=question_encoder, generator=generator)
+
+    def __call__(self, *args, **kwargs):
+        return self.current_tokenizer(*args, **kwargs)
+
+    def batch_decode(self, *args, **kwargs):
+        return self.generator.batch_decode(*args, **kwargs)
+
+    def decode(self, *args, **kwargs):
+        return self.generator.decode(*args, **kwargs)
+
+    def _switch_to_input_mode(self):
+        self.current_tokenizer = self.question_encoder
+
+    def _switch_to_target_mode(self):
+        self.current_tokenizer = self.generator
+
Use the " + "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " + "context manager to prepare your targets. See the documentation of your specific tokenizer for more " + "details", + FutureWarning, + ) + if max_length is None: + max_length = self.current_tokenizer.model_max_length + model_inputs = self( + src_texts, + add_special_tokens=True, + return_tensors=return_tensors, + max_length=max_length, + padding=padding, + truncation=truncation, + **kwargs, + ) + if tgt_texts is None: + return model_inputs + # Process tgt_texts + if max_target_length is None: + max_target_length = self.current_tokenizer.model_max_length + labels = self( + text_target=tgt_texts, + add_special_tokens=True, + return_tensors=return_tensors, + padding=padding, + max_length=max_target_length, + truncation=truncation, + **kwargs, + ) + model_inputs["labels"] = labels["input_ids"] + return model_inputs diff --git a/venv/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d7fe44fae67865220c0f9c094397b7a0c36f1c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/configuration_yoso.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/configuration_yoso.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dafa1a70eab747861ce390f44fa23dc5c771f5f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/configuration_yoso.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/convert_yoso_pytorch_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/convert_yoso_pytorch_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47caf57870440fde7a507547c7df525a34314396 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/convert_yoso_pytorch_to_pytorch.cpython-310.pyc differ