diff --git a/.gitattributes b/.gitattributes
index 96990380a73ed8d3e2d874ef68b4010ffde572c5..b9777c2d22ffefae2b8b1cf63064ca3f307d724b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -120,3 +120,5 @@ lm-evaluation-harness/wandb/run-20240606_061005-a998jkug/run-a998jkug.wandb filt
 lm-evaluation-harness/wandb/run-20240606_033253-tgejm7dv/run-tgejm7dv.wandb filter=lfs diff=lfs merge=lfs -text
 lm-evaluation-harness/wandb/run-20240608_162527-0vroecl5/run-0vroecl5.wandb filter=lfs diff=lfs merge=lfs -text
 lm-evaluation-harness/wandb/run-20240608_150638-5wz6ydpy/run-5wz6ydpy.wandb filter=lfs diff=lfs merge=lfs -text
+lm-evaluation-harness/wandb/run-20240605_123856-sm8adh93/run-sm8adh93.wandb filter=lfs diff=lfs merge=lfs -text
+lm-evaluation-harness/wandb/run-20240605_062502-fvj55jrc/run-fvj55jrc.wandb filter=lfs diff=lfs merge=lfs -text
diff --git a/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3740a01c0f7d45414b125edd72a2c26211232c99
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ceff9f1cf97bed0b1d54258327901f59e836e72a6b0ef468a8613243dee8a95a
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3bf2ce85d270d6d52760c0a8d5ad77d0f526ee8c
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a977e3b304209fc1f3a39b12b9e242a3b04adc734318238b30405de6975a7ab
+size 33555533
diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9400ed6fb4b71cbea320fefea3535c49fbdff733
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/config.yaml
@@ -0,0 +1,43 @@
+wandb_version: 1
+
+_wandb:
+  desc: null
+  value:
+    python_version: 3.10.12
+    cli_version: 0.17.0
+    framework: huggingface
+    huggingface_version: 4.40.2
+    is_jupyter_run: false
+    is_kaggle_kernel: false
+    start_time: 1715704566
+    t:
+      1:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      2:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      3:
+      - 23
+      4: 3.10.12
+      5: 0.17.0
+      6: 4.40.2
+      8:
+      - 5
+      13: linux-x86_64
diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/output.log b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/output.log
new file mode 100644
index 0000000000000000000000000000000000000000..ad2be202ad07d4a721c75c93d8c763e19b03f389
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/output.log
@@ -0,0 +1,33 @@
+
+2024-05-14:16:36:07,358 INFO [__main__.py:251] Verbosity set to INFO
+2024-05-14:16:36:11,911 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi']
+2024-05-14:16:36:11,913 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+2024-05-14:16:36:11,913 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step120'}
+Traceback (most recent call last):
+  File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+    return _run_code(code, main_globals, None,
+  File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+    exec(code, run_globals)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+    cli_evaluate()
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+    results = evaluator.simple_evaluate(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+    return fn(*args, **kwargs)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+    lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+    return cls(**args, **args2)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+    self._get_config(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+    self._config = transformers.AutoConfig.from_pretrained(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained
+    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict
+    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict
+    resolved_config_file = cached_file(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 369, in cached_file
+    raise EnvironmentError(
+OSError: /data/cronscript/ckpts//hf_ckpt//global_step120 does not appear to have a file named config.json. Checkout 'https://huggingface.co//data/cronscript/ckpts//hf_ckpt//global_step120/tree/main' for available files.
\ No newline at end of file
diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/requirements.txt
@@ -0,0 +1,163 @@
+DataProperty==1.0.1
+GitPython==3.1.43
+Jinja2==3.1.3
+Markdown==3.6
+MarkupSafe==2.1.5
+Pillow-SIMD==7.0.0.post3
+PyYAML==6.0
+Werkzeug==3.0.2
+absl-py==2.1.0
+accelerate==0.30.1
+aiohttp==3.9.4
+aiosignal==1.3.1
+antlr4-python3-runtime==4.9.3
+anyio==4.3.0
+async-timeout==4.0.3
+attrs==23.2.0
+av==9.2.0
+cachetools==5.3.3
+certifi==2024.2.2
+cffi==1.15.1
+cfgv==3.4.0
+chardet==5.2.0
+charset-normalizer==3.3.2
+click==8.1.7
+cmake==3.29.2
+colorama==0.4.6
+datasets==2.19.1
+deepspeed==0.12.4+hpu.synapse.v1.15.1
+dill==0.3.8
+distlib==0.3.8
+distro==1.9.0
+docker-pycreds==0.4.0
+einops==0.8.0
+evaluate==0.4.2
+exceptiongroup==1.2.0
+expecttest==0.2.1
+filelock==3.13.4
+frozenlist==1.4.1
+fsspec==2024.3.1
+gitdb==4.0.11
+google-auth-oauthlib==0.4.6
+google-auth==2.29.0
+grpcio==1.62.1
+h11==0.14.0
+habana-media-loader==1.15.1.15
+habana-pyhlml==1.15.1.15
+habana-torch-dataloader==1.15.1.15
+habana-torch-plugin==1.15.1.15
+habana_gpu_migration==1.15.1.15
+habana_quantization_toolkit==1.15.1.15
+hjson==3.1.0
+httpcore==1.0.5
+httpx==0.27.0
+huggingface-hub==0.23.0
+identify==2.5.35
+idna==3.7
+importlib_resources==6.4.0
+iniconfig==2.0.0
+joblib==1.4.2
+jsonlines==4.0.0
+lightning-habana==1.4.0
+lightning-utilities==0.11.2
+lightning==2.2.0.post0
+lm_eval==0.3.0
+lm_eval==0.4.2
+lm_eval==0.4.2
+lm_eval==0.4.2
+mbstrdecoder==1.1.3
+more-itertools==10.2.0
+mpi4py==3.1.4
+mpmath==1.3.0
+multidict==6.0.5
+multiprocess==0.70.16
+networkx==3.3
+ninja==1.11.1.1
+nltk==3.8.1
+nodeenv==1.8.0
+numexpr==2.10.0
+numpy==1.23.5
+oauthlib==3.2.2
+omegaconf==2.3.0
+openai==1.29.0
+packaging==24.0
+pandas==2.0.1
+pathspec==0.12.1
+pathvalidate==3.2.0
+peft==0.10.0
+perfetto==0.7.0
+pip==22.0.2
+pip==23.3.1
+platformdirs==4.2.0
+pluggy==1.4.0
+portalocker==2.8.2
+pre-commit==3.3.3
+protobuf==3.20.3
+psutil==5.9.8
+py-cpuinfo==9.0.0
+pyarrow-hotfix==0.6
+pyarrow==16.0.0
+pyasn1==0.6.0
+pyasn1_modules==0.4.0
+pybind11==2.10.4
+pycountry==23.12.11
+pycparser==2.22
+pydantic==1.10.13
+pynvml==8.0.4
+pytablewriter==1.2.0
+pytest==8.1.1
+python-dateutil==2.9.0.post0
+pytorch-lightning==2.2.2
+pytz==2024.1
+regex==2023.5.5
+requests-oauthlib==2.0.0
+requests==2.31.0
+rouge_score==0.1.2
+rsa==4.9
+sacrebleu==1.5.0
+safetensors==0.4.3
+scikit-learn==1.4.2
+scipy==1.13.0
+sentencepiece==0.2.0
+sentry-sdk==2.1.1
+setproctitle==1.3.3
+setuptools==59.6.0
+setuptools==69.5.1
+six==1.16.0
+smmap==5.0.1
+sniffio==1.3.1
+sqlitedict==2.1.0
+symengine==0.11.0
+sympy==1.12
+tabledata==1.3.3
+tcolorpy==0.1.6
+tdqm==0.0.1
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.1
+tensorboard==2.11.2
+threadpoolctl==3.5.0
+tokenizers==0.19.1
+tomli==2.0.1
+torch==2.2.0a0+git8964477
+torch_tb_profiler==0.4.0
+torchaudio==2.2.0+08901ad
+torchdata==0.7.1+5e6f7b7
+torchmetrics==1.3.2
+torchtext==0.17.0+400da5c
+torchvision==0.17.0+b2383d4
+tqdm-multiprocess==0.0.11
+tqdm==4.66.2
+transformers==4.40.2
+typepy==1.3.2
+typing_extensions==4.11.0
+tzdata==2024.1
+urllib3==1.26.18
+virtualenv==20.25.1
+wandb==0.17.0
+wheel==0.37.1
+wheel==0.43.0
+word2number==1.1
+xxhash==3.4.1
+yamllint==1.35.1
+yarl==1.9.4
+zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..647bbe9c9f0f1c7c648d3aedd62784c14f682e67 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:36:07.214234", + "startedAt": "2024-05-14T16:36:06.732839", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3392.0355723684215, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3292.895, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3292.903, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3292.929, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3298.22, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, 
+ "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3293.82, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + 
}, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4253959655762 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..030ea6a2fdbc1a0fa8ec354946139c5b70f80af0 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_setup.py:_flush():76] Configure stats pid to 123909 +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:36:06,741 WARNING MainThread:123909 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/logs/debug.log +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/logs/debug-internal.log +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_init.py:init():610] starting backend +2024-05-14 16:36:06,741 INFO MainThread:123909 [wandb_init.py:init():614] setting up manager +2024-05-14 16:36:06,743 INFO MainThread:123909 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:36:06,743 INFO MainThread:123909 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:36:06,746 INFO MainThread:123909 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:36:06,757 INFO MainThread:123909 [wandb_init.py:init():744] 
communicating run to backend with 90.0 second timeout +2024-05-14 16:36:07,037 INFO MainThread:123909 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:36:07,116 INFO MainThread:123909 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:36:07,117 INFO MainThread:123909 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:36:07,355 INFO MainThread:123909 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:36:07,355 INFO MainThread:123909 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:36:07,356 INFO MainThread:123909 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:36:07,356 INFO MainThread:123909 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 16:36:07,357 INFO MainThread:123909 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:36:16,013 WARNING MsgRouterThr:123909 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/run-als7uk9d.wandb b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/run-als7uk9d.wandb new file mode 100644 index 0000000000000000000000000000000000000000..a3e94d9a9af31a5571e1b9d33c8bfe110442863d Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/run-als7uk9d.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..770bd3a3bacd57b25320b34641708cdf1cd2fa1d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715704623 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/output.log b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..8f341e47fe27db0f628ea85eb4c7219c4b208d22 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/output.log @@ -0,0 +1,33 @@ + +2024-05-14:16:37:03,917 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:37:08,616 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:16:37:08,618 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:37:08,618 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step20'} +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File 
"/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 369, in cached_file + raise EnvironmentError( +OSError: /data/cronscript/ckpts//hf_ckpt//global_step20 does not appear to have a file named config.json. Checkout 'https://huggingface.co//data/cronscript/ckpts//hf_ckpt//global_step20/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 
+nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..0c256ceb345ea7c8460835153f8e535c9fe8cb67 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:37:03.779413", + "startedAt": "2024-05-14T16:37:03.350505", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3394.4844539473684, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3221.316, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.013, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4234619140625 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..74ae82ca002f26112178f0cd636ac5b92bf8e035 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 6}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..8cdd176fe63be116cae6bde3b9a91a373f0acbfb --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-14 16:37:03,363 INFO StreamThr :127593 [internal.py:wandb_internal():85] W&B internal server running at pid: 127593, started at: 2024-05-14 16:37:03.362980 +2024-05-14 16:37:03,365 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:37:03,366 INFO WriterThread:127593 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/run-yrnu0z9e.wandb +2024-05-14 16:37:03,367 DEBUG SenderThread:127593 [sender.py:send():378] send: header +2024-05-14 16:37:03,387 DEBUG SenderThread:127593 [sender.py:send():378] send: run +2024-05-14 16:37:03,600 INFO SenderThread:127593 [dir_watcher.py:__init__():211] watching files in: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files +2024-05-14 16:37:03,600 INFO SenderThread:127593 [sender.py:_start_run_threads():1123] run started: yrnu0z9e with start time 1715704623.362702 +2024-05-14 16:37:03,611 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:37:03,611 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:37:03,692 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:37:03,694 DEBUG HandlerThread:127593 [system_info.py:__init__():26] System info init +2024-05-14 16:37:03,694 DEBUG HandlerThread:127593 [system_info.py:__init__():41] System info init done +2024-05-14 16:37:03,694 INFO HandlerThread:127593 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:37:03,694 INFO SystemMonitor:127593 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:37:03,694 INFO HandlerThread:127593 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:37:03,695 INFO SystemMonitor:127593 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:37:03,695 INFO SystemMonitor:127593 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:37:03,696 INFO SystemMonitor:127593 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:37:03,696 INFO SystemMonitor:127593 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:37:03,779 DEBUG HandlerThread:127593 [system_info.py:probe():150] Probing system +2024-05-14 16:37:03,787 DEBUG HandlerThread:127593 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:37:03,807 ERROR HandlerThread:127593 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:37:03,807 DEBUG HandlerThread:127593 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:37:03,807 DEBUG HandlerThread:127593 [system_info.py:probe():198] Probing system done +2024-05-14 16:37:03,807 DEBUG HandlerThread:127593 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:37:03.779413', 'startedAt': '2024-05-14T16:37:03.350505', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3394.4844539473684, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3221.316, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.013, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4234619140625}}, 'memory': {'total': 1007.5000267028809}}
+2024-05-14 16:37:03,807 INFO HandlerThread:127593 [system_monitor.py:probe():224] Finished collecting system info
+2024-05-14 16:37:03,807 INFO HandlerThread:127593 [system_monitor.py:probe():227] Publishing system info
+2024-05-14 16:37:03,808 INFO HandlerThread:127593 [system_monitor.py:probe():229] Finished publishing system info
+2024-05-14 16:37:03,812 DEBUG SenderThread:127593 [sender.py:send():378] send: files
+2024-05-14 16:37:03,812 INFO SenderThread:127593 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+2024-05-14 16:37:03,913 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: python_packages
+2024-05-14 16:37:03,913 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: stop_status
+2024-05-14 16:37:03,914 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: python_packages
+2024-05-14 16:37:03,915 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: stop_status
+2024-05-14 16:37:04,085 DEBUG SenderThread:127593 [sender.py:send():378] send: telemetry
+2024-05-14 16:37:04,310 INFO wandb-upload_0:127593 [upload_job.py:push():130] Uploaded file /tmp/tmpcyaic29owandb/r8zf5mxx-wandb-metadata.json
+2024-05-14 16:37:04,601 INFO Thread-12 :127593 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-metadata.json
+2024-05-14 16:37:04,601 INFO Thread-12 :127593 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/requirements.txt
+2024-05-14 16:37:04,601 INFO Thread-12 :127593 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/output.log
+2024-05-14 16:37:06,601 INFO Thread-12 :127593 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/output.log
+2024-05-14 16:37:08,617 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: status_report
+2024-05-14 16:37:09,712 DEBUG SenderThread:127593 [sender.py:send():378] send: exit
+2024-05-14 16:37:09,712 INFO SenderThread:127593 [sender.py:send_exit():585] handling exit code: 1
+2024-05-14 16:37:09,712 INFO SenderThread:127593 [sender.py:send_exit():587] handling runtime: 6
+2024-05-14 16:37:09,713 INFO SenderThread:127593 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+2024-05-14 16:37:09,713 INFO SenderThread:127593 [sender.py:send_exit():593] send defer
+2024-05-14 16:37:09,714 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 16:37:09,714 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 0
+2024-05-14 16:37:09,714 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer
+2024-05-14 16:37:09,714 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 0
+2024-05-14 16:37:09,714 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 1
+2024-05-14 16:37:09,714 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 16:37:09,714 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 1
+2024-05-14 16:37:09,714 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer
+2024-05-14 16:37:09,714 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 1
+2024-05-14 16:37:09,714 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 2
+2024-05-14 16:37:09,714 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 16:37:09,714 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 2
+2024-05-14 16:37:09,714 INFO HandlerThread:127593 [system_monitor.py:finish():203] Stopping system monitor
+2024-05-14 16:37:09,714 DEBUG SystemMonitor:127593 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+2024-05-14 16:37:09,715 INFO HandlerThread:127593 [interfaces.py:finish():200] Joined cpu monitor
+2024-05-14 16:37:09,715 DEBUG SystemMonitor:127593 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+2024-05-14 16:37:09,715 INFO HandlerThread:127593 [interfaces.py:finish():200] Joined disk monitor
+2024-05-14 16:37:09,715 DEBUG SystemMonitor:127593 [system_monitor.py:_start():183] Publishing last batch of metrics
+2024-05-14 16:37:09,715 INFO HandlerThread:127593 [interfaces.py:finish():200] Joined memory monitor
+2024-05-14 16:37:09,716 INFO HandlerThread:127593 [interfaces.py:finish():200] Joined network monitor
+2024-05-14 16:37:09,717 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer
+2024-05-14 16:37:09,717 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 2
+2024-05-14 16:37:09,717 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 3
+2024-05-14 16:37:09,717 DEBUG SenderThread:127593 [sender.py:send():378] send: stats
+2024-05-14 16:37:09,717 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 16:37:09,717 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 3
+2024-05-14 16:37:09,718 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer
+2024-05-14 16:37:09,718 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 3
+2024-05-14 16:37:09,718 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 4
+2024-05-14 16:37:09,718 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 16:37:09,718 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 4
+2024-05-14 16:37:09,718 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer
+2024-05-14 16:37:09,718 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 4
+2024-05-14 16:37:09,718 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 5
+2024-05-14 16:37:09,718 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 16:37:09,718 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 5
+2024-05-14 16:37:09,718 DEBUG SenderThread:127593 [sender.py:send():378] send: summary
+2024-05-14 16:37:09,719 INFO SenderThread:127593 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+2024-05-14 16:37:09,719 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer
+2024-05-14 16:37:09,719 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 5
+2024-05-14 16:37:09,719 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 6
+2024-05-14 16:37:09,719 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 16:37:09,719 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 6
+2024-05-14 16:37:09,719 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer
+2024-05-14 16:37:09,719 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 6
+2024-05-14 16:37:09,721 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: status_report
+2024-05-14 16:37:09,805 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 7
+2024-05-14 16:37:09,805 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 16:37:09,805 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 7
+2024-05-14 16:37:09,805 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer
+2024-05-14 16:37:09,805 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 7
+2024-05-14 16:37:10,603 INFO Thread-12 :127593 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/config.yaml
+2024-05-14 16:37:10,604 INFO Thread-12 :127593 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/output.log
+2024-05-14 16:37:10,604 INFO Thread-12 :127593 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-summary.json
+2024-05-14 16:37:10,712 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-14 16:37:12,105 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 8
+2024-05-14 16:37:12,105 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: poll_exit
+2024-05-14 16:37:12,105 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 16:37:12,105 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 8
+2024-05-14 16:37:12,106 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer
+2024-05-14 16:37:12,106 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 8
+2024-05-14 16:37:12,106 INFO SenderThread:127593 [job_builder.py:build():432] Attempting to build job artifact
+2024-05-14 16:37:12,106 INFO SenderThread:127593 [job_builder.py:_get_source_type():576] no source found
+2024-05-14 16:37:12,106 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 9
+2024-05-14 16:37:12,106 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer
+2024-05-14 16:37:12,106 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 9
+2024-05-14 16:37:12,106 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer
+2024-05-14 16:37:12,106 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 9
+2024-05-14 16:37:12,106 INFO SenderThread:127593 [dir_watcher.py:finish():358] shutting down directory watcher
+2024-05-14 16:37:12,605 INFO SenderThread:127593
[dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/output.log +2024-05-14 16:37:12,605 INFO SenderThread:127593 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files +2024-05-14 16:37:12,605 INFO SenderThread:127593 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-summary.json wandb-summary.json +2024-05-14 16:37:12,606 INFO SenderThread:127593 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/requirements.txt requirements.txt +2024-05-14 16:37:12,606 INFO SenderThread:127593 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/output.log output.log +2024-05-14 16:37:12,606 INFO SenderThread:127593 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/config.yaml config.yaml +2024-05-14 16:37:12,607 INFO SenderThread:127593 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:37:12,607 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:37:12,609 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:12,609 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:37:12,610 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:12,611 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:37:12,611 INFO SenderThread:127593 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:37:12,713 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:12,713 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:12,846 INFO wandb-upload_0:127593 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/wandb-summary.json +2024-05-14 16:37:13,129 INFO wandb-upload_3:127593 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/config.yaml +2024-05-14 16:37:13,133 INFO wandb-upload_1:127593 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/requirements.txt +2024-05-14 16:37:13,143 INFO wandb-upload_2:127593 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/files/output.log +2024-05-14 16:37:13,343 INFO Thread-11 (_thread_body):127593 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:37:13,343 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:13,343 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:37:13,344 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:13,344 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:37:13,344 INFO SenderThread:127593 [file_pusher.py:join():175] 
waiting for file pusher +2024-05-14 16:37:13,344 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:37:13,344 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:13,344 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:37:13,345 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:13,345 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:37:13,345 INFO SenderThread:127593 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:37:13,595 INFO SenderThread:127593 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:37:13,595 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:37:13,595 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:13,595 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:37:13,595 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:13,595 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:37:13,595 INFO SenderThread:127593 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:37:13,596 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:37:13,596 INFO HandlerThread:127593 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:37:13,596 DEBUG SenderThread:127593 [sender.py:send():378] send: final +2024-05-14 16:37:13,596 DEBUG SenderThread:127593 [sender.py:send():378] send: footer +2024-05-14 16:37:13,596 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: defer +2024-05-14 16:37:13,596 INFO SenderThread:127593 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:37:13,596 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:13,596 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:13,597 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:37:13,597 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:37:13,597 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:37:13,597 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:37:13,597 DEBUG SenderThread:127593 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:37:13,597 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:37:13,599 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:37:13,653 INFO MainThread:127593 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:37:13,653 INFO MainThread:127593 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:37:13,653 INFO MainThread:127593 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:37:13,653 DEBUG HandlerThread:127593 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:37:13,653 INFO HandlerThread:127593 [handler.py:finish():882] shutting down 
handler +2024-05-14 16:37:14,597 INFO WriterThread:127593 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/run-yrnu0z9e.wandb +2024-05-14 16:37:14,653 INFO SenderThread:127593 [sender.py:finish():1545] shutting down sender +2024-05-14 16:37:14,653 INFO SenderThread:127593 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:37:14,653 INFO SenderThread:127593 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..b7f00e56914daadd2e2ee13d01ca6ee607bafaa2 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:37:03,359 INFO MainThread:126391 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:37:03,359 INFO MainThread:126391 [wandb_setup.py:_flush():76] Configure stats pid to 126391 +2024-05-14 16:37:03,359 INFO MainThread:126391 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:37:03,359 INFO MainThread:126391 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:37:03,359 INFO MainThread:126391 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:37:03,359 INFO MainThread:126391 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:37:03,359 WARNING MainThread:126391 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:37:03,359 INFO MainThread:126391 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:37:03,359 INFO MainThread:126391 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:37:03,359 INFO MainThread:126391 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/logs/debug.log +2024-05-14 16:37:03,360 INFO MainThread:126391 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/logs/debug-internal.log +2024-05-14 16:37:03,360 INFO MainThread:126391 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:37:03,360 INFO MainThread:126391 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:37:03,360 INFO MainThread:126391 [wandb_init.py:init():610] starting backend +2024-05-14 16:37:03,360 INFO MainThread:126391 [wandb_init.py:init():614] setting up manager +2024-05-14 16:37:03,361 INFO MainThread:126391 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:37:03,362 INFO MainThread:126391 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:37:03,366 INFO MainThread:126391 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:37:03,386 INFO MainThread:126391 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:37:03,610 INFO MainThread:126391 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:37:03,688 INFO MainThread:126391 [wandb_run.py:_on_init():2405] got version response +2024-05-14 
16:37:03,688 INFO MainThread:126391 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:37:03,913 INFO MainThread:126391 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:37:03,914 INFO MainThread:126391 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:37:03,914 INFO MainThread:126391 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:37:03,914 INFO MainThread:126391 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 16:37:03,915 INFO MainThread:126391 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:37:14,654 WARNING MsgRouterThr:126391 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/run-yrnu0z9e.wandb b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/run-yrnu0z9e.wandb new file mode 100644 index 0000000000000000000000000000000000000000..1a09f57c21c458f95c8351124950a31d0af11993 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_163703-yrnu0z9e/run-yrnu0z9e.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/config.yaml b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6bf82b885a060e3af55cdcfa6d36d93e078b9a6e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.0 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716404459 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.0 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/output.log b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..4110106adadb3c7ffe5ce2289f5ee36adee2465e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/output.log @@ -0,0 +1,34 @@ + +2024-05-22:19:01:00,231 INFO [__main__.py:251] Verbosity set to INFO +2024-05-22:19:01:08,752 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-22:19:01:08,753 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-22:19:01:08,753 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step8000'} +2024-05-22:19:01:11,047 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, 
in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step8000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step8000/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3573b3bbfd5b190fed4ccaed4ac2846002aec22 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 
+packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.0 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..e258e2aab7caeca6bea93b6b4ce3df9d3796d486 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-22T19:01:00.014205", + "startedAt": "2024-05-22T19:00:59.519129", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step8000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2326.343275, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.63861465454102 + } + }, + "memory": { + "total": 1007.4379997253418 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..e88d55f55fdc42800ecef1c45ca2eecc58bd667c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-22 19:00:59,545 INFO StreamThr :4606 [internal.py:wandb_internal():85] W&B internal server running at pid: 4606, started at: 2024-05-22 19:00:59.543074 +2024-05-22 19:00:59,549 DEBUG HandlerThread:4606 
[handler.py:handle_request():158] handle_request: status +2024-05-22 19:00:59,550 INFO WriterThread:4606 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/run-u7hnnuee.wandb +2024-05-22 19:00:59,556 DEBUG SenderThread:4606 [sender.py:send():378] send: header +2024-05-22 19:00:59,556 DEBUG SenderThread:4606 [sender.py:send():378] send: run +2024-05-22 19:00:59,810 INFO SenderThread:4606 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files +2024-05-22 19:00:59,811 INFO SenderThread:4606 [sender.py:_start_run_threads():1123] run started: u7hnnuee with start time 1716404459.542933 +2024-05-22 19:00:59,815 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: check_version +2024-05-22 19:00:59,815 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: check_version +2024-05-22 19:00:59,938 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: run_start +2024-05-22 19:00:59,940 DEBUG HandlerThread:4606 [system_info.py:__init__():26] System info init +2024-05-22 19:00:59,940 DEBUG HandlerThread:4606 [system_info.py:__init__():41] System info init done +2024-05-22 19:00:59,940 INFO HandlerThread:4606 [system_monitor.py:start():194] Starting system monitor +2024-05-22 19:00:59,940 INFO SystemMonitor:4606 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-22 19:00:59,940 INFO HandlerThread:4606 [system_monitor.py:probe():214] Collecting system info +2024-05-22 19:00:59,947 INFO SystemMonitor:4606 [interfaces.py:start():188] Started cpu monitoring +2024-05-22 19:00:59,948 INFO SystemMonitor:4606 [interfaces.py:start():188] Started disk monitoring +2024-05-22 19:00:59,949 INFO SystemMonitor:4606 [interfaces.py:start():188] Started memory monitoring +2024-05-22 19:00:59,951 INFO SystemMonitor:4606 [interfaces.py:start():188] Started network monitoring +2024-05-22 19:01:00,014 DEBUG HandlerThread:4606 [system_info.py:probe():150] Probing system +2024-05-22 19:01:00,017 DEBUG HandlerThread:4606 [system_info.py:_probe_git():135] Probing git +2024-05-22 19:01:00,028 ERROR HandlerThread:4606 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-22 19:01:00,028 DEBUG HandlerThread:4606 [system_info.py:_probe_git():143] Probing git done +2024-05-22 19:01:00,028 DEBUG HandlerThread:4606 [system_info.py:probe():198] Probing system done +2024-05-22 19:01:00,028 DEBUG HandlerThread:4606 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T19:01:00.014205', 'startedAt': '2024-05-22T19:00:59.519129', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step8000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 
'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2326.343275, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.63861465454102}}, 'memory': {'total': 1007.4379997253418}} +2024-05-22 19:01:00,028 INFO HandlerThread:4606 [system_monitor.py:probe():224] Finished collecting system info +2024-05-22 19:01:00,028 INFO HandlerThread:4606 [system_monitor.py:probe():227] Publishing system info +2024-05-22 19:01:00,031 INFO HandlerThread:4606 [system_monitor.py:probe():229] Finished publishing system info +2024-05-22 19:01:00,037 DEBUG SenderThread:4606 [sender.py:send():378] send: files +2024-05-22 19:01:00,037 INFO SenderThread:4606 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-22 19:01:00,224 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: python_packages +2024-05-22 19:01:00,224 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: python_packages +2024-05-22 19:01:00,226 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: stop_status +2024-05-22 19:01:00,227 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: stop_status +2024-05-22 19:01:00,406 DEBUG SenderThread:4606 [sender.py:send():378] send: telemetry +2024-05-22 19:01:00,634 INFO wandb-upload_0:4606 [upload_job.py:push():130] Uploaded file /tmp/tmplp0r04n5wandb/8p8kln7f-wandb-metadata.json +2024-05-22 19:01:00,813 INFO Thread-12 :4606 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/requirements.txt +2024-05-22 19:01:00,813 INFO Thread-12 :4606 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-metadata.json +2024-05-22 19:01:00,813 INFO Thread-12 :4606 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/output.log +2024-05-22 19:01:02,814 INFO Thread-12 :4606 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/output.log +2024-05-22 19:01:05,408 DEBUG HandlerThread:4606 [handler.py:handle_request():158] 
handle_request: status_report +2024-05-22 19:01:10,754 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 19:01:10,821 INFO Thread-12 :4606 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/output.log +2024-05-22 19:01:11,055 DEBUG SenderThread:4606 [sender.py:send():378] send: exit +2024-05-22 19:01:11,055 INFO SenderThread:4606 [sender.py:send_exit():585] handling exit code: 1 +2024-05-22 19:01:11,055 INFO SenderThread:4606 [sender.py:send_exit():587] handling runtime: 11 +2024-05-22 19:01:11,057 INFO SenderThread:4606 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 19:01:11,057 INFO SenderThread:4606 [sender.py:send_exit():593] send defer +2024-05-22 19:01:11,057 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:11,057 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-22 19:01:11,057 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:11,057 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-22 19:01:11,057 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 1 +2024-05-22 19:01:11,058 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:11,058 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-22 19:01:11,058 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:11,058 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-22 19:01:11,058 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 2 +2024-05-22 19:01:11,058 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:11,058 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-22 19:01:11,058 INFO HandlerThread:4606 [system_monitor.py:finish():203] Stopping system monitor +2024-05-22 19:01:11,058 DEBUG SystemMonitor:4606 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-22 19:01:11,058 DEBUG SystemMonitor:4606 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-22 19:01:11,059 DEBUG SystemMonitor:4606 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-22 19:01:11,058 INFO HandlerThread:4606 [interfaces.py:finish():200] Joined cpu monitor +2024-05-22 19:01:11,060 INFO HandlerThread:4606 [interfaces.py:finish():200] Joined disk monitor +2024-05-22 19:01:11,060 INFO HandlerThread:4606 [interfaces.py:finish():200] Joined memory monitor +2024-05-22 19:01:11,060 INFO HandlerThread:4606 [interfaces.py:finish():200] Joined network monitor +2024-05-22 19:01:11,061 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:11,061 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-22 19:01:11,061 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 3 +2024-05-22 19:01:11,061 DEBUG SenderThread:4606 [sender.py:send():378] send: stats +2024-05-22 19:01:11,061 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:11,062 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 
3 +2024-05-22 19:01:11,062 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:11,062 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-22 19:01:11,062 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 4 +2024-05-22 19:01:11,062 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:11,062 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-22 19:01:11,063 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:11,063 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-22 19:01:11,063 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 5 +2024-05-22 19:01:11,063 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:11,063 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-22 19:01:11,063 DEBUG SenderThread:4606 [sender.py:send():378] send: summary +2024-05-22 19:01:11,064 INFO SenderThread:4606 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 19:01:11,064 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:11,064 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-22 19:01:11,064 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 6 +2024-05-22 19:01:11,064 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:11,064 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-22 19:01:11,064 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:11,064 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-22 19:01:11,069 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 19:01:11,140 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 7 +2024-05-22 19:01:11,141 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:11,141 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-22 19:01:11,141 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:11,141 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-22 19:01:11,822 INFO Thread-12 :4606 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/config.yaml +2024-05-22 19:01:11,823 INFO Thread-12 :4606 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-summary.json +2024-05-22 19:01:12,055 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 19:01:12,430 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 8 +2024-05-22 19:01:12,430 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 19:01:12,430 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:12,430 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 8 
+2024-05-22 19:01:12,431 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:12,431 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-22 19:01:12,431 INFO SenderThread:4606 [job_builder.py:build():432] Attempting to build job artifact +2024-05-22 19:01:12,431 INFO SenderThread:4606 [job_builder.py:_get_source_type():576] no source found +2024-05-22 19:01:12,431 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 9 +2024-05-22 19:01:12,431 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:12,431 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-22 19:01:12,432 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:12,432 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-22 19:01:12,432 INFO SenderThread:4606 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-22 19:01:12,824 INFO SenderThread:4606 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/output.log +2024-05-22 19:01:12,825 INFO SenderThread:4606 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files +2024-05-22 19:01:12,825 INFO SenderThread:4606 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-summary.json wandb-summary.json +2024-05-22 19:01:12,825 INFO SenderThread:4606 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/config.yaml config.yaml +2024-05-22 19:01:12,827 INFO SenderThread:4606 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/output.log output.log +2024-05-22 19:01:12,829 INFO SenderThread:4606 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/requirements.txt requirements.txt +2024-05-22 19:01:12,829 INFO SenderThread:4606 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-metadata.json wandb-metadata.json +2024-05-22 19:01:12,830 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 10 +2024-05-22 19:01:12,831 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:12,831 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-22 19:01:12,832 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:12,834 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-22 19:01:12,834 INFO SenderThread:4606 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 19:01:13,060 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 19:01:13,060 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 19:01:13,061 INFO wandb-upload_0:4606 [upload_job.py:push():130] Uploaded file 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/wandb-summary.json +2024-05-22 19:01:13,383 INFO wandb-upload_3:4606 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/requirements.txt +2024-05-22 19:01:13,404 INFO wandb-upload_2:4606 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/output.log +2024-05-22 19:01:13,437 INFO wandb-upload_1:4606 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/files/config.yaml +2024-05-22 19:01:13,638 INFO Thread-11 (_thread_body):4606 [sender.py:transition_state():613] send defer: 11 +2024-05-22 19:01:13,638 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:13,638 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-22 19:01:13,638 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:13,638 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-22 19:01:13,638 INFO SenderThread:4606 [file_pusher.py:join():175] waiting for file pusher +2024-05-22 19:01:13,638 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 12 +2024-05-22 19:01:13,638 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:13,639 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-22 19:01:13,639 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:13,639 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-22 19:01:13,639 INFO SenderThread:4606 [file_stream.py:finish():601] file stream finish called +2024-05-22 19:01:13,698 INFO SenderThread:4606 [file_stream.py:finish():605] file stream finish is done +2024-05-22 19:01:13,698 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 13 +2024-05-22 19:01:13,698 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:13,698 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-22 19:01:13,698 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:13,698 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-22 19:01:13,698 INFO SenderThread:4606 [sender.py:transition_state():613] send defer: 14 +2024-05-22 19:01:13,698 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: defer +2024-05-22 19:01:13,698 INFO HandlerThread:4606 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-22 19:01:13,698 DEBUG SenderThread:4606 [sender.py:send():378] send: final +2024-05-22 19:01:13,698 DEBUG SenderThread:4606 [sender.py:send():378] send: footer +2024-05-22 19:01:13,698 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: defer +2024-05-22 19:01:13,699 INFO SenderThread:4606 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-22 19:01:13,699 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 19:01:13,699 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 19:01:13,699 DEBUG HandlerThread:4606 
[handler.py:handle_request():158] handle_request: server_info +2024-05-22 19:01:13,699 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: get_summary +2024-05-22 19:01:13,699 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-22 19:01:13,700 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-22 19:01:13,700 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 19:01:13,700 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 19:01:13,700 DEBUG SenderThread:4606 [sender.py:send_request():405] send_request: server_info +2024-05-22 19:01:13,761 INFO MainThread:4606 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-22 19:01:13,761 INFO MainThread:4606 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-22 19:01:13,761 INFO MainThread:4606 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-22 19:01:13,761 DEBUG HandlerThread:4606 [handler.py:handle_request():158] handle_request: shutdown +2024-05-22 19:01:13,761 INFO HandlerThread:4606 [handler.py:finish():882] shutting down handler +2024-05-22 19:01:14,700 INFO WriterThread:4606 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/run-u7hnnuee.wandb +2024-05-22 19:01:14,761 INFO SenderThread:4606 [sender.py:finish():1545] shutting down sender +2024-05-22 19:01:14,761 INFO SenderThread:4606 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 19:01:14,761 INFO SenderThread:4606 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/logs/debug.log b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..dc73983cda4ec0da81d183871e0aa965f156ec7a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-22 19:00:59,536 INFO MainThread:4451 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_setup.py:_flush():76] Configure stats pid to 4451 +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-22 19:00:59,537 WARNING MainThread:4451 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/logs/debug.log +2024-05-22 19:00:59,537 INFO MainThread:4451 
[wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/logs/debug-internal.log +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_init.py:init():560] calling init triggers +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_init.py:init():610] starting backend +2024-05-22 19:00:59,537 INFO MainThread:4451 [wandb_init.py:init():614] setting up manager +2024-05-22 19:00:59,541 INFO MainThread:4451 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-22 19:00:59,542 INFO MainThread:4451 [wandb_init.py:init():622] backend started and connected +2024-05-22 19:00:59,546 INFO MainThread:4451 [wandb_init.py:init():711] updated telemetry +2024-05-22 19:00:59,555 INFO MainThread:4451 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-22 19:00:59,815 INFO MainThread:4451 [wandb_run.py:_on_init():2396] communicating current version +2024-05-22 19:00:59,931 INFO MainThread:4451 [wandb_run.py:_on_init():2405] got version response +2024-05-22 19:00:59,931 INFO MainThread:4451 [wandb_init.py:init():795] starting run threads in backend +2024-05-22 19:01:00,225 INFO MainThread:4451 [wandb_run.py:_console_start():2374] atexit reg +2024-05-22 19:01:00,225 INFO MainThread:4451 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-22 19:01:00,226 INFO MainThread:4451 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-22 19:01:00,226 INFO MainThread:4451 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-22 19:01:00,228 INFO MainThread:4451 [wandb_init.py:init():838] run started, returning control to user process +2024-05-22 19:01:14,762 WARNING MsgRouterThr:4451 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/run-u7hnnuee.wandb b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/run-u7hnnuee.wandb new file mode 100644 index 0000000000000000000000000000000000000000..092726a81c9ab232c798dcca823a04a363e5107c Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240522_190059-u7hnnuee/run-u7hnnuee.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8eff0fb9f7a04f5ca8d1237a6b588c383cae62df --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716440273 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/output.log b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..b3699480e5b2b90142f02f4d4e9252719a13f4ad --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:04:57:54,216 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:04:58:02,677 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:04:58:02,678 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:04:58:02,679 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000'} +2024-05-23:04:58:04,999 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8150356038c46ec25f623f6e945d6dcb66a2e717 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/wandb-summary.json 
b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..4677cbf1c8930d4f381f46f98cba527216f7b886 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-23 04:57:53,543 INFO StreamThr :2242 [internal.py:wandb_internal():85] W&B internal server running at pid: 2242, started at: 2024-05-23 04:57:53.541266 +2024-05-23 04:57:53,546 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: status +2024-05-23 04:57:53,547 INFO WriterThread:2242 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/run-5fxibcq9.wandb +2024-05-23 04:57:53,549 DEBUG SenderThread:2242 [sender.py:send():378] send: header +2024-05-23 04:57:53,553 DEBUG SenderThread:2242 [sender.py:send():378] send: run +2024-05-23 04:57:53,812 INFO SenderThread:2242 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files +2024-05-23 04:57:53,812 INFO SenderThread:2242 [sender.py:_start_run_threads():1123] run started: 5fxibcq9 with start time 1716440273.541347 +2024-05-23 04:57:53,816 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 04:57:53,817 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: check_version +2024-05-23 04:57:53,940 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 04:57:53,942 DEBUG HandlerThread:2242 [system_info.py:__init__():26] System info init +2024-05-23 04:57:53,942 DEBUG HandlerThread:2242 [system_info.py:__init__():41] System info init done +2024-05-23 04:57:53,942 INFO HandlerThread:2242 [system_monitor.py:start():194] Starting system monitor +2024-05-23 04:57:53,942 INFO SystemMonitor:2242 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 04:57:53,942 INFO HandlerThread:2242 [system_monitor.py:probe():214] Collecting system info +2024-05-23 04:57:53,949 INFO SystemMonitor:2242 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 04:57:53,950 INFO SystemMonitor:2242 [interfaces.py:start():188] Started disk monitoring +2024-05-23 04:57:53,950 INFO SystemMonitor:2242 [interfaces.py:start():188] Started memory monitoring +2024-05-23 04:57:53,952 INFO SystemMonitor:2242 [interfaces.py:start():188] Started network monitoring +2024-05-23 04:57:54,016 DEBUG HandlerThread:2242 [system_info.py:probe():150] Probing system +2024-05-23 04:57:54,019 DEBUG HandlerThread:2242 [system_info.py:_probe_git():135] Probing git +2024-05-23 04:57:54,029 ERROR HandlerThread:2242 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git 
config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 04:57:54,029 DEBUG HandlerThread:2242 [system_info.py:_probe_git():143] Probing git done +2024-05-23 04:57:54,029 DEBUG HandlerThread:2242 [system_info.py:probe():198] Probing system done +2024-05-23 04:57:54,029 DEBUG HandlerThread:2242 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T04:57:54.016258', 'startedAt': '2024-05-23T04:57:53.524310', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.1786062500005, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 212.19118118286133}}, 'memory': {'total': 1007.43798828125}} +2024-05-23 04:57:54,030 INFO HandlerThread:2242 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 04:57:54,030 INFO HandlerThread:2242 [system_monitor.py:probe():227] Publishing system info +2024-05-23 04:57:54,032 INFO HandlerThread:2242 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 04:57:54,037 DEBUG SenderThread:2242 [sender.py:send():378] send: files +2024-05-23 04:57:54,038 INFO SenderThread:2242 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 04:57:54,210 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 04:57:54,210 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: python_packages +2024-05-23 04:57:54,212 DEBUG SenderThread:2242 [sender.py:send():378] send: telemetry +2024-05-23 04:57:54,256 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 04:57:54,256 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: stop_status +2024-05-23 04:57:54,623 INFO wandb-upload_0:2242 [upload_job.py:push():130] Uploaded file 
/tmp/tmp8mp9896nwandb/kbwqsg8a-wandb-metadata.json +2024-05-23 04:57:54,814 INFO Thread-12 :2242 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/wandb-metadata.json +2024-05-23 04:57:54,814 INFO Thread-12 :2242 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/requirements.txt +2024-05-23 04:57:54,815 INFO Thread-12 :2242 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/output.log +2024-05-23 04:57:56,814 INFO Thread-12 :2242 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/output.log +2024-05-23 04:57:59,421 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 04:58:04,679 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 04:58:04,821 INFO Thread-12 :2242 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/output.log +2024-05-23 04:58:05,006 DEBUG SenderThread:2242 [sender.py:send():378] send: exit +2024-05-23 04:58:05,006 INFO SenderThread:2242 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 04:58:05,006 INFO SenderThread:2242 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 04:58:05,007 INFO SenderThread:2242 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 04:58:05,008 INFO SenderThread:2242 [sender.py:send_exit():593] send defer +2024-05-23 04:58:05,008 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:05,008 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 04:58:05,008 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:05,008 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 04:58:05,008 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 1 +2024-05-23 04:58:05,008 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:05,008 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 04:58:05,008 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:05,008 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 04:58:05,008 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 2 +2024-05-23 04:58:05,008 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:05,008 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 04:58:05,008 INFO HandlerThread:2242 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 04:58:05,009 DEBUG SystemMonitor:2242 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 04:58:05,009 DEBUG SystemMonitor:2242 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 04:58:05,009 DEBUG SystemMonitor:2242 [system_monitor.py:_start():183] Publishing last batch of 
metrics +2024-05-23 04:58:05,009 INFO HandlerThread:2242 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 04:58:05,010 INFO HandlerThread:2242 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 04:58:05,010 INFO HandlerThread:2242 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 04:58:05,010 INFO HandlerThread:2242 [interfaces.py:finish():200] Joined network monitor +2024-05-23 04:58:05,010 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:05,010 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 04:58:05,010 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 3 +2024-05-23 04:58:05,010 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:05,010 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 04:58:05,010 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:05,010 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 04:58:05,010 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 4 +2024-05-23 04:58:05,010 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:05,010 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 04:58:05,011 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:05,011 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 04:58:05,011 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 5 +2024-05-23 04:58:05,011 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:05,011 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 04:58:05,011 DEBUG SenderThread:2242 [sender.py:send():378] send: summary +2024-05-23 04:58:05,012 INFO SenderThread:2242 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 04:58:05,012 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:05,012 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 04:58:05,012 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 6 +2024-05-23 04:58:05,012 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:05,012 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 04:58:05,012 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:05,012 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 04:58:05,017 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 04:58:05,081 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 7 +2024-05-23 04:58:05,081 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:05,081 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 04:58:05,081 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:05,081 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 04:58:05,822 INFO 
Thread-12 :2242 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/config.yaml +2024-05-23 04:58:05,822 INFO Thread-12 :2242 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/wandb-summary.json +2024-05-23 04:58:06,006 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:58:06,444 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 8 +2024-05-23 04:58:06,444 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:58:06,445 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:06,445 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 04:58:06,445 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:06,445 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 04:58:06,445 INFO SenderThread:2242 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 04:58:06,445 INFO SenderThread:2242 [job_builder.py:_get_source_type():576] no source found +2024-05-23 04:58:06,446 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 9 +2024-05-23 04:58:06,446 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:06,446 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 04:58:06,446 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:06,446 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 04:58:06,446 INFO SenderThread:2242 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 04:58:06,823 INFO SenderThread:2242 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/output.log +2024-05-23 04:58:06,824 INFO SenderThread:2242 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files +2024-05-23 04:58:06,824 INFO SenderThread:2242 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/wandb-metadata.json wandb-metadata.json +2024-05-23 04:58:06,824 INFO SenderThread:2242 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/output.log output.log +2024-05-23 04:58:06,824 INFO SenderThread:2242 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/config.yaml config.yaml +2024-05-23 04:58:06,826 INFO SenderThread:2242 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/requirements.txt requirements.txt +2024-05-23 04:58:06,828 INFO SenderThread:2242 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/wandb-summary.json wandb-summary.json +2024-05-23 04:58:06,828 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 10 
+2024-05-23 04:58:06,829 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:06,829 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 04:58:06,829 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:06,829 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 04:58:06,829 INFO SenderThread:2242 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 04:58:07,006 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:58:07,007 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:58:07,082 INFO wandb-upload_0:2242 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/output.log +2024-05-23 04:58:07,396 INFO wandb-upload_2:2242 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/requirements.txt +2024-05-23 04:58:07,419 INFO wandb-upload_3:2242 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/wandb-summary.json +2024-05-23 04:58:07,435 INFO wandb-upload_1:2242 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/files/config.yaml +2024-05-23 04:58:07,636 INFO Thread-11 (_thread_body):2242 [sender.py:transition_state():613] send defer: 11 +2024-05-23 04:58:07,636 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:07,636 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 04:58:07,636 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:07,636 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 04:58:07,636 INFO SenderThread:2242 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 04:58:07,636 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 12 +2024-05-23 04:58:07,637 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:07,637 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 04:58:07,637 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:07,637 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 04:58:07,637 INFO SenderThread:2242 [file_stream.py:finish():601] file stream finish called +2024-05-23 04:58:07,696 INFO SenderThread:2242 [file_stream.py:finish():605] file stream finish is done +2024-05-23 04:58:07,696 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 13 +2024-05-23 04:58:07,696 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:07,696 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 04:58:07,696 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:07,696 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 04:58:07,696 INFO SenderThread:2242 [sender.py:transition_state():613] send defer: 14 +2024-05-23 04:58:07,696 DEBUG 
HandlerThread:2242 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:58:07,696 INFO HandlerThread:2242 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 04:58:07,696 DEBUG SenderThread:2242 [sender.py:send():378] send: final +2024-05-23 04:58:07,696 DEBUG SenderThread:2242 [sender.py:send():378] send: footer +2024-05-23 04:58:07,697 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: defer +2024-05-23 04:58:07,697 INFO SenderThread:2242 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 04:58:07,697 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:58:07,697 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:58:07,697 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 04:58:07,697 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 04:58:07,697 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 04:58:07,698 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 04:58:07,698 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:58:07,698 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:58:07,698 DEBUG SenderThread:2242 [sender.py:send_request():405] send_request: server_info +2024-05-23 04:58:07,751 INFO MainThread:2242 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 04:58:07,751 INFO MainThread:2242 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 04:58:07,751 INFO MainThread:2242 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 04:58:07,751 DEBUG HandlerThread:2242 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 04:58:07,751 INFO HandlerThread:2242 [handler.py:finish():882] shutting down handler +2024-05-23 04:58:08,698 INFO WriterThread:2242 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/run-5fxibcq9.wandb +2024-05-23 04:58:08,751 INFO SenderThread:2242 [sender.py:finish():1545] shutting down sender +2024-05-23 04:58:08,751 INFO SenderThread:2242 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 04:58:08,751 INFO SenderThread:2242 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..4209e9e57002af03266b8cf376c40e4f5e19c566 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 04:57:53,535 INFO MainThread:2087 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 04:57:53,535 INFO MainThread:2087 [wandb_setup.py:_flush():76] Configure stats pid to 2087 +2024-05-23 04:57:53,535 INFO MainThread:2087 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 04:57:53,535 INFO MainThread:2087 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 04:57:53,535 INFO MainThread:2087 [wandb_setup.py:_flush():76] Loading settings 
from environment variables: {} +2024-05-23 04:57:53,535 INFO MainThread:2087 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 04:57:53,535 WARNING MainThread:2087 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 04:57:53,535 INFO MainThread:2087 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 04:57:53,535 INFO MainThread:2087 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 04:57:53,536 INFO MainThread:2087 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/logs/debug.log +2024-05-23 04:57:53,536 INFO MainThread:2087 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045753-5fxibcq9/logs/debug-internal.log +2024-05-23 04:57:53,536 INFO MainThread:2087 [wandb_init.py:init():560] calling init triggers +2024-05-23 04:57:53,536 INFO MainThread:2087 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 04:57:53,536 INFO MainThread:2087 [wandb_init.py:init():610] starting backend +2024-05-23 04:57:53,536 INFO MainThread:2087 [wandb_init.py:init():614] setting up manager +2024-05-23 04:57:53,540 INFO MainThread:2087 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 04:57:53,541 INFO MainThread:2087 [wandb_init.py:init():622] backend started and connected +2024-05-23 04:57:53,544 INFO MainThread:2087 [wandb_init.py:init():711] updated telemetry +2024-05-23 04:57:53,553 INFO MainThread:2087 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 04:57:53,816 INFO MainThread:2087 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 04:57:53,933 INFO MainThread:2087 [wandb_run.py:_on_init():2405] got version response +2024-05-23 04:57:53,934 INFO MainThread:2087 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 04:57:54,211 INFO MainThread:2087 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 04:57:54,211 INFO MainThread:2087 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 04:57:54,211 INFO MainThread:2087 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 04:57:54,211 INFO MainThread:2087 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-23 04:57:54,213 INFO MainThread:2087 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 04:58:08,752 WARNING MsgRouterThr:2087 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..964cddfd3b4e9987b3c823212c04fa230b9f2b69 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-23 07:56:43,451 INFO StreamThr :1311 [internal.py:wandb_internal():85] W&B internal server running at pid: 1311, started at: 2024-05-23 07:56:43.448007 +2024-05-23 07:56:43,454 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: status +2024-05-23 07:56:43,455 INFO WriterThread:1311 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/run-1ipd11uu.wandb +2024-05-23 07:56:43,457 DEBUG SenderThread:1311 [sender.py:send():378] send: header +2024-05-23 07:56:43,460 DEBUG SenderThread:1311 [sender.py:send():378] send: run +2024-05-23 07:56:43,736 INFO SenderThread:1311 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files +2024-05-23 07:56:43,736 INFO SenderThread:1311 [sender.py:_start_run_threads():1123] run started: 1ipd11uu with start time 1716451003.448096 +2024-05-23 07:56:43,737 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 07:56:43,737 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: check_version +2024-05-23 07:56:43,854 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 07:56:43,857 DEBUG HandlerThread:1311 [system_info.py:__init__():26] System info init +2024-05-23 07:56:43,857 DEBUG HandlerThread:1311 [system_info.py:__init__():41] System info init done +2024-05-23 07:56:43,857 INFO HandlerThread:1311 [system_monitor.py:start():194] Starting system monitor +2024-05-23 07:56:43,857 INFO SystemMonitor:1311 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 07:56:43,857 INFO HandlerThread:1311 [system_monitor.py:probe():214] Collecting system info +2024-05-23 07:56:43,864 INFO SystemMonitor:1311 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 07:56:43,864 INFO SystemMonitor:1311 [interfaces.py:start():188] Started disk monitoring +2024-05-23 07:56:43,866 INFO SystemMonitor:1311 [interfaces.py:start():188] Started memory monitoring +2024-05-23 07:56:43,866 INFO SystemMonitor:1311 [interfaces.py:start():188] Started network monitoring +2024-05-23 07:56:43,928 DEBUG HandlerThread:1311 [system_info.py:probe():150] Probing system +2024-05-23 07:56:43,932 DEBUG HandlerThread:1311 [system_info.py:_probe_git():135] Probing git +2024-05-23 07:56:43,941 ERROR HandlerThread:1311 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 07:56:43,941 DEBUG 
HandlerThread:1311 [system_info.py:_probe_git():143] Probing git done +2024-05-23 07:56:43,941 DEBUG HandlerThread:1311 [system_info.py:probe():198] Probing system done +2024-05-23 07:56:43,941 DEBUG HandlerThread:1311 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T07:56:43.929124', 'startedAt': '2024-05-23T07:56:43.429666', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.500025, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.6347312927246}}, 'memory': {'total': 1007.4379806518555}} +2024-05-23 07:56:43,942 INFO HandlerThread:1311 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 07:56:43,942 INFO HandlerThread:1311 [system_monitor.py:probe():227] Publishing system info +2024-05-23 07:56:43,944 INFO HandlerThread:1311 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 07:56:43,949 DEBUG SenderThread:1311 [sender.py:send():378] send: files +2024-05-23 07:56:43,949 INFO SenderThread:1311 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 07:56:44,128 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 07:56:44,129 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: python_packages +2024-05-23 07:56:44,139 DEBUG SenderThread:1311 [sender.py:send():378] send: telemetry +2024-05-23 07:56:44,201 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 07:56:44,201 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: stop_status +2024-05-23 07:56:44,528 INFO wandb-upload_0:1311 [upload_job.py:push():130] Uploaded file /tmp/tmpsz8xgo2ewandb/56h2ibxr-wandb-metadata.json +2024-05-23 07:56:44,737 INFO Thread-12 :1311 [dir_watcher.py:_on_file_created():271] file/dir created: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/wandb-metadata.json +2024-05-23 07:56:44,737 INFO Thread-12 :1311 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/output.log +2024-05-23 07:56:44,737 INFO Thread-12 :1311 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/requirements.txt +2024-05-23 07:56:46,737 INFO Thread-12 :1311 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/output.log +2024-05-23 07:56:49,353 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:56:54,746 INFO Thread-12 :1311 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/output.log +2024-05-23 07:56:55,322 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:56:55,746 DEBUG SenderThread:1311 [sender.py:send():378] send: exit +2024-05-23 07:56:55,746 INFO SenderThread:1311 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 07:56:55,746 INFO SenderThread:1311 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 07:56:55,748 INFO SenderThread:1311 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 07:56:55,748 INFO SenderThread:1311 [sender.py:send_exit():593] send defer +2024-05-23 07:56:55,748 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:55,748 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 07:56:55,748 INFO Thread-12 :1311 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/wandb-summary.json +2024-05-23 07:56:55,749 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:55,749 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 07:56:55,749 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 1 +2024-05-23 07:56:55,749 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:55,749 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 07:56:55,749 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:55,749 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 07:56:55,749 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 2 +2024-05-23 07:56:55,749 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:55,749 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 07:56:55,749 INFO HandlerThread:1311 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 07:56:55,750 INFO HandlerThread:1311 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 07:56:55,750 DEBUG SystemMonitor:1311 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 07:56:55,750 INFO HandlerThread:1311 [interfaces.py:finish():200] 
Joined disk monitor +2024-05-23 07:56:55,750 DEBUG SystemMonitor:1311 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 07:56:55,750 INFO HandlerThread:1311 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 07:56:55,750 DEBUG SystemMonitor:1311 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 07:56:55,750 INFO HandlerThread:1311 [interfaces.py:finish():200] Joined network monitor +2024-05-23 07:56:55,753 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:55,753 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 07:56:55,753 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 3 +2024-05-23 07:56:55,753 DEBUG SenderThread:1311 [sender.py:send():378] send: stats +2024-05-23 07:56:55,753 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:55,754 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 07:56:55,755 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:55,755 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 07:56:55,755 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 4 +2024-05-23 07:56:55,755 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:55,755 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 07:56:55,755 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:55,755 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 07:56:55,755 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 5 +2024-05-23 07:56:55,755 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:55,755 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 07:56:55,755 DEBUG SenderThread:1311 [sender.py:send():378] send: summary +2024-05-23 07:56:55,756 INFO SenderThread:1311 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 07:56:55,756 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:55,756 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 07:56:55,756 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 6 +2024-05-23 07:56:55,756 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:55,756 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 07:56:55,757 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:55,757 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 07:56:55,761 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:56:55,829 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 7 +2024-05-23 07:56:55,830 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:55,830 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 07:56:55,830 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer 
+2024-05-23 07:56:55,830 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 07:56:56,221 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 8 +2024-05-23 07:56:56,221 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:56,221 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 07:56:56,221 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:56,221 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 07:56:56,221 INFO SenderThread:1311 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 07:56:56,222 INFO SenderThread:1311 [job_builder.py:_get_source_type():576] no source found +2024-05-23 07:56:56,222 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 9 +2024-05-23 07:56:56,222 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:56,222 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 07:56:56,222 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:56,223 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 07:56:56,223 INFO SenderThread:1311 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 07:56:56,745 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:56:56,750 INFO SenderThread:1311 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/config.yaml +2024-05-23 07:56:56,750 INFO SenderThread:1311 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/wandb-summary.json +2024-05-23 07:56:56,750 INFO SenderThread:1311 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/output.log +2024-05-23 07:56:56,750 INFO SenderThread:1311 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files +2024-05-23 07:56:56,751 INFO SenderThread:1311 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/wandb-metadata.json wandb-metadata.json +2024-05-23 07:56:56,751 INFO SenderThread:1311 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/wandb-summary.json wandb-summary.json +2024-05-23 07:56:56,751 INFO SenderThread:1311 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/config.yaml config.yaml +2024-05-23 07:56:56,753 INFO SenderThread:1311 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/output.log output.log +2024-05-23 07:56:56,753 INFO SenderThread:1311 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/requirements.txt requirements.txt +2024-05-23 07:56:56,754 INFO SenderThread:1311 
[sender.py:transition_state():613] send defer: 10 +2024-05-23 07:56:56,754 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:56:56,754 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:56,754 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 07:56:56,754 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:56,754 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 07:56:56,754 INFO SenderThread:1311 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 07:56:57,066 INFO wandb-upload_0:1311 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/wandb-summary.json +2024-05-23 07:56:57,324 INFO wandb-upload_3:1311 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/requirements.txt +2024-05-23 07:56:57,389 INFO wandb-upload_2:1311 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/output.log +2024-05-23 07:56:57,407 INFO wandb-upload_1:1311 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/files/config.yaml +2024-05-23 07:56:57,607 INFO Thread-11 (_thread_body):1311 [sender.py:transition_state():613] send defer: 11 +2024-05-23 07:56:57,607 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:57,607 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 07:56:57,607 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:57,608 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 07:56:57,608 INFO SenderThread:1311 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 07:56:57,608 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 12 +2024-05-23 07:56:57,608 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:57,608 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 07:56:57,608 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:57,608 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 07:56:57,608 INFO SenderThread:1311 [file_stream.py:finish():601] file stream finish called +2024-05-23 07:56:57,688 INFO SenderThread:1311 [file_stream.py:finish():605] file stream finish is done +2024-05-23 07:56:57,688 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 13 +2024-05-23 07:56:57,688 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:56:57,688 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 07:56:57,688 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:57,688 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 07:56:57,689 INFO SenderThread:1311 [sender.py:transition_state():613] send defer: 14 +2024-05-23 07:56:57,689 DEBUG HandlerThread:1311 [handler.py:handle_request():158] 
handle_request: defer +2024-05-23 07:56:57,689 INFO HandlerThread:1311 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 07:56:57,689 DEBUG SenderThread:1311 [sender.py:send():378] send: final +2024-05-23 07:56:57,689 DEBUG SenderThread:1311 [sender.py:send():378] send: footer +2024-05-23 07:56:57,689 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: defer +2024-05-23 07:56:57,689 INFO SenderThread:1311 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 07:56:57,690 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:56:57,690 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:56:57,690 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 07:56:57,690 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 07:56:57,690 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 07:56:57,690 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 07:56:57,690 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:56:57,690 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:56:57,691 DEBUG SenderThread:1311 [sender.py:send_request():405] send_request: server_info +2024-05-23 07:56:57,741 INFO MainThread:1311 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 07:56:57,742 INFO MainThread:1311 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 07:56:57,742 INFO MainThread:1311 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 07:56:57,742 DEBUG HandlerThread:1311 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 07:56:57,742 INFO HandlerThread:1311 [handler.py:finish():882] shutting down handler +2024-05-23 07:56:58,690 INFO WriterThread:1311 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_075643-1ipd11uu/run-1ipd11uu.wandb +2024-05-23 07:56:58,741 INFO SenderThread:1311 [sender.py:finish():1545] shutting down sender +2024-05-23 07:56:58,741 INFO SenderThread:1311 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 07:56:58,742 INFO SenderThread:1311 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..bee5d62e086269ee6a96533f7e3596a539256a9e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 10}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..2d79c1d321cf472a4c48b32188e18d9b4c0aaa47 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-23 08:04:32,001 INFO StreamThr :4072 [internal.py:wandb_internal():85] W&B internal server running at pid: 4072, started at: 2024-05-23 
08:04:31.997991 +2024-05-23 08:04:32,004 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: status +2024-05-23 08:04:32,005 INFO WriterThread:4072 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/run-9wr9a5nn.wandb +2024-05-23 08:04:32,007 DEBUG SenderThread:4072 [sender.py:send():378] send: header +2024-05-23 08:04:32,010 DEBUG SenderThread:4072 [sender.py:send():378] send: run +2024-05-23 08:04:32,308 INFO SenderThread:4072 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files +2024-05-23 08:04:32,308 INFO SenderThread:4072 [sender.py:_start_run_threads():1123] run started: 9wr9a5nn with start time 1716451471.998076 +2024-05-23 08:04:32,311 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 08:04:32,311 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: check_version +2024-05-23 08:04:32,428 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 08:04:32,430 DEBUG HandlerThread:4072 [system_info.py:__init__():26] System info init +2024-05-23 08:04:32,430 DEBUG HandlerThread:4072 [system_info.py:__init__():41] System info init done +2024-05-23 08:04:32,430 INFO HandlerThread:4072 [system_monitor.py:start():194] Starting system monitor +2024-05-23 08:04:32,430 INFO SystemMonitor:4072 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 08:04:32,430 INFO HandlerThread:4072 [system_monitor.py:probe():214] Collecting system info +2024-05-23 08:04:32,437 INFO SystemMonitor:4072 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 08:04:32,437 INFO SystemMonitor:4072 [interfaces.py:start():188] Started disk monitoring +2024-05-23 08:04:32,439 INFO SystemMonitor:4072 [interfaces.py:start():188] Started memory monitoring +2024-05-23 08:04:32,439 INFO SystemMonitor:4072 [interfaces.py:start():188] Started network monitoring +2024-05-23 08:04:32,501 DEBUG HandlerThread:4072 [system_info.py:probe():150] Probing system +2024-05-23 08:04:32,504 DEBUG HandlerThread:4072 [system_info.py:_probe_git():135] Probing git +2024-05-23 08:04:32,513 ERROR HandlerThread:4072 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 08:04:32,513 DEBUG HandlerThread:4072 [system_info.py:_probe_git():143] Probing git done +2024-05-23 08:04:32,513 DEBUG HandlerThread:4072 [system_info.py:probe():198] Probing system done +2024-05-23 08:04:32,513 DEBUG HandlerThread:4072 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T08:04:32.501310', 'startedAt': '2024-05-23T08:04:31.977880', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 
'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.50000625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 
'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 
'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.64179611206055}}, 'memory': {'total': 1007.4379806518555}} +2024-05-23 08:04:32,514 INFO HandlerThread:4072 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 08:04:32,514 INFO HandlerThread:4072 [system_monitor.py:probe():227] Publishing system info +2024-05-23 08:04:32,520 INFO HandlerThread:4072 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 08:04:32,525 DEBUG SenderThread:4072 [sender.py:send():378] send: files +2024-05-23 08:04:32,525 INFO SenderThread:4072 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 08:04:32,700 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 08:04:32,700 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: python_packages +2024-05-23 08:04:32,701 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 08:04:32,706 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: stop_status +2024-05-23 08:04:32,800 DEBUG SenderThread:4072 [sender.py:send():378] send: telemetry +2024-05-23 08:04:33,098 INFO wandb-upload_0:4072 [upload_job.py:push():130] Uploaded file /tmp/tmpav6t4hy7wandb/fnzgkj6a-wandb-metadata.json +2024-05-23 08:04:33,310 INFO Thread-12 :4072 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/requirements.txt +2024-05-23 08:04:33,310 INFO Thread-12 :4072 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/wandb-metadata.json +2024-05-23 08:04:33,311 INFO Thread-12 :4072 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/output.log +2024-05-23 08:04:35,310 INFO Thread-12 :4072 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/output.log +2024-05-23 08:04:37,803 DEBUG 
HandlerThread:4072 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 08:04:43,027 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 08:04:43,316 INFO Thread-12 :4072 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/output.log +2024-05-23 08:04:43,333 DEBUG SenderThread:4072 [sender.py:send():378] send: exit +2024-05-23 08:04:43,333 INFO SenderThread:4072 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 08:04:43,333 INFO SenderThread:4072 [sender.py:send_exit():587] handling runtime: 10 +2024-05-23 08:04:43,334 INFO SenderThread:4072 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 08:04:43,335 INFO SenderThread:4072 [sender.py:send_exit():593] send defer +2024-05-23 08:04:43,335 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:43,335 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 08:04:43,335 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:43,335 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 08:04:43,335 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 1 +2024-05-23 08:04:43,335 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:43,335 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 08:04:43,335 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:43,335 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 08:04:43,335 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 2 +2024-05-23 08:04:43,335 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:43,335 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 08:04:43,335 INFO HandlerThread:4072 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 08:04:43,336 DEBUG SystemMonitor:4072 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 08:04:43,336 DEBUG SystemMonitor:4072 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 08:04:43,336 DEBUG SystemMonitor:4072 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 08:04:43,339 INFO HandlerThread:4072 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 08:04:43,339 INFO HandlerThread:4072 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 08:04:43,339 INFO HandlerThread:4072 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 08:04:43,339 INFO HandlerThread:4072 [interfaces.py:finish():200] Joined network monitor +2024-05-23 08:04:43,339 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:43,339 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 08:04:43,339 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 3 +2024-05-23 08:04:43,340 DEBUG SenderThread:4072 [sender.py:send():378] send: stats +2024-05-23 08:04:43,341 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:43,341 INFO HandlerThread:4072 
[handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 08:04:43,341 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:43,341 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 08:04:43,341 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 4 +2024-05-23 08:04:43,341 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:43,341 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 08:04:43,341 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:43,341 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 08:04:43,341 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 5 +2024-05-23 08:04:43,341 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:43,341 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 08:04:43,341 DEBUG SenderThread:4072 [sender.py:send():378] send: summary +2024-05-23 08:04:43,346 INFO SenderThread:4072 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 08:04:43,346 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:43,346 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 08:04:43,346 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 6 +2024-05-23 08:04:43,346 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:43,346 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 08:04:43,346 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:43,346 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 08:04:43,351 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 08:04:43,417 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 7 +2024-05-23 08:04:43,417 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:43,417 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 08:04:43,417 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:43,418 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 08:04:44,317 INFO Thread-12 :4072 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/config.yaml +2024-05-23 08:04:44,317 INFO Thread-12 :4072 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/wandb-summary.json +2024-05-23 08:04:44,333 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 08:04:44,822 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 8 +2024-05-23 08:04:44,823 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 08:04:44,823 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:44,823 INFO HandlerThread:4072 
[handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 08:04:44,823 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:44,823 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 08:04:44,823 INFO SenderThread:4072 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 08:04:44,824 INFO SenderThread:4072 [job_builder.py:_get_source_type():576] no source found +2024-05-23 08:04:44,824 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 9 +2024-05-23 08:04:44,824 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:44,824 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 08:04:44,824 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:44,824 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 08:04:44,824 INFO SenderThread:4072 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 08:04:45,318 INFO SenderThread:4072 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/output.log +2024-05-23 08:04:45,319 INFO SenderThread:4072 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files +2024-05-23 08:04:45,319 INFO SenderThread:4072 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/requirements.txt requirements.txt +2024-05-23 08:04:45,319 INFO SenderThread:4072 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/wandb-metadata.json wandb-metadata.json +2024-05-23 08:04:45,319 INFO SenderThread:4072 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/wandb-summary.json wandb-summary.json +2024-05-23 08:04:45,321 INFO SenderThread:4072 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/config.yaml config.yaml +2024-05-23 08:04:45,323 INFO SenderThread:4072 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/output.log output.log +2024-05-23 08:04:45,323 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 10 +2024-05-23 08:04:45,324 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:45,324 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 08:04:45,324 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:45,324 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 08:04:45,324 INFO SenderThread:4072 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 08:04:45,337 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 08:04:45,337 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 08:04:45,616 INFO wandb-upload_0:4072 [upload_job.py:push():130] Uploaded file 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/requirements.txt +2024-05-23 08:04:45,871 INFO wandb-upload_2:4072 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/config.yaml +2024-05-23 08:04:45,910 INFO wandb-upload_1:4072 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/wandb-summary.json +2024-05-23 08:04:45,942 INFO wandb-upload_3:4072 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/files/output.log +2024-05-23 08:04:46,142 INFO Thread-11 (_thread_body):4072 [sender.py:transition_state():613] send defer: 11 +2024-05-23 08:04:46,142 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:46,143 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 08:04:46,143 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:46,143 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 08:04:46,143 INFO SenderThread:4072 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 08:04:46,143 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 12 +2024-05-23 08:04:46,143 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:46,143 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 08:04:46,143 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:46,143 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 08:04:46,143 INFO SenderThread:4072 [file_stream.py:finish():601] file stream finish called +2024-05-23 08:04:46,218 INFO SenderThread:4072 [file_stream.py:finish():605] file stream finish is done +2024-05-23 08:04:46,218 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 13 +2024-05-23 08:04:46,218 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:46,218 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 08:04:46,218 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:46,218 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 08:04:46,218 INFO SenderThread:4072 [sender.py:transition_state():613] send defer: 14 +2024-05-23 08:04:46,218 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: defer +2024-05-23 08:04:46,218 INFO HandlerThread:4072 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 08:04:46,219 DEBUG SenderThread:4072 [sender.py:send():378] send: final +2024-05-23 08:04:46,219 DEBUG SenderThread:4072 [sender.py:send():378] send: footer +2024-05-23 08:04:46,219 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: defer +2024-05-23 08:04:46,219 INFO SenderThread:4072 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 08:04:46,220 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 08:04:46,220 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 08:04:46,220 DEBUG HandlerThread:4072 
[handler.py:handle_request():158] handle_request: server_info +2024-05-23 08:04:46,220 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 08:04:46,220 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 08:04:46,220 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 08:04:46,220 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 08:04:46,220 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 08:04:46,220 DEBUG SenderThread:4072 [sender.py:send_request():405] send_request: server_info +2024-05-23 08:04:46,282 INFO MainThread:4072 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 08:04:46,283 INFO MainThread:4072 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 08:04:46,283 INFO MainThread:4072 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 08:04:46,283 DEBUG HandlerThread:4072 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 08:04:46,283 INFO HandlerThread:4072 [handler.py:finish():882] shutting down handler +2024-05-23 08:04:47,220 INFO WriterThread:4072 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/run-9wr9a5nn.wandb +2024-05-23 08:04:47,282 INFO SenderThread:4072 [sender.py:finish():1545] shutting down sender +2024-05-23 08:04:47,282 INFO SenderThread:4072 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 08:04:47,283 INFO SenderThread:4072 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..7e9e667e6cd3b671bb8d4df9363db8e781594107 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 08:04:31,992 INFO MainThread:3917 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 08:04:31,992 INFO MainThread:3917 [wandb_setup.py:_flush():76] Configure stats pid to 3917 +2024-05-23 08:04:31,992 INFO MainThread:3917 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 08:04:31,992 INFO MainThread:3917 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 08:04:31,992 INFO MainThread:3917 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 08:04:31,992 INFO MainThread:3917 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 08:04:31,992 WARNING MainThread:3917 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 08:04:31,992 INFO MainThread:3917 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 08:04:31,992 INFO MainThread:3917 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 08:04:31,992 INFO MainThread:3917 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/logs/debug.log +2024-05-23 08:04:31,992 INFO MainThread:3917 
[wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/logs/debug-internal.log +2024-05-23 08:04:31,992 INFO MainThread:3917 [wandb_init.py:init():560] calling init triggers +2024-05-23 08:04:31,993 INFO MainThread:3917 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 08:04:31,993 INFO MainThread:3917 [wandb_init.py:init():610] starting backend +2024-05-23 08:04:31,993 INFO MainThread:3917 [wandb_init.py:init():614] setting up manager +2024-05-23 08:04:31,997 INFO MainThread:3917 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 08:04:31,997 INFO MainThread:3917 [wandb_init.py:init():622] backend started and connected +2024-05-23 08:04:32,001 INFO MainThread:3917 [wandb_init.py:init():711] updated telemetry +2024-05-23 08:04:32,010 INFO MainThread:3917 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 08:04:32,311 INFO MainThread:3917 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 08:04:32,421 INFO MainThread:3917 [wandb_run.py:_on_init():2405] got version response +2024-05-23 08:04:32,422 INFO MainThread:3917 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 08:04:32,701 INFO MainThread:3917 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 08:04:32,701 INFO MainThread:3917 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 08:04:32,701 INFO MainThread:3917 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 08:04:32,701 INFO MainThread:3917 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 08:04:32,703 INFO MainThread:3917 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 08:04:47,284 WARNING MsgRouterThr:3917 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/run-9wr9a5nn.wandb b/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/run-9wr9a5nn.wandb new file mode 100644 index 0000000000000000000000000000000000000000..a81abc30b14357019682cbb8e7c93a112071d826 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_080431-9wr9a5nn/run-9wr9a5nn.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240529_130638-brbqr35q/files/config.yaml b/lm-evaluation-harness/wandb/run-20240529_130638-brbqr35q/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d1072ab05c29d8b6edcb62657a452bb17e0e6ee --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130638-brbqr35q/files/config.yaml @@ -0,0 +1,283 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716987998 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 2 + - 23 + - 62 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 +task_configs: + desc: null + value: + arc_easy: + task: arc_easy + group: + - ai2_arc + dataset_path: allenai/ai2_arc + dataset_name: ARC-Easy + training_split: train + validation_split: validation + test_split: test + doc_to_text: 'Question: {{question}} + + Answer:' + doc_to_target: 
'{{choices.label.index(answerKey)}}' + doc_to_choice: '{{choices.text}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: 'Question: {{question}} + + Answer:' + metadata: + version: 1.0 + boolq: + task: boolq + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: boolq + training_split: train + validation_split: validation + doc_to_text: '{{passage}} + + Question: {{question}}? + + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: passage + metadata: + version: 2.0 + copa: + task: copa + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: copa + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n # Drop the period\n connector =\ + \ {\n \"cause\": \"because\",\n \"effect\": \"therefore\",\n\ + \ }[doc[\"question\"]]\n return doc[\"premise\"].strip()[:-1] + f\"\ + \ {connector}\"\n" + doc_to_target: "def doc_to_target(doc):\n correct_choice = doc[\"choice1\"\ + ] if doc[\"label\"] == 0 else doc[\"choice2\"]\n # Connect the sentences\n\ + \ return \" \" + convert_choice(correct_choice)\n" + doc_to_choice: "def doc_to_choice(doc):\n return [\" \" + convert_choice(doc[\"\ + choice1\"]), \" \" + convert_choice(doc[\"choice2\"])]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + mrpc: + task: mrpc + group: glue + dataset_path: glue + dataset_name: mrpc + training_split: train + validation_split: validation + doc_to_text: 'Sentence 1: {{sentence1}} + + Sentence 2: {{sentence2}} + + Question: Do both sentences mean the same thing? + + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + - metric: f1 + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + piqa: + task: piqa + dataset_path: piqa + training_split: train + validation_split: validation + doc_to_text: 'Question: {{goal}} + + Answer:' + doc_to_target: label + doc_to_choice: '{{[sol1, sol2]}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: goal + metadata: + version: 1.0 + sst2: + task: sst2 + group: glue + dataset_path: glue + dataset_name: sst2 + training_split: train + validation_split: validation + doc_to_text: '{{sentence}} + + Question: Is this sentence positive or negative? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - negative + - positive + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + winogrande: + task: winogrande + dataset_path: winogrande + dataset_name: winogrande_xl + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n\ + \ return answer_to_num[doc[\"answer\"]]\n" + doc_to_target: "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"\ + _\") + 1\n return doc[\"sentence\"][idx:].strip()\n" + doc_to_choice: "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"\ + _\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"\ + sentence\"][:idx] + opt for opt in options]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: sentence + metadata: + version: 1.0 +cli_configs: + desc: null + value: + model: hf + model_args: pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint/llamav2-3b/hf/global_step60000,tokenizer=huggyllama/llama-13b + batch_size: auto + batch_sizes: + - 64 + device: null + use_cache: null + limit: null + bootstrap_iters: 100000 + gen_kwargs: null diff --git a/lm-evaluation-harness/wandb/run-20240529_130638-brbqr35q/files/media/table/evaluation/eval_results_1_84371b9cfbbf2990fca7.table.json b/lm-evaluation-harness/wandb/run-20240529_130638-brbqr35q/files/media/table/evaluation/eval_results_1_84371b9cfbbf2990fca7.table.json new file mode 100644 index 0000000000000000000000000000000000000000..8e341bcc7724a1173ce5e6a4cbc000417b6ed671 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130638-brbqr35q/files/media/table/evaluation/eval_results_1_84371b9cfbbf2990fca7.table.json @@ -0,0 +1 @@ +{"columns": ["Tasks", "Version", "Filter", "num_fewshot", "Metric", "Value", "Stderr"], "data": [["winogrande", 1.0, "none", 0, "acc", "0.5138121546961326", "0.0140"], ["sst2", 1.0, "none", 0, "acc", "0.5263761467889908", "0.0169"], ["piqa", 1.0, "none", 0, "acc", "0.5255712731229597", "0.0117"], ["piqa", 1.0, "none", 0, "acc_norm", "0.4972796517954298", "0.0117"], ["mrpc", 1.0, "none", 0, "acc", "0.4215686274509804", "0.0245"], ["mrpc", 1.0, "none", 0, "f1", "0.44339622641509435", "0.0301"], ["copa", 1.0, "none", 0, "acc", "0.67", "0.0473"], ["boolq", 2.0, "none", 0, "acc", "0.4944954128440367", "0.0087"], ["arc_easy", 1.0, "none", 0, "acc", "0.25462962962962965", "0.0089"], ["arc_easy", 1.0, "none", 0, "acc_norm", "0.25673400673400676", "0.0090"]]} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240529_130638-brbqr35q/files/output.log b/lm-evaluation-harness/wandb/run-20240529_130638-brbqr35q/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..66eebda4593e04f6b1ad575dcbf798e139257f27 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130638-brbqr35q/files/output.log @@ -0,0 +1,483 @@ + +2024-05-29:13:06:39,497 INFO [__main__.py:251] Verbosity set to INFO +2024-05-29:13:06:48,729 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-05-29:13:06:48,731 INFO [evaluator.py:131] Setting random seed to 0 | Setting 
numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-29:13:06:48,731 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint/llamav2-3b/hf/global_step60000', 'tokenizer': 'huggyllama/llama-13b'} +2024-05-29:13:06:51,049 INFO [huggingface.py:164] Using device 'cuda' +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Downloading readme: 100%|██████████| 9.00k/9.00k [00:00<00:00, 16.4MB/s] +Downloading data: 100%|██████████| 331k/331k [00:00<00:00, 4.01MB/s] +Downloading data: 100%|██████████| 346k/346k [00:00<00:00, 4.44MB/s] +Downloading data: 100%|██████████| 86.1k/86.1k [00:00<00:00, 1.13MB/s] +Generating train split: 100%|██████████| 2251/2251 [00:00<00:00, 102765.54 examples/s] +Generating test split: 100%|██████████| 2376/2376 [00:00<00:00, 333177.30 examples/s] +Generating validation split: 100%|██████████| 570/570 [00:00<00:00, 189652.01 examples/s] +2024-05-29:13:07:21,695 WARNING [task.py:763] [Task: boolq] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:07:21,695 WARNING [task.py:775] [Task: boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for super_glue contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/super_glue +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 30.7k/30.7k [00:00<00:00, 40.2MB/s] +Downloading readme: 100%|██████████| 18.2k/18.2k [00:00<00:00, 30.4MB/s] +Downloading data: 100%|██████████| 4.12M/4.12M [00:00<00:00, 84.4MB/s] +Generating train split: 100%|██████████| 9427/9427 [00:00<00:00, 22036.01 examples/s] +Generating validation split: 100%|██████████| 3270/3270 [00:00<00:00, 22219.08 examples/s] +Generating test split: 100%|██████████| 3245/3245 [00:00<00:00, 22569.17 examples/s] +2024-05-29:13:07:25,359 WARNING [task.py:763] [Task: copa] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:07:25,359 WARNING [task.py:775] [Task: copa] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 44.0k/44.0k [00:00<00:00, 42.2MB/s] +Generating train split: 100%|██████████| 400/400 [00:00<00:00, 16228.53 examples/s] +Generating validation split: 100%|██████████| 100/100 [00:00<00:00, 12549.15 examples/s] +Generating test split: 100%|██████████| 500/500 [00:00<00:00, 17238.24 examples/s] +2024-05-29:13:07:27,697 WARNING [task.py:763] [Task: mrpc] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:07:27,698 WARNING [task.py:775] [Task: mrpc] metric acc is defined, but higher_is_better is not. 
using default higher_is_better=True +2024-05-29:13:07:27,698 WARNING [task.py:763] [Task: mrpc] metric f1 is defined, but aggregation is not. using default aggregation=f1 +2024-05-29:13:07:27,698 WARNING [task.py:775] [Task: mrpc] metric f1 is defined, but higher_is_better is not. using default higher_is_better=True +Downloading readme: 100%|██████████| 35.3k/35.3k [00:00<00:00, 38.5MB/s] +Downloading data: 100%|██████████| 649k/649k [00:00<00:00, 4.24MB/s] +Downloading data: 100%|██████████| 75.7k/75.7k [00:00<00:00, 206kB/s] +Downloading data: 100%|██████████| 308k/308k [00:00<00:00, 2.11MB/s] +Generating train split: 100%|██████████| 3668/3668 [00:00<00:00, 398526.24 examples/s] +Generating validation split: 100%|██████████| 408/408 [00:00<00:00, 175353.63 examples/s] +Generating test split: 100%|██████████| 1725/1725 [00:00<00:00, 363339.25 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for piqa contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/piqa +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 5.36k/5.36k [00:00<00:00, 12.1MB/s] +Downloading readme: 100%|██████████| 8.41k/8.41k [00:00<00:00, 17.4MB/s] +Downloading data: 100%|██████████| 1.82M/1.82M [00:00<00:00, 4.20MB/s] +Downloading data: 100%|██████████| 815k/815k [00:00<00:00, 19.1MB/s] +Generating train split: 100%|██████████| 16113/16113 [00:00<00:00, 23831.60 examples/s] +Generating test split: 100%|██████████| 3084/3084 [00:00<00:00, 24653.90 examples/s] +Generating validation split: 100%|██████████| 1838/1838 [00:00<00:00, 23673.27 examples/s] +2024-05-29:13:07:38,891 WARNING [task.py:763] [Task: sst2] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:07:38,894 WARNING [task.py:775] [Task: sst2] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 3.11M/3.11M [00:00<00:00, 19.5MB/s] +Downloading data: 100%|██████████| 72.8k/72.8k [00:00<00:00, 500kB/s] +Downloading data: 100%|██████████| 148k/148k [00:00<00:00, 980kB/s] +Generating train split: 100%|██████████| 67349/67349 [00:00<00:00, 1393418.60 examples/s] +Generating validation split: 100%|██████████| 872/872 [00:00<00:00, 389752.03 examples/s] +Generating test split: 100%|██████████| 1821/1821 [00:00<00:00, 522173.21 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for winogrande contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/winogrande +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. 
+ warnings.warn( +Downloading builder script: 100%|██████████| 5.65k/5.65k [00:00<00:00, 13.5MB/s] +Downloading readme: 100%|██████████| 9.97k/9.97k [00:00<00:00, 20.9MB/s] +Downloading data: 100%|██████████| 3.40M/3.40M [00:00<00:00, 7.01MB/s] +Generating train split: 100%|██████████| 40398/40398 [00:01<00:00, 24364.91 examples/s] +Generating test split: 100%|██████████| 1767/1767 [00:00<00:00, 24313.25 examples/s] +Generating validation split: 100%|██████████| 1267/1267 [00:00<00:00, 23814.51 examples/s] +2024-05-29:13:07:55,712 INFO [task.py:395] Building contexts for winogrande on rank 0... +100%|██████████| 1267/1267 [00:00<00:00, 69557.37it/s] +2024-05-29:13:07:55,797 INFO [task.py:395] Building contexts for sst2 on rank 0... +100%|██████████| 872/872 [00:00<00:00, 2565.66it/s] +2024-05-29:13:07:56,167 INFO [task.py:395] Building contexts for piqa on rank 0... +100%|██████████| 1838/1838 [00:01<00:00, 1075.99it/s] +2024-05-29:13:07:57,948 INFO [task.py:395] Building contexts for mrpc on rank 0... +100%|██████████| 408/408 [00:00<00:00, 1869.22it/s] +2024-05-29:13:07:58,185 INFO [task.py:395] Building contexts for copa on rank 0... +100%|██████████| 100/100 [00:00<00:00, 60787.01it/s] +2024-05-29:13:07:58,194 INFO [task.py:395] Building contexts for boolq on rank 0... +100%|██████████| 3270/3270 [00:01<00:00, 1976.06it/s] +2024-05-29:13:07:59,981 INFO [task.py:395] Building contexts for arc_easy on rank 0... +100%|██████████| 2376/2376 [00:02<00:00, 1064.97it/s] +2024-05-29:13:08:02,362 INFO [evaluator.py:379] Running loglikelihood requests +Running loglikelihood requests: 0%| | 0/25011 [00:00 + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 237, in __init__ + self._create_tokenizer( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 630, in _create_tokenizer + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/tokenization_auto.py", line 752, in from_pretrained + config = AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 1082, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 644, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 699, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 360, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/ does not 
appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k//main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..7391d0fb5302364497bd6017486c36f0caae0613 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/requirements.txt @@ -0,0 +1,154 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.2 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.15.2 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.36.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 
+zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..75a028a32d1db6eb90cdef2a97aab5e2d37869d4 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-30T06:46:13.040474", + "startedAt": "2024-05-30T06:46:12.550140", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/", + "--tasks", + "winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq", + "--batch_size", + "auto", + "--wandb_args", + "project=english-eval,group=exp2,name=global_step10000" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-debug-5-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2326.26113125, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3356.651, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3356.655, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3356.677, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.92833709716797 + } + }, + "memory": { + "total": 1007.4379844665527 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..0396467f7569a8166ce6a4890676d52689b450a7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 38}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..0af65cfe582f3ea45d3b10681420ae879f7f983a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/logs/debug-internal.log @@ -0,0 +1,194 @@ +2024-05-30 06:46:12,573 INFO StreamThr :899 [internal.py:wandb_internal():85] W&B internal server running at pid: 899, started at: 2024-05-30 06:46:12.571028 +2024-05-30 06:46:12,578 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status +2024-05-30 06:46:12,579 INFO WriterThread:899 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/run-gwntrxmv.wandb +2024-05-30 06:46:12,581 DEBUG SenderThread:899 [sender.py:send():378] send: header +2024-05-30 06:46:12,583 DEBUG SenderThread:899 [sender.py:send():378] send: run +2024-05-30 06:46:12,839 INFO SenderThread:899 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files +2024-05-30 06:46:12,839 INFO SenderThread:899 [sender.py:_start_run_threads():1123] run started: gwntrxmv with start time 1717051572.571195 +2024-05-30 06:46:12,844 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: check_version +2024-05-30 06:46:12,844 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: check_version +2024-05-30 06:46:12,962 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: run_start +2024-05-30 06:46:12,964 DEBUG HandlerThread:899 [system_info.py:__init__():26] System info init +2024-05-30 06:46:12,965 DEBUG HandlerThread:899 [system_info.py:__init__():41] System info init done +2024-05-30 06:46:12,965 INFO HandlerThread:899 [system_monitor.py:start():194] Starting system monitor +2024-05-30 06:46:12,965 INFO SystemMonitor:899 [system_monitor.py:_start():158] 
Starting system asset monitoring threads +2024-05-30 06:46:12,965 INFO HandlerThread:899 [system_monitor.py:probe():214] Collecting system info +2024-05-30 06:46:12,972 INFO SystemMonitor:899 [interfaces.py:start():188] Started cpu monitoring +2024-05-30 06:46:12,972 INFO SystemMonitor:899 [interfaces.py:start():188] Started disk monitoring +2024-05-30 06:46:12,973 INFO SystemMonitor:899 [interfaces.py:start():188] Started memory monitoring +2024-05-30 06:46:12,973 INFO SystemMonitor:899 [interfaces.py:start():188] Started network monitoring +2024-05-30 06:46:13,040 DEBUG HandlerThread:899 [system_info.py:probe():150] Probing system +2024-05-30 06:46:13,043 DEBUG HandlerThread:899 [system_info.py:_probe_git():135] Probing git +2024-05-30 06:46:13,053 ERROR HandlerThread:899 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-30 06:46:13,053 DEBUG HandlerThread:899 [system_info.py:_probe_git():143] Probing git done +2024-05-30 06:46:13,053 DEBUG HandlerThread:899 [system_info.py:probe():198] Probing system done +2024-05-30 06:46:13,054 DEBUG HandlerThread:899 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-30T06:46:13.040474', 'startedAt': '2024-05-30T06:46:12.550140', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/', '--tasks', 'winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq', '--batch_size', 'auto', '--wandb_args', 'project=english-eval,group=exp2,name=global_step10000'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-5-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2326.26113125, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3356.651, 'min': 800.0, 'max': 3400.0}, {'current': 3356.655, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3356.677, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.92833709716797}}, 'memory': {'total': 1007.4379844665527}} +2024-05-30 06:46:13,054 INFO 
HandlerThread:899 [system_monitor.py:probe():224] Finished collecting system info +2024-05-30 06:46:13,054 INFO HandlerThread:899 [system_monitor.py:probe():227] Publishing system info +2024-05-30 06:46:13,058 INFO HandlerThread:899 [system_monitor.py:probe():229] Finished publishing system info +2024-05-30 06:46:13,065 DEBUG SenderThread:899 [sender.py:send():378] send: files +2024-05-30 06:46:13,065 INFO SenderThread:899 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-30 06:46:13,240 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: python_packages +2024-05-30 06:46:13,241 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: python_packages +2024-05-30 06:46:13,241 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 06:46:13,242 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: stop_status +2024-05-30 06:46:13,349 DEBUG SenderThread:899 [sender.py:send():378] send: telemetry +2024-05-30 06:46:13,611 INFO wandb-upload_0:899 [upload_job.py:push():130] Uploaded file /tmp/tmpn_z2j8r3wandb/0t5ehw69-wandb-metadata.json +2024-05-30 06:46:13,841 INFO Thread-12 :899 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/requirements.txt +2024-05-30 06:46:13,842 INFO Thread-12 :899 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/output.log +2024-05-30 06:46:13,842 INFO Thread-12 :899 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-metadata.json +2024-05-30 06:46:15,841 INFO Thread-12 :899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/output.log +2024-05-30 06:46:18,354 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 06:46:23,496 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 06:46:23,847 INFO Thread-12 :899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/output.log +2024-05-30 06:46:25,850 INFO Thread-12 :899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/output.log +2024-05-30 06:46:28,242 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 06:46:28,242 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: stop_status +2024-05-30 06:46:29,383 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 06:46:34,383 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 06:46:39,384 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 06:46:43,242 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 06:46:43,242 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: stop_status +2024-05-30 06:46:45,328 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: 
status_report +2024-05-30 06:46:45,921 INFO Thread-12 :899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/config.yaml +2024-05-30 06:46:49,925 INFO Thread-12 :899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/output.log +2024-05-30 06:46:51,271 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 06:46:51,281 DEBUG SenderThread:899 [sender.py:send():378] send: exit +2024-05-30 06:46:51,281 INFO SenderThread:899 [sender.py:send_exit():585] handling exit code: 1 +2024-05-30 06:46:51,281 INFO SenderThread:899 [sender.py:send_exit():587] handling runtime: 38 +2024-05-30 06:46:51,282 INFO SenderThread:899 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-30 06:46:51,282 INFO SenderThread:899 [sender.py:send_exit():593] send defer +2024-05-30 06:46:51,282 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:51,282 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-30 06:46:51,283 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:51,283 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-30 06:46:51,283 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 1 +2024-05-30 06:46:51,283 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:51,283 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-30 06:46:51,283 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:51,283 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-30 06:46:51,283 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 2 +2024-05-30 06:46:51,283 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:51,283 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-30 06:46:51,283 INFO HandlerThread:899 [system_monitor.py:finish():203] Stopping system monitor +2024-05-30 06:46:51,284 DEBUG SystemMonitor:899 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-30 06:46:51,284 DEBUG SystemMonitor:899 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-30 06:46:51,284 DEBUG SystemMonitor:899 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-30 06:46:51,286 INFO HandlerThread:899 [interfaces.py:finish():200] Joined cpu monitor +2024-05-30 06:46:51,286 INFO HandlerThread:899 [interfaces.py:finish():200] Joined disk monitor +2024-05-30 06:46:51,286 INFO HandlerThread:899 [interfaces.py:finish():200] Joined memory monitor +2024-05-30 06:46:51,286 INFO HandlerThread:899 [interfaces.py:finish():200] Joined network monitor +2024-05-30 06:46:51,287 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:51,287 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-30 06:46:51,287 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 3 +2024-05-30 06:46:51,287 DEBUG SenderThread:899 [sender.py:send():378] send: stats +2024-05-30 06:46:51,288 DEBUG 
HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:51,288 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-30 06:46:51,288 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:51,288 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-30 06:46:51,288 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 4 +2024-05-30 06:46:51,288 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:51,288 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-30 06:46:51,289 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:51,289 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-30 06:46:51,289 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 5 +2024-05-30 06:46:51,289 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:51,289 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-30 06:46:51,289 DEBUG SenderThread:899 [sender.py:send():378] send: summary +2024-05-30 06:46:51,290 INFO SenderThread:899 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-30 06:46:51,290 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:51,290 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-30 06:46:51,290 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 6 +2024-05-30 06:46:51,290 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:51,290 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-30 06:46:51,290 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:51,290 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-30 06:46:51,290 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 7 +2024-05-30 06:46:51,290 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 06:46:51,290 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:51,290 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-30 06:46:51,290 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:51,291 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-30 06:46:51,930 INFO Thread-12 :899 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-summary.json +2024-05-30 06:46:52,281 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 06:46:53,404 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 8 +2024-05-30 06:46:53,404 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 06:46:53,405 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:53,405 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-30 06:46:53,405 DEBUG SenderThread:899 
[sender.py:send_request():405] send_request: defer +2024-05-30 06:46:53,405 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-30 06:46:53,405 INFO SenderThread:899 [job_builder.py:build():432] Attempting to build job artifact +2024-05-30 06:46:53,406 INFO SenderThread:899 [job_builder.py:_get_source_type():576] no source found +2024-05-30 06:46:53,406 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 9 +2024-05-30 06:46:53,406 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:53,406 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-30 06:46:53,406 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:53,406 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-30 06:46:53,406 INFO SenderThread:899 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-30 06:46:53,931 INFO SenderThread:899 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/output.log +2024-05-30 06:46:53,931 INFO SenderThread:899 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files +2024-05-30 06:46:53,932 INFO SenderThread:899 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-metadata.json wandb-metadata.json +2024-05-30 06:46:53,932 INFO SenderThread:899 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/config.yaml config.yaml +2024-05-30 06:46:53,932 INFO SenderThread:899 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/requirements.txt requirements.txt +2024-05-30 06:46:53,934 INFO SenderThread:899 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/output.log output.log +2024-05-30 06:46:53,934 INFO SenderThread:899 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-summary.json wandb-summary.json +2024-05-30 06:46:53,934 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 10 +2024-05-30 06:46:53,935 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:53,935 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-30 06:46:53,935 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:53,935 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-30 06:46:53,935 INFO SenderThread:899 [file_pusher.py:finish():169] shutting down file pusher +2024-05-30 06:46:54,281 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 06:46:54,281 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 06:46:54,363 INFO wandb-upload_0:899 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/config.yaml +2024-05-30 06:46:54,590 INFO wandb-upload_1:899 
[upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/requirements.txt +2024-05-30 06:46:54,598 INFO wandb-upload_3:899 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/wandb-summary.json +2024-05-30 06:46:54,604 INFO wandb-upload_2:899 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/files/output.log +2024-05-30 06:46:54,805 INFO Thread-11 (_thread_body):899 [sender.py:transition_state():613] send defer: 11 +2024-05-30 06:46:54,805 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:54,805 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-30 06:46:54,805 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:54,805 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-30 06:46:54,805 INFO SenderThread:899 [file_pusher.py:join():175] waiting for file pusher +2024-05-30 06:46:54,805 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 12 +2024-05-30 06:46:54,805 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:54,805 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-30 06:46:54,806 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:54,806 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-30 06:46:54,806 INFO SenderThread:899 [file_stream.py:finish():601] file stream finish called +2024-05-30 06:46:55,009 INFO SenderThread:899 [file_stream.py:finish():605] file stream finish is done +2024-05-30 06:46:55,009 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 13 +2024-05-30 06:46:55,009 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:55,009 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-30 06:46:55,009 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:55,010 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-30 06:46:55,010 INFO SenderThread:899 [sender.py:transition_state():613] send defer: 14 +2024-05-30 06:46:55,010 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: defer +2024-05-30 06:46:55,010 INFO HandlerThread:899 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-30 06:46:55,010 DEBUG SenderThread:899 [sender.py:send():378] send: final +2024-05-30 06:46:55,010 DEBUG SenderThread:899 [sender.py:send():378] send: footer +2024-05-30 06:46:55,010 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: defer +2024-05-30 06:46:55,010 INFO SenderThread:899 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-30 06:46:55,010 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 06:46:55,010 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 06:46:55,011 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 06:46:55,011 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: server_info +2024-05-30 06:46:55,011 
DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: get_summary +2024-05-30 06:46:55,011 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-30 06:46:55,011 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-30 06:46:55,011 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 06:46:55,012 DEBUG SenderThread:899 [sender.py:send_request():405] send_request: server_info +2024-05-30 06:46:55,063 INFO MainThread:899 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-30 06:46:55,063 INFO MainThread:899 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-30 06:46:55,063 INFO MainThread:899 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-30 06:46:55,064 DEBUG HandlerThread:899 [handler.py:handle_request():158] handle_request: shutdown +2024-05-30 06:46:55,064 INFO HandlerThread:899 [handler.py:finish():882] shutting down handler +2024-05-30 06:46:56,012 INFO WriterThread:899 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/run-gwntrxmv.wandb +2024-05-30 06:46:56,063 INFO SenderThread:899 [sender.py:finish():1545] shutting down sender +2024-05-30 06:46:56,063 INFO SenderThread:899 [file_pusher.py:finish():169] shutting down file pusher +2024-05-30 06:46:56,063 INFO SenderThread:899 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/logs/debug.log b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..443791795e938b20024dcfc30f756c93e5d2b679 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_setup.py:_flush():76] Configure stats pid to 743 +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-30 06:46:12,565 WARNING MainThread:743 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/logs/debug.log +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/logs/debug-internal.log +2024-05-30 06:46:12,565 INFO MainThread:743 
[wandb_init.py:init():560] calling init triggers +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-30 06:46:12,565 INFO MainThread:743 [wandb_init.py:init():610] starting backend +2024-05-30 06:46:12,566 INFO MainThread:743 [wandb_init.py:init():614] setting up manager +2024-05-30 06:46:12,570 INFO MainThread:743 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-30 06:46:12,570 INFO MainThread:743 [wandb_init.py:init():622] backend started and connected +2024-05-30 06:46:12,574 INFO MainThread:743 [wandb_init.py:init():711] updated telemetry +2024-05-30 06:46:12,582 INFO MainThread:743 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-30 06:46:12,844 INFO MainThread:743 [wandb_run.py:_on_init():2396] communicating current version +2024-05-30 06:46:12,956 INFO MainThread:743 [wandb_run.py:_on_init():2405] got version response +2024-05-30 06:46:12,956 INFO MainThread:743 [wandb_init.py:init():795] starting run threads in backend +2024-05-30 06:46:13,242 INFO MainThread:743 [wandb_run.py:_console_start():2374] atexit reg +2024-05-30 06:46:13,242 INFO MainThread:743 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-30 06:46:13,242 INFO MainThread:743 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-30 06:46:13,242 INFO MainThread:743 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-30 06:46:13,244 INFO MainThread:743 [wandb_init.py:init():838] run started, returning control to user process +2024-05-30 06:46:56,064 WARNING MsgRouterThr:743 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/run-gwntrxmv.wandb b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/run-gwntrxmv.wandb new file mode 100644 index 0000000000000000000000000000000000000000..5ea572dfee7833c77d6204b83479342b5ae0f890 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240530_064612-gwntrxmv/run-gwntrxmv.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240605_062502-fvj55jrc/run-fvj55jrc.wandb b/lm-evaluation-harness/wandb/run-20240605_062502-fvj55jrc/run-fvj55jrc.wandb new file mode 100644 index 0000000000000000000000000000000000000000..528de96747f14b5a2fc396d8afda8947f0719fc7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240605_062502-fvj55jrc/run-fvj55jrc.wandb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1bae7e11bfd01f7e28e5317961fb5a9c6615a0ae489b6b111eac79aa5b84365 +size 1423594 diff --git a/lm-evaluation-harness/wandb/run-20240605_123856-sm8adh93/run-sm8adh93.wandb b/lm-evaluation-harness/wandb/run-20240605_123856-sm8adh93/run-sm8adh93.wandb new file mode 100644 index 0000000000000000000000000000000000000000..b2ce0662a15238ec44bf948b9f4a0cfe667b27f7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240605_123856-sm8adh93/run-sm8adh93.wandb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a2c21c9d3e17b1723c83f8f0c354c5b96a2dfbb6d8c4ec367ee621c1747d76b +size 1449115 diff --git a/venv/lib/python3.10/site-packages/responses/__init__.py b/venv/lib/python3.10/site-packages/responses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..43d270e36cb14c59a34a3f73e11fa6ea62d83ad0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses/__init__.py @@ -0,0 +1,852 @@ +from __future__ import 
absolute_import, print_function, division, unicode_literals + +import _io +from http import client +from http import cookies +import json as json_module +import logging +import re +from itertools import groupby + + +from collections import namedtuple +from functools import wraps +from requests.adapters import HTTPAdapter +from requests.exceptions import ConnectionError +from requests.utils import cookiejar_from_dict +from responses.matchers import json_params_matcher as _json_params_matcher +from responses.matchers import urlencoded_params_matcher as _urlencoded_params_matcher +from responses.registries import FirstMatchRegistry +from responses.matchers import query_string_matcher as _query_string_matcher +from warnings import warn + +from collections.abc import Sequence, Sized + +try: + from requests.packages.urllib3.response import HTTPResponse +except ImportError: # pragma: no cover + from urllib3.response import HTTPResponse # pragma: no cover +try: + from requests.packages.urllib3.connection import HTTPHeaderDict +except ImportError: # pragma: no cover + from urllib3.response import HTTPHeaderDict # pragma: no cover +try: + from requests.packages.urllib3.util.url import parse_url +except ImportError: # pragma: no cover + from urllib3.util.url import parse_url # pragma: no cover + + +from urllib.parse import ( + urlparse, + urlunparse, + parse_qsl, + urlsplit, + urlunsplit, + quote, +) + +from io import BytesIO as BufferIO + +from unittest import mock as std_mock + + +Pattern = re.Pattern + +UNSET = object() + +Call = namedtuple("Call", ["request", "response"]) + +_real_send = HTTPAdapter.send + +logger = logging.getLogger("responses") + + +class FalseBool: + # used for backwards compatibility, see + # https://github.com/getsentry/responses/issues/464 + def __bool__(self): + return False + + __nonzero__ = __bool__ + + +def urlencoded_params_matcher(params): + warn( + "Function is deprecated. Use 'from responses.matchers import urlencoded_params_matcher'", + DeprecationWarning, + ) + return _urlencoded_params_matcher(params) + + +def json_params_matcher(params): + warn( + "Function is deprecated. 
Use 'from responses.matchers import json_params_matcher'", + DeprecationWarning, + ) + return _json_params_matcher(params) + + +def _has_unicode(s): + return any(ord(char) > 128 for char in s) + + +def _clean_unicode(url): + # Clean up domain names, which use punycode to handle unicode chars + urllist = list(urlsplit(url)) + netloc = urllist[1] + if _has_unicode(netloc): + domains = netloc.split(".") + for i, d in enumerate(domains): + if _has_unicode(d): + d = "xn--" + d.encode("punycode").decode("ascii") + domains[i] = d + urllist[1] = ".".join(domains) + url = urlunsplit(urllist) + + # Clean up path/query/params, which use url-encoding to handle unicode chars + chars = list(url) + for i, x in enumerate(chars): + if ord(x) > 128: + chars[i] = quote(x) + + return "".join(chars) + + +def _cookies_from_headers(headers): + resp_cookie = cookies.SimpleCookie() + resp_cookie.load(headers["set-cookie"]) + cookies_dict = {name: v.value for name, v in resp_cookie.items()} + + return cookiejar_from_dict(cookies_dict) + + +def get_wrapped(func, responses, registry=None): + if registry is not None: + responses._set_registry(registry) + + @wraps(func) + def wrapper(*args, **kwargs): + with responses: + return func(*args, **kwargs) + + return wrapper + + +class CallList(Sequence, Sized): + def __init__(self): + self._calls = [] + + def __iter__(self): + return iter(self._calls) + + def __len__(self): + return len(self._calls) + + def __getitem__(self, idx): + return self._calls[idx] + + def add(self, request, response): + self._calls.append(Call(request, response)) + + def reset(self): + self._calls = [] + + +def _ensure_url_default_path(url): + if isinstance(url, str): + url_parts = list(urlsplit(url)) + if url_parts[2] == "": + url_parts[2] = "/" + url = urlunsplit(url_parts) + return url + + +def _get_url_and_path(url): + url_parsed = urlparse(url) + url_and_path = urlunparse( + [url_parsed.scheme, url_parsed.netloc, url_parsed.path, None, None, None] + ) + return parse_url(url_and_path).url + + +def _handle_body(body): + if isinstance(body, str): + body = body.encode("utf-8") + if isinstance(body, _io.BufferedReader): + return body + + data = BufferIO(body) + + def is_closed(): + """ + Real Response uses HTTPResponse as body object. 
+ Thus, when method is_closed is called first to check if there is any more + content to consume and the file-like object is still opened + + This method ensures stability to work for both: + https://github.com/getsentry/responses/issues/438 + https://github.com/getsentry/responses/issues/394 + + where file should be intentionally be left opened to continue consumption + """ + if not data.closed and data.read(1): + # if there is more bytes to read then keep open, but return pointer + data.seek(-1, 1) + return False + else: + if not data.closed: + # close but return False to mock like is still opened + data.close() + return False + + # only if file really closed (by us) return True + return True + + data.isclosed = is_closed + return data + + +class BaseResponse(object): + passthrough = False + content_type = None + headers = None + + stream = False + + def __init__(self, method, url, match_querystring=None, match=()): + self.method = method + # ensure the url has a default path set if the url is a string + self.url = _ensure_url_default_path(url) + + if self._should_match_querystring(match_querystring): + match = tuple(match) + (_query_string_matcher(urlparse(self.url).query),) + + self.match = match + self.call_count = 0 + + def __eq__(self, other): + if not isinstance(other, BaseResponse): + return False + + if self.method != other.method: + return False + + # Can't simply do an equality check on the objects directly here since __eq__ isn't + # implemented for regex. It might seem to work as regex is using a cache to return + # the same regex instances, but it doesn't in all cases. + self_url = self.url.pattern if isinstance(self.url, Pattern) else self.url + other_url = other.url.pattern if isinstance(other.url, Pattern) else other.url + + return self_url == other_url + + def __ne__(self, other): + return not self.__eq__(other) + + def _should_match_querystring(self, match_querystring_argument): + if isinstance(self.url, Pattern): + # the old default from <= 0.9.0 + return False + + if match_querystring_argument is not None: + if not isinstance(match_querystring_argument, FalseBool): + warn( + ( + "Argument 'match_querystring' is deprecated. 
" + "Use 'responses.matchers.query_param_matcher' or " + "'responses.matchers.query_string_matcher'" + ), + DeprecationWarning, + ) + return match_querystring_argument + + return bool(urlparse(self.url).query) + + def _url_matches(self, url, other): + if isinstance(url, str): + if _has_unicode(url): + url = _clean_unicode(url) + + return _get_url_and_path(url) == _get_url_and_path(other) + + elif isinstance(url, Pattern) and url.match(other): + return True + + else: + return False + + @staticmethod + def _req_attr_matches(match, request): + for matcher in match: + valid, reason = matcher(request) + if not valid: + return False, reason + + return True, "" + + def get_headers(self): + headers = HTTPHeaderDict() # Duplicate headers are legal + if self.content_type is not None: + headers["Content-Type"] = self.content_type + if self.headers: + headers.extend(self.headers) + return headers + + def get_response(self, request): + raise NotImplementedError + + def matches(self, request): + if request.method != self.method: + return False, "Method does not match" + + if not self._url_matches(self.url, request.url): + return False, "URL does not match" + + valid, reason = self._req_attr_matches(self.match, request) + if not valid: + return False, reason + + return True, "" + + +class Response(BaseResponse): + def __init__( + self, + method, + url, + body="", + json=None, + status=200, + headers=None, + stream=None, + content_type=UNSET, + auto_calculate_content_length=False, + **kwargs + ): + # if we were passed a `json` argument, + # override the body and content_type + if json is not None: + assert not body + body = json_module.dumps(json) + if content_type is UNSET: + content_type = "application/json" + + if content_type is UNSET: + if isinstance(body, str) and _has_unicode(body): + content_type = "text/plain; charset=utf-8" + else: + content_type = "text/plain" + + self.body = body + self.status = status + self.headers = headers + + if stream is not None: + warn( + "stream argument is deprecated. Use stream parameter in request directly", + DeprecationWarning, + ) + + self.stream = stream + self.content_type = content_type + self.auto_calculate_content_length = auto_calculate_content_length + super(Response, self).__init__(method, url, **kwargs) + + def get_response(self, request): + if self.body and isinstance(self.body, Exception): + raise self.body + + headers = self.get_headers() + status = self.status + body = _handle_body(self.body) + + if ( + self.auto_calculate_content_length + and isinstance(body, BufferIO) + and "Content-Length" not in headers + ): + content_length = len(body.getvalue()) + headers["Content-Length"] = str(content_length) + + return HTTPResponse( + status=status, + reason=client.responses.get(status, None), + body=body, + headers=headers, + original_response=OriginalResponseShim(headers), + preload_content=False, + ) + + def __repr__(self): + return ( + "".format( + url=self.url, + status=self.status, + content_type=self.content_type, + headers=json_module.dumps(self.headers), + ) + ) + + +class CallbackResponse(BaseResponse): + def __init__( + self, method, url, callback, stream=None, content_type="text/plain", **kwargs + ): + self.callback = callback + + if stream is not None: + warn( + "stream argument is deprecated. 
Use stream parameter in request directly", + DeprecationWarning, + ) + self.stream = stream + self.content_type = content_type + super(CallbackResponse, self).__init__(method, url, **kwargs) + + def get_response(self, request): + headers = self.get_headers() + + result = self.callback(request) + if isinstance(result, Exception): + raise result + + status, r_headers, body = result + if isinstance(body, Exception): + raise body + + # If the callback set a content-type remove the one + # set in add_callback() so that we don't have multiple + # content type values. + has_content_type = False + if isinstance(r_headers, dict) and "Content-Type" in r_headers: + has_content_type = True + elif isinstance(r_headers, list): + has_content_type = any( + [h for h in r_headers if h and h[0].lower() == "content-type"] + ) + if has_content_type: + headers.pop("Content-Type", None) + + body = _handle_body(body) + headers.extend(r_headers) + + return HTTPResponse( + status=status, + reason=client.responses.get(status, None), + body=body, + headers=headers, + original_response=OriginalResponseShim(headers), + preload_content=False, + ) + + +class PassthroughResponse(BaseResponse): + passthrough = True + + +class OriginalResponseShim(object): + """ + Shim for compatibility with older versions of urllib3 + + requests cookie handling depends on responses having a property chain of + `response._original_response.msg` which contains the response headers [1] + + Using HTTPResponse() for this purpose causes compatibility errors with + urllib3<1.23.0. To avoid adding more dependencies we can use this shim. + + [1]: https://github.com/psf/requests/blob/75bdc998e2d/requests/cookies.py#L125 + """ + + def __init__(self, headers): + self.msg = headers + + def isclosed(self): + return True + + def close(self): + return + + +class RequestsMock(object): + DELETE = "DELETE" + GET = "GET" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + PATCH = "PATCH" + POST = "POST" + PUT = "PUT" + response_callback = None + + def __init__( + self, + assert_all_requests_are_fired=True, + response_callback=None, + passthru_prefixes=(), + target="requests.adapters.HTTPAdapter.send", + registry=FirstMatchRegistry, + ): + self._calls = CallList() + self.reset() + self._registry = registry() # call only after reset + self.assert_all_requests_are_fired = assert_all_requests_are_fired + self.response_callback = response_callback + self.passthru_prefixes = tuple(passthru_prefixes) + self.target = target + self._patcher = None + + def _get_registry(self): + return self._registry + + def _set_registry(self, new_registry): + if self.registered(): + err_msg = ( + "Cannot replace Registry, current registry has responses.\n" + "Run 'responses.registry.reset()' first" + ) + raise AttributeError(err_msg) + + self._registry = new_registry() + + def reset(self): + self._registry = FirstMatchRegistry() + self._calls.reset() + self.passthru_prefixes = () + + def add( + self, + method=None, # method or ``Response`` + url=None, + body="", + adding_headers=None, + *args, + **kwargs + ): + """ + >>> import responses + + A basic request: + >>> responses.add(responses.GET, 'http://example.com') + + You can also directly pass an object which implements the + ``BaseResponse`` interface: + + >>> responses.add(Response(...)) + + A JSON payload: + + >>> responses.add( + >>> method='GET', + >>> url='http://example.com', + >>> json={'foo': 'bar'}, + >>> ) + + Custom headers: + + >>> responses.add( + >>> method='GET', + >>> url='http://example.com', + >>> headers={'X-Header': 'foo'}, 
+ >>> ) + + """ + if isinstance(method, BaseResponse): + self._registry.add(method) + return + + if adding_headers is not None: + kwargs.setdefault("headers", adding_headers) + + self._registry.add(Response(method=method, url=url, body=body, **kwargs)) + + def add_passthru(self, prefix): + """ + Register a URL prefix or regex to passthru any non-matching mock requests to. + + For example, to allow any request to 'https://example.com', but require + mocks for the remainder, you would add the prefix as so: + + >>> import responses + >>> responses.add_passthru('https://example.com') + + Regex can be used like: + + >>> responses.add_passthru(re.compile('https://example.com/\\w+')) + """ + if not isinstance(prefix, Pattern) and _has_unicode(prefix): + prefix = _clean_unicode(prefix) + self.passthru_prefixes += (prefix,) + + def remove(self, method_or_response=None, url=None): + """ + Removes a response previously added using ``add()``, identified + either by a response object inheriting ``BaseResponse`` or + ``method`` and ``url``. Removes all matching responses. + + >>> import responses + >>> responses.add(responses.GET, 'http://example.org') + >>> responses.remove(responses.GET, 'http://example.org') + """ + if isinstance(method_or_response, BaseResponse): + response = method_or_response + else: + response = BaseResponse(method=method_or_response, url=url) + + self._registry.remove(response) + + def replace(self, method_or_response=None, url=None, body="", *args, **kwargs): + """ + Replaces a response previously added using ``add()``. The signature + is identical to ``add()``. The response is identified using ``method`` + and ``url``, and the first matching response is replaced. + + >>> import responses + >>> responses.add(responses.GET, 'http://example.org', json={'data': 1}) + >>> responses.replace(responses.GET, 'http://example.org', json={'data': 2}) + """ + if isinstance(method_or_response, BaseResponse): + url = method_or_response.url + response = method_or_response + else: + response = Response(method=method_or_response, url=url, body=body, **kwargs) + + self._registry.replace(response) + + def upsert(self, method_or_response=None, url=None, body="", *args, **kwargs): + """ + Replaces a response previously added using ``add()``, or adds the response + if no response exists. Responses are matched using ``method``and ``url``. + The first matching response is replaced. 
+ + >>> import responses + >>> responses.add(responses.GET, 'http://example.org', json={'data': 1}) + >>> responses.upsert(responses.GET, 'http://example.org', json={'data': 2}) + """ + try: + self.replace(method_or_response, url, body, *args, **kwargs) + except ValueError: + self.add(method_or_response, url, body, *args, **kwargs) + + def add_callback( + self, + method, + url, + callback, + match_querystring=FalseBool(), + content_type="text/plain", + match=(), + ): + # ensure the url has a default path set if the url is a string + # url = _ensure_url_default_path(url, match_querystring) + + self._registry.add( + CallbackResponse( + url=url, + method=method, + callback=callback, + content_type=content_type, + match_querystring=match_querystring, + match=match, + ) + ) + + def registered(self): + return self._registry.registered + + @property + def calls(self): + return self._calls + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, traceback): + success = type is None + self.stop(allow_assert=success) + self.reset() + return success + + def activate(self, func=None, registry=None): + if func is not None: + return get_wrapped(func, self) + + def deco_activate(func): + return get_wrapped(func, self, registry) + + return deco_activate + + def _find_match(self, request): + """ + Iterates through all available matches and validates if any of them matches the request + + :param request: (PreparedRequest), request object + :return: + (Response) found match. If multiple found, then remove & return the first match. + (list) list with reasons why other matches don't match + """ + return self._registry.find(request) + + def _parse_request_params(self, url): + params = {} + for key, val in groupby(parse_qsl(urlparse(url).query), lambda kv: kv[0]): + values = list(map(lambda x: x[1], val)) + if len(values) == 1: + values = values[0] + params[key] = values + return params + + def _on_request(self, adapter, request, **kwargs): + # add attributes params and req_kwargs to 'request' object for further match comparison + # original request object does not have these attributes + request.params = self._parse_request_params(request.path_url) + request.req_kwargs = kwargs + + match, match_failed_reasons = self._find_match(request) + resp_callback = self.response_callback + + if match is None: + if any( + [ + p.match(request.url) + if isinstance(p, Pattern) + else request.url.startswith(p) + for p in self.passthru_prefixes + ] + ): + logger.info("request.allowed-passthru", extra={"url": request.url}) + return _real_send(adapter, request, **kwargs) + + error_msg = ( + "Connection refused by Responses - the call doesn't " + "match any registered mock.\n\n" + "Request: \n" + "- %s %s\n\n" + "Available matches:\n" % (request.method, request.url) + ) + for i, m in enumerate(self.registered()): + error_msg += "- {} {} {}\n".format( + m.method, m.url, match_failed_reasons[i] + ) + + response = ConnectionError(error_msg) + response.request = request + + self._calls.add(request, response) + response = resp_callback(response) if resp_callback else response + raise response + + if match.passthrough: + logger.info("request.passthrough-response", extra={"url": request.url}) + response = _real_send(adapter, request, **kwargs) + else: + try: + response = adapter.build_response(request, match.get_response(request)) + except BaseException as response: + match.call_count += 1 + self._calls.add(request, response) + response = resp_callback(response) if resp_callback else response + raise + + 
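# Note (annotation, not part of the committed file): the happy path of
# _on_request continues below -- the optional response_callback wraps the
# built response, the matched mock's call_count is incremented, and the
# request/response pair is recorded in self._calls so tests can assert
# on responses.calls afterwards.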
response = resp_callback(response) if resp_callback else response + match.call_count += 1 + self._calls.add(request, response) + return response + + def start(self): + def unbound_on_send(adapter, request, *a, **kwargs): + return self._on_request(adapter, request, *a, **kwargs) + + self._patcher = std_mock.patch(target=self.target, new=unbound_on_send) + self._patcher.start() + + def stop(self, allow_assert=True): + self._patcher.stop() + if not self.assert_all_requests_are_fired: + return + + if not allow_assert: + return + + not_called = [m for m in self.registered() if m.call_count == 0] + if not_called: + raise AssertionError( + "Not all requests have been executed {0!r}".format( + [(match.method, match.url) for match in not_called] + ) + ) + + def assert_call_count(self, url, count): + call_count = len( + [ + 1 + for call in self.calls + if call.request.url == _ensure_url_default_path(url) + ] + ) + if call_count == count: + return True + else: + raise AssertionError( + "Expected URL '{0}' to be called {1} times. Called {2} times.".format( + url, count, call_count + ) + ) + + +# expose default mock namespace +mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False) +__all__ = [ + "CallbackResponse", + "Response", + "RequestsMock", + # Exposed by the RequestsMock class: + "activate", + "add", + "add_callback", + "add_passthru", + "assert_all_requests_are_fired", + "assert_call_count", + "calls", + "DELETE", + "GET", + "HEAD", + "OPTIONS", + "passthru_prefixes", + "PATCH", + "POST", + "PUT", + "registered", + "remove", + "replace", + "reset", + "response_callback", + "start", + "stop", + "target", + "upsert", +] + +activate = _default_mock.activate +add = _default_mock.add +add_callback = _default_mock.add_callback +add_passthru = _default_mock.add_passthru +assert_all_requests_are_fired = _default_mock.assert_all_requests_are_fired +assert_call_count = _default_mock.assert_call_count +calls = _default_mock.calls +DELETE = _default_mock.DELETE +GET = _default_mock.GET +HEAD = _default_mock.HEAD +OPTIONS = _default_mock.OPTIONS +passthru_prefixes = _default_mock.passthru_prefixes +PATCH = _default_mock.PATCH +POST = _default_mock.POST +PUT = _default_mock.PUT +registered = _default_mock.registered +remove = _default_mock.remove +replace = _default_mock.replace +reset = _default_mock.reset +response_callback = _default_mock.response_callback +start = _default_mock.start +stop = _default_mock.stop +target = _default_mock.target +upsert = _default_mock.upsert diff --git a/venv/lib/python3.10/site-packages/responses/__init__.pyi b/venv/lib/python3.10/site-packages/responses/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..183f452b3ec66c89af9f97027e8ed1e7d1c08708 --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses/__init__.pyi @@ -0,0 +1,352 @@ +from collections import Sequence, Sized +from typing import ( + Any, + Callable, + Iterator, + Mapping, + Optional, + NamedTuple, + Protocol, + TypeVar, + Dict, + List, + Tuple, + Union, + Iterable, + overload, + Type +) + +from io import BufferedReader, BytesIO +from re import Pattern +from requests.adapters import HTTPResponse, PreparedRequest +from requests.cookies import RequestsCookieJar +from typing_extensions import Literal +from unittest import mock as std_mock +from urllib.parse import quote as quote +from urllib3.response import HTTPHeaderDict # type: ignore # Not currently exposed in typestubs. 
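For orientation, a minimal usage sketch of the module-level API assembled at the end of `__init__.py` above; the URL, payload, and test name are illustrative, not taken from this repository:

# Minimal, illustrative sketch of the module-level responses API.
import requests
import responses


@responses.activate
def test_basic_mock():
    # Register a mock; json= sets the body and a JSON content type.
    responses.add(responses.GET, "http://example.com/", json={"foo": "bar"})

    resp = requests.get("http://example.com/")
    assert resp.json() == {"foo": "bar"}

    # Each intercepted request/response pair is recorded on `responses.calls`.
    assert len(responses.calls) == 1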
+ +from .matchers import urlencoded_params_matcher, json_params_matcher + + +def _clean_unicode(url: str) -> str: ... +def _cookies_from_headers(headers: Dict[str, str]) -> RequestsCookieJar: ... +def _ensure_str(s: str) -> str: ... +def _ensure_url_default_path( + url: Union[Pattern[str], str] +) -> Union[Pattern[str], str]: ... +def _get_url_and_path(url: str) -> str: ... +def _handle_body( + body: Optional[Union[bytes, BufferedReader, str]] +) -> Union[BufferedReader, BytesIO]: ... +def _has_unicode(s: str) -> bool: ... +def _is_string(s: Union[Pattern[str], str]) -> bool: ... +def get_wrapped( + func: Callable[..., Any], responses: RequestsMock, registry: Optional[Any] +) -> Callable[..., Any]: ... + + +class Call(NamedTuple): + request: PreparedRequest + response: Any + +_Body = Union[str, BaseException, "Response", BufferedReader, bytes] + +MatcherIterable = Iterable[Callable[[Any], Callable[..., Any]]] + +class CallList(Sequence[Call], Sized): + def __init__(self) -> None: + self._calls = List[Call] + ... + def __iter__(self) -> Iterator[Call]: ... + def __len__(self) -> int: ... + def __getitem__(self, idx: int) -> Call: ... # type: ignore [override] + def add(self, request: PreparedRequest, response: _Body) -> None: ... + def reset(self) -> None: ... + +class FalseBool: + def __bool__(self) -> bool: ... + +class BaseResponse: + passthrough: bool = ... + content_type: Optional[str] = ... + headers: Optional[Mapping[str, str]] = ... + stream: bool = ... + method: Any = ... + url: Any = ... + match_querystring: Any = ... + match: MatcherIterable = ... + call_count: int = ... + def __init__( + self, + method: str, + url: Union[Pattern[str], str], + match_querystring: Union[bool, object] = ..., + match: MatcherIterable = ..., + ) -> None: ... + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + def _req_attr_matches( + self, match: MatcherIterable, request: PreparedRequest + ) -> Tuple[bool, str]: ... + def _should_match_querystring( + self, match_querystring_argument: Union[bool, object] + ) -> bool: ... + def _url_matches( + self, url: Union[Pattern[str], str], other: str, match_querystring: bool = ... + ) -> bool: ... + def _url_matches_strict(self, url: str, other: str) -> bool: ... + def get_headers(self) -> HTTPHeaderDict: ... # type: ignore + def get_response(self, request: PreparedRequest) -> None: ... + def matches(self, request: PreparedRequest) -> Tuple[bool, str]: ... + +class Response(BaseResponse): + body: _Body = ... + status: int = ... + headers: Optional[Mapping[str, str]] = ... + stream: bool = ... + content_type: Optional[str] = ... + auto_calculate_content_length: bool = ... + def __init__( + self, + method: str, + url: Union[Pattern[str], str], + body: _Body = ..., + json: Optional[Any] = ..., + status: int = ..., + headers: Optional[Mapping[str, str]] = ..., + stream: bool = ..., + content_type: Optional[str] = ..., + auto_calculate_content_length: bool = ..., + match_querystring: bool = ..., + match: MatcherIterable = ..., + ) -> None: ... + def get_response( # type: ignore [override] + self, request: PreparedRequest + ) -> HTTPResponse: ... + +class CallbackResponse(BaseResponse): + callback: Callable[[Any], Any] = ... + stream: bool = ... + content_type: Optional[str] = ... 
+ def __init__( + self, + method: str, + url: Union[Pattern[str], str], + callback: Callable[[Any], Any], + stream: bool = ..., + content_type: Optional[str] = ..., + match_querystring: Union[bool, FalseBool] = ..., + match: MatcherIterable = ..., + ) -> None: ... + def get_response( # type: ignore [override] + self, request: PreparedRequest + ) -> HTTPResponse: ... + +class PassthroughResponse(BaseResponse): + passthrough: bool = ... + +class OriginalResponseShim: + msg: Any = ... + def __init__( # type: ignore [no-any-unimported] + self, headers: HTTPHeaderDict + ) -> None: ... + def isclosed(self) -> bool: ... + +_F = TypeVar("_F", bound=Callable[..., Any]) + +class RequestsMock: + DELETE: Literal["DELETE"] + GET: Literal["GET"] + HEAD: Literal["HEAD"] + OPTIONS: Literal["OPTIONS"] + PATCH: Literal["PATCH"] + POST: Literal["POST"] + PUT: Literal["PUT"] + response_callback: Optional[Callable[[Any], Any]] = ... + assert_all_requests_are_fired: Any = ... + passthru_prefixes: Tuple[Union[str, Pattern[str]], ...] = ... + target: Any = ... + _matches: List[Any] + def __init__( + self, + assert_all_requests_are_fired: bool = ..., + response_callback: Optional[Callable[[Any], Any]] = ..., + passthru_prefixes: Tuple[str, ...] = ..., + target: str = ..., + registry: Any = ..., + ) -> None: + self._patcher = Callable[[Any], Any] + self._calls = CallList + ... + def reset(self) -> None: ... + add: _Add + add_passthru: _AddPassthru + def remove( + self, + method_or_response: Optional[Union[str, Response]] = ..., + url: Optional[Union[Pattern[str], str]] = ..., + ) -> None: ... + replace: _Replace + upsert: _Upsert + add_callback: _AddCallback + @property + def calls(self) -> CallList: ... + def __enter__(self) -> RequestsMock: ... + def __exit__(self, type: Any, value: Any, traceback: Any) -> bool: ... + def activate(self, func: Optional[_F], registry: Optional[Any]) -> _F: ... + def start(self) -> None: ... + def stop(self, allow_assert: bool = ...) -> None: ... + def assert_call_count(self, url: str, count: int) -> bool: ... + def registered(self) -> List[Any]: ... + def _set_registry(self, registry: Any) -> None: ... + def _get_registry(self) -> Any: ... + + +HeaderSet = Optional[Union[Mapping[str, str], List[Tuple[str, str]]]] + +class _Add(Protocol): + def __call__( + self, + method: Optional[Union[str, BaseResponse]] = ..., + url: Optional[Union[Pattern[str], str]] = ..., + body: _Body = ..., + json: Optional[Any] = ..., + status: int = ..., + headers: HeaderSet = ..., + stream: bool = ..., + content_type: Optional[str] = ..., + auto_calculate_content_length: bool = ..., + adding_headers: HeaderSet = ..., + match_querystring: bool = ..., + match: MatcherIterable = ..., + ) -> None: ... + +class _AddCallback(Protocol): + def __call__( + self, + method: str, + url: Union[Pattern[str], str], + callback: Callable[[PreparedRequest], Union[Exception, Tuple[int, Mapping[str, str], _Body]]], + match_querystring: bool = ..., + content_type: Optional[str] = ..., + match: MatcherIterable = ..., + ) -> None: ... + +class _AddPassthru(Protocol): + def __call__( + self, prefix: Union[Pattern[str], str] + ) -> None: ... + +class _Remove(Protocol): + def __call__( + self, + method_or_response: Optional[Union[str, BaseResponse]] = ..., + url: Optional[Union[Pattern[str], str]] = ..., + ) -> None: ... 
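The `_AddCallback` protocol below types `add_callback`; a small illustrative sketch of a callback returning the expected `(status, headers, body)` tuple (the URL and header name are made up):

# Illustrative add_callback registration matching the _AddCallback protocol:
# the callback receives the PreparedRequest and returns (status, headers, body).
import json

import requests
import responses


@responses.activate
def test_callback_mock():
    def echo_callback(request):
        payload = json.loads(request.body)
        return 200, {"X-Example": "1"}, json.dumps({"echo": payload})

    responses.add_callback(
        responses.POST,
        "http://example.com/echo",
        callback=echo_callback,
        content_type="application/json",
    )

    resp = requests.post("http://example.com/echo", json={"value": 42})
    assert resp.json() == {"echo": {"value": 42}}
    assert resp.headers["X-Example"] == "1"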
+ +class _Replace(Protocol): + def __call__( + self, + method_or_response: Optional[Union[str, BaseResponse]] = ..., + url: Optional[Union[Pattern[str], str]] = ..., + body: _Body = ..., + json: Optional[Any] = ..., + status: int = ..., + headers: HeaderSet = ..., + stream: bool = ..., + content_type: Optional[str] = ..., + adding_headers: HeaderSet = ..., + match_querystring: bool = ..., + match: MatcherIterable = ..., + ) -> None: ... + +class _Upsert(Protocol): + def __call__( + self, + method: Optional[Union[str, BaseResponse]] = ..., + url: Optional[Union[Pattern[str], str]] = ..., + body: _Body = ..., + json: Optional[Any] = ..., + status: int = ..., + headers: HeaderSet = ..., + stream: bool = ..., + content_type: Optional[str] = ..., + adding_headers: HeaderSet = ..., + match_querystring: bool = ..., + match: MatcherIterable = ..., + ) -> None: ... + +class _Registered(Protocol): + def __call__(self) -> List[Response]: ... + + +class _Activate(Protocol): + # see https://github.com/getsentry/responses/pull/469 for more details + + @overload + def __call__(self, func: _F = ...) -> _F: ... + # use this overload for scenario when 'responses.activate' is used + + @overload + def __call__(self, registry: Type[Any] = ...) -> Callable[['_F'], '_F']: ... + # use this overload for scenario when 'responses.activate(registry=)' is used + + +activate: _Activate +add: _Add +add_callback: _AddCallback +add_passthru: _AddPassthru +assert_all_requests_are_fired: bool +assert_call_count: Callable[[str, int], bool] +calls: CallList +DELETE: Literal["DELETE"] +GET: Literal["GET"] +HEAD: Literal["HEAD"] +mock: RequestsMock +_default_mock: RequestsMock +OPTIONS: Literal["OPTIONS"] +passthru_prefixes: Tuple[str, ...] +PATCH: Literal["PATCH"] +POST: Literal["POST"] +PUT: Literal["PUT"] +registered: _Registered +remove: _Remove +replace: _Replace +reset: Callable[[], None] +response_callback: Callable[[Any], Any] +start: Callable[[], None] +stop: Callable[..., None] +target: Any +upsert: _Upsert + +__all__ = [ + "CallbackResponse", + "Response", + "RequestsMock", + # Exposed by the RequestsMock class: + "activate", + "add", + "add_callback", + "add_passthru", + "assert_all_requests_are_fired", + "assert_call_count", + "calls", + "DELETE", + "GET", + "HEAD", + "OPTIONS", + "passthru_prefixes", + "PATCH", + "POST", + "PUT", + "registered", + "remove", + "replace", + "reset", + "response_callback", + "start", + "stop", + "target", + "upsert", +] diff --git a/venv/lib/python3.10/site-packages/responses/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/responses/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeddf2c24282ecf416ad848c93964258d555234a Binary files /dev/null and b/venv/lib/python3.10/site-packages/responses/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/responses/__pycache__/matchers.cpython-310.pyc b/venv/lib/python3.10/site-packages/responses/__pycache__/matchers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00bde7eac15f9d9cd26f19f7f31af7c250f26335 Binary files /dev/null and b/venv/lib/python3.10/site-packages/responses/__pycache__/matchers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/responses/__pycache__/registries.cpython-310.pyc b/venv/lib/python3.10/site-packages/responses/__pycache__/registries.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8878ac4df46a6f7ad439229cea2ef7342cd2ce78 Binary files /dev/null and b/venv/lib/python3.10/site-packages/responses/__pycache__/registries.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/responses/__pycache__/test_matchers.cpython-310.pyc b/venv/lib/python3.10/site-packages/responses/__pycache__/test_matchers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd5df1a5cf0caa314e7845b66484cf4d82363f96 Binary files /dev/null and b/venv/lib/python3.10/site-packages/responses/__pycache__/test_matchers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/responses/__pycache__/test_registries.cpython-310.pyc b/venv/lib/python3.10/site-packages/responses/__pycache__/test_registries.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4153479e0f62aec2d92a7c7c2ba9403ef1134092 Binary files /dev/null and b/venv/lib/python3.10/site-packages/responses/__pycache__/test_registries.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/responses/__pycache__/test_responses.cpython-310.pyc b/venv/lib/python3.10/site-packages/responses/__pycache__/test_responses.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3911b0d4dfcf9006582aa7e481e1fde350d73453 Binary files /dev/null and b/venv/lib/python3.10/site-packages/responses/__pycache__/test_responses.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/responses/matchers.py b/venv/lib/python3.10/site-packages/responses/matchers.py new file mode 100644 index 0000000000000000000000000000000000000000..893edc19206e637689e5016a9900afef6b472ecc --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses/matchers.py @@ -0,0 +1,325 @@ +import json as json_module + +from requests import PreparedRequest +from urllib.parse import parse_qsl, urlparse +from requests.packages.urllib3.util.url import parse_url +from json.decoder import JSONDecodeError + + +def _create_key_val_str(input_dict): + """ + Returns string of format {'key': val, 'key2': val2} + Function is called recursively for nested dictionaries + + :param input_dict: dictionary to transform + :return: (str) reformatted string + """ + + def list_to_str(input_list): + """ + Convert all list items to string. 
+ Function is called recursively for nested lists + """ + converted_list = [] + for item in sorted(input_list, key=lambda x: str(x)): + if isinstance(item, dict): + item = _create_key_val_str(item) + elif isinstance(item, list): + item = list_to_str(item) + + converted_list.append(str(item)) + list_str = ", ".join(converted_list) + return "[" + list_str + "]" + + items_list = [] + for key in sorted(input_dict.keys(), key=lambda x: str(x)): + val = input_dict[key] + if isinstance(val, dict): + val = _create_key_val_str(val) + elif isinstance(val, list): + val = list_to_str(input_list=val) + + items_list.append("{}: {}".format(key, val)) + + key_val_str = "{{{}}}".format(", ".join(items_list)) + return key_val_str + + +def urlencoded_params_matcher(params): + """ + Matches URL encoded data + + :param params: (dict) data provided to 'data' arg of request + :return: (func) matcher + """ + + def match(request): + reason = "" + request_body = request.body + qsl_body = dict(parse_qsl(request_body)) if request_body else {} + params_dict = params or {} + valid = params is None if request_body is None else params_dict == qsl_body + if not valid: + reason = "request.body doesn't match: {} doesn't match {}".format( + _create_key_val_str(qsl_body), _create_key_val_str(params_dict) + ) + + return valid, reason + + return match + + +def json_params_matcher(params): + """ + Matches JSON encoded data + + :param params: (dict) JSON data provided to 'json' arg of request + :return: (func) matcher + """ + + def match(request): + reason = "" + request_body = request.body + params_dict = params or {} + try: + if isinstance(request_body, bytes): + request_body = request_body.decode("utf-8") + json_body = json_module.loads(request_body) if request_body else {} + + valid = params is None if request_body is None else params_dict == json_body + + if not valid: + reason = "request.body doesn't match: {} doesn't match {}".format( + _create_key_val_str(json_body), _create_key_val_str(params_dict) + ) + + except JSONDecodeError: + valid = False + reason = ( + "request.body doesn't match: JSONDecodeError: Cannot parse request.body" + ) + + return valid, reason + + return match + + +def fragment_identifier_matcher(identifier): + def match(request): + reason = "" + url_fragment = urlparse(request.url).fragment + if identifier: + url_fragment_qsl = sorted(parse_qsl(url_fragment)) + identifier_qsl = sorted(parse_qsl(identifier)) + valid = identifier_qsl == url_fragment_qsl + else: + valid = not url_fragment + + if not valid: + reason = "URL fragment identifier is different: {} doesn't match {}".format( + identifier, url_fragment + ) + return valid, reason + + return match + + +def query_param_matcher(params): + """ + Matcher to match 'params' argument in request + + :param params: (dict), same as provided to request + :return: (func) matcher + """ + + def match(request): + reason = "" + request_params = request.params + request_params_dict = request_params or {} + params_dict = params or {} + valid = ( + params is None + if request_params is None + else params_dict == request_params_dict + ) + + if not valid: + reason = "Parameters do not match. 
{} doesn't match {}".format( + _create_key_val_str(request_params_dict), + _create_key_val_str(params_dict), + ) + + return valid, reason + + return match + + +def query_string_matcher(query): + """ + Matcher to match query string part of request + + :param query: (str), same as constructed by request + :return: (func) matcher + """ + + def match(request): + reason = "" + data = parse_url(request.url) + request_query = data.query + + request_qsl = sorted(parse_qsl(request_query)) if request_query else {} + matcher_qsl = sorted(parse_qsl(query)) if query else {} + + valid = not query if request_query is None else request_qsl == matcher_qsl + + if not valid: + reason = "Query string doesn't match. {} doesn't match {}".format( + _create_key_val_str(dict(request_qsl)), + _create_key_val_str(dict(matcher_qsl)), + ) + + return valid, reason + + return match + + +def request_kwargs_matcher(kwargs): + """ + Matcher to match keyword arguments provided to request + + :param kwargs: (dict), keyword arguments, same as provided to request + :return: (func) matcher + """ + + def match(request): + reason = "" + kwargs_dict = kwargs or {} + # validate only kwargs that were requested for comparison, skip defaults + request_kwargs = { + k: v for k, v in request.req_kwargs.items() if k in kwargs_dict + } + + valid = ( + not kwargs_dict + if not request_kwargs + else sorted(kwargs.items()) == sorted(request_kwargs.items()) + ) + + if not valid: + reason = "Arguments don't match: {} doesn't match {}".format( + _create_key_val_str(request_kwargs), _create_key_val_str(kwargs_dict) + ) + + return valid, reason + + return match + + +def multipart_matcher(files, data=None): + """ + Matcher to match 'multipart/form-data' content-type. + This function constructs request body and headers from provided 'data' and 'files' + arguments and compares to actual request + + :param files: (dict), same as provided to request + :param data: (dict), same as provided to request + :return: (func) matcher + """ + if not files: + raise TypeError("files argument cannot be empty") + + prepared = PreparedRequest() + prepared.headers = {"Content-Type": ""} + prepared.prepare_body(data=data, files=files) + + def get_boundary(content_type): + """ + Parse 'boundary' value from header. + + :param content_type: (str) headers["Content-Type"] value + :return: (str) boundary value + """ + if "boundary=" not in content_type: + return "" + + return content_type.split("boundary=")[1] + + def match(request): + reason = "multipart/form-data doesn't match. 
" + if "Content-Type" not in request.headers: + return False, reason + "Request is missing the 'Content-Type' header" + + request_boundary = get_boundary(request.headers["Content-Type"]) + prepared_boundary = get_boundary(prepared.headers["Content-Type"]) + + # replace boundary value in header and in body, since by default + # urllib3.filepost.encode_multipart_formdata dynamically calculates + # random boundary alphanumeric value + request_content_type = request.headers["Content-Type"] + prepared_content_type = prepared.headers["Content-Type"].replace( + prepared_boundary, request_boundary + ) + + request_body = request.body + prepared_body = prepared.body + + if isinstance(prepared_body, bytes): + # since headers always come as str, need to convert to bytes + prepared_boundary = prepared_boundary.encode("utf-8") + request_boundary = request_boundary.encode("utf-8") + + prepared_body = prepared_body.replace(prepared_boundary, request_boundary) + + headers_valid = prepared_content_type == request_content_type + if not headers_valid: + return ( + False, + reason + + "Request headers['Content-Type'] is different. {} isn't equal to {}".format( + request_content_type, prepared_content_type + ), + ) + + body_valid = prepared_body == request_body + if not body_valid: + return False, reason + "Request body differs. {} aren't equal {}".format( + request_body, prepared_body + ) + + return True, "" + + return match + + +def header_matcher(headers, strict_match=False): + """ + Matcher to match 'headers' argument in request using the responses library. + + Because ``requests`` will send several standard headers in addition to what + was specified by your code, request headers that are additional to the ones + passed to the matcher are ignored by default. You can change this behaviour + by passing ``strict_match=True``. + + :param headers: (dict), same as provided to request + :param strict_match: (bool), whether headers in addition to those specified + in the matcher should cause the match to fail. + :return: (func) matcher + """ + + def match(request): + request_headers = request.headers or {} + + if not strict_match: + # filter down to just the headers specified in the matcher + request_headers = {k: v for k, v in request_headers.items() if k in headers} + + valid = sorted(headers.items()) == sorted(request_headers.items()) + + if not valid: + return False, "Headers do not match: {} doesn't match {}".format( + _create_key_val_str(request_headers), _create_key_val_str(headers) + ) + + return valid, "" + + return match diff --git a/venv/lib/python3.10/site-packages/responses/matchers.pyi b/venv/lib/python3.10/site-packages/responses/matchers.pyi new file mode 100644 index 0000000000000000000000000000000000000000..188de2e34896c79ba7249e97c158ef718a507717 --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses/matchers.pyi @@ -0,0 +1,44 @@ +from typing import ( + Any, + Callable, + Optional, + Dict, +) + +JSONDecodeError = ValueError + + +def _create_key_val_str(input_dict: Dict[Any, Any]) -> str: ... + +def json_params_matcher( + params: Optional[Dict[str, Any]] +) -> Callable[..., Any]: ... + +def urlencoded_params_matcher( + params: Optional[Dict[str, str]] +) -> Callable[..., Any]: ... + +def query_param_matcher( + params: Optional[Dict[str, str]] +) -> Callable[..., Any]: ... + +def query_string_matcher( + query: Optional[str] +) -> Callable[..., Any]: ... + +def request_kwargs_matcher( + kwargs: Optional[Dict[str, Any]] +) -> Callable[..., Any]: ... 
+ +def multipart_matcher( + files: Dict[str, Any], data: Optional[Dict[str, str]] = ... +) -> Callable[..., Any]: ... + +def header_matcher( + headers: Dict[str, str], + strict_match: bool = ... +) -> Callable[..., Any]: ... + +def fragment_identifier_matcher( + identifier: Optional[str] +) -> Callable[..., Any]: ... diff --git a/venv/lib/python3.10/site-packages/responses/py.typed b/venv/lib/python3.10/site-packages/responses/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/responses/registries.py b/venv/lib/python3.10/site-packages/responses/registries.py new file mode 100644 index 0000000000000000000000000000000000000000..22f79519a1100db4a163d1615c4c3300824972e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses/registries.py @@ -0,0 +1,63 @@ +from typing import ( + TYPE_CHECKING, + List, + Optional, + Tuple, +) + +if TYPE_CHECKING: # pragma: no cover + # import only for linter run + from requests import PreparedRequest + from responses import BaseResponse + + +class FirstMatchRegistry(object): + def __init__(self) -> None: + self._responses: List["BaseResponse"] = [] + + @property + def registered(self) -> List["BaseResponse"]: + return self._responses + + def reset(self) -> None: + self._responses = [] + + def find( + self, request: "PreparedRequest" + ) -> Tuple[Optional["BaseResponse"], List[str]]: + found = None + found_match = None + match_failed_reasons = [] + for i, response in enumerate(self.registered): + match_result, reason = response.matches(request) + if match_result: + if found is None: + found = i + found_match = response + else: + if self.registered[found].call_count > 0: + # that assumes that some responses were added between calls + self.registered.pop(found) + found_match = response + break + # Multiple matches found. Remove & return the first response. 
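+                    # at this point the first match is still unused: remove it
+                    # from the registry and return it, so the next identical
+                    # request falls through to the later-registered duplicate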
+ return self.registered.pop(found), match_failed_reasons + else: + match_failed_reasons.append(reason) + return found_match, match_failed_reasons + + def add(self, response: "BaseResponse") -> None: + self.registered.append(response) + + def remove(self, response: "BaseResponse") -> None: + while response in self.registered: + self.registered.remove(response) + + def replace(self, response: "BaseResponse") -> None: + try: + index = self.registered.index(response) + except ValueError: + raise ValueError( + "Response is not registered for URL {}".format(response.url) + ) + self.registered[index] = response diff --git a/venv/lib/python3.10/site-packages/responses/test_matchers.py b/venv/lib/python3.10/site-packages/responses/test_matchers.py new file mode 100644 index 0000000000000000000000000000000000000000..d061d97b98b2c09f65c4e7105b3d70a8e018cff4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses/test_matchers.py @@ -0,0 +1,625 @@ +from __future__ import absolute_import, print_function, division, unicode_literals + +import pytest +import requests +import responses +from requests.exceptions import ConnectionError +from responses import matchers + + +def assert_response(resp, body=None, content_type="text/plain"): + assert resp.status_code == 200 + assert resp.reason == "OK" + assert resp.headers["Content-Type"] == content_type + assert resp.text == body + + +def assert_reset(): + assert len(responses._default_mock.registered()) == 0 + assert len(responses.calls) == 0 + + +def test_query_string_matcher(): + @responses.activate + def run(): + url = "http://example.com?test=1&foo=bar" + responses.add( + responses.GET, + url, + body=b"test", + match=[matchers.query_string_matcher("test=1&foo=bar")], + ) + resp = requests.get("http://example.com?test=1&foo=bar") + assert_response(resp, "test") + resp = requests.get("http://example.com?foo=bar&test=1") + assert_response(resp, "test") + resp = requests.get("http://example.com/?foo=bar&test=1") + assert_response(resp, "test") + + run() + assert_reset() + + +def test_request_matches_post_params(): + @responses.activate + def run(deprecated): + if deprecated: + json_params_matcher = getattr(responses, "json_params_matcher") + urlencoded_params_matcher = getattr(responses, "urlencoded_params_matcher") + else: + json_params_matcher = matchers.json_params_matcher + urlencoded_params_matcher = matchers.urlencoded_params_matcher + + responses.add( + method=responses.POST, + url="http://example.com/", + body="one", + match=[json_params_matcher({"page": {"name": "first", "type": "json"}})], + ) + responses.add( + method=responses.POST, + url="http://example.com/", + body="two", + match=[urlencoded_params_matcher({"page": "second", "type": "urlencoded"})], + ) + + resp = requests.request( + "POST", + "http://example.com/", + headers={"Content-Type": "x-www-form-urlencoded"}, + data={"page": "second", "type": "urlencoded"}, + ) + assert_response(resp, "two") + + resp = requests.request( + "POST", + "http://example.com/", + headers={"Content-Type": "application/json"}, + json={"page": {"name": "first", "type": "json"}}, + ) + assert_response(resp, "one") + + with pytest.deprecated_call(): + run(deprecated=True) + assert_reset() + + run(deprecated=False) + assert_reset() + + +def test_request_matches_empty_body(): + def run(): + with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps: + # test that both json and urlencoded body are empty in matcher and in request + rsps.add( + method=responses.POST, + url="http://example.com/", + 
body="one", + match=[matchers.json_params_matcher(None)], + ) + + rsps.add( + method=responses.POST, + url="http://example.com/", + body="two", + match=[matchers.urlencoded_params_matcher(None)], + ) + + resp = requests.request("POST", "http://example.com/") + assert_response(resp, "one") + + resp = requests.request( + "POST", + "http://example.com/", + headers={"Content-Type": "x-www-form-urlencoded"}, + ) + assert_response(resp, "two") + + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + # test exception raise if matcher body is None but request data is not None + rsps.add( + method=responses.POST, + url="http://example.com/", + body="one", + match=[matchers.json_params_matcher(None)], + ) + + with pytest.raises(ConnectionError) as excinfo: + resp = requests.request( + "POST", + "http://example.com/", + json={"my": "data"}, + headers={"Content-Type": "application/json"}, + ) + + msg = str(excinfo.value) + assert "request.body doesn't match: {my: data} doesn't match {}" in msg + + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add( + method=responses.POST, + url="http://example.com/", + body="two", + match=[matchers.urlencoded_params_matcher(None)], + ) + with pytest.raises(ConnectionError) as excinfo: + resp = requests.request( + "POST", + "http://example.com/", + headers={"Content-Type": "x-www-form-urlencoded"}, + data={"page": "second", "type": "urlencoded"}, + ) + msg = str(excinfo.value) + assert ( + "request.body doesn't match: {page: second, type: urlencoded} doesn't match {}" + in msg + ) + + run() + assert_reset() + + +def test_request_matches_params(): + @responses.activate + def run(): + url = "http://example.com/test" + params = {"hello": "world", "I am": "a big test"} + responses.add( + method=responses.GET, + url=url, + body="test", + match=[matchers.query_param_matcher(params)], + match_querystring=False, + ) + + # exchange parameter places for the test + params = { + "I am": "a big test", + "hello": "world", + } + resp = requests.get(url, params=params) + + constructed_url = r"http://example.com/test?I+am=a+big+test&hello=world" + assert resp.url == constructed_url + assert resp.request.url == constructed_url + + resp_params = getattr(resp.request, "params") + assert resp_params == params + + run() + assert_reset() + + +def test_fail_matchers_error(): + """ + Validate that Exception is raised if request does not match responses.matchers + validate matchers.urlencoded_params_matcher + validate matchers.json_params_matcher + validate matchers.query_param_matcher + validate matchers.request_kwargs_matcher + :return: None + """ + + def run(): + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add( + "POST", + "http://example.com", + match=[matchers.urlencoded_params_matcher({"foo": "bar"})], + ) + rsps.add( + "POST", + "http://example.com", + match=[matchers.json_params_matcher({"fail": "json"})], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.post("http://example.com", data={"id": "bad"}) + + msg = str(excinfo.value) + assert ( + "request.body doesn't match: {id: bad} doesn't match {foo: bar}" in msg + ) + + assert ( + "request.body doesn't match: JSONDecodeError: Cannot parse request.body" + in msg + ) + + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add( + "GET", + "http://111.com", + match=[matchers.query_param_matcher({"my": "params"})], + ) + + rsps.add( + method=responses.GET, + url="http://111.com/", + body="two", + 
match=[matchers.json_params_matcher({"page": "one"})], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.get( + "http://111.com", params={"id": "bad"}, json={"page": "two"} + ) + + msg = str(excinfo.value) + assert ( + "Parameters do not match. {id: bad} doesn't match {my: params}" in msg + ) + assert ( + "request.body doesn't match: {page: two} doesn't match {page: one}" + in msg + ) + + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + req_kwargs = { + "stream": True, + "verify": False, + } + rsps.add( + "GET", + "http://111.com", + match=[matchers.request_kwargs_matcher(req_kwargs)], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.get("http://111.com", stream=True) + + msg = str(excinfo.value) + assert ( + "Arguments don't match: " + "{stream: True, verify: True} doesn't match {stream: True, verify: False}" + ) in msg + + run() + assert_reset() + + +@pytest.mark.parametrize( + "req_file,match_file", + [ + (b"Old World!", "Old World!"), + ("Old World!", b"Old World!"), + (b"Old World!", b"Old World!"), + ("Old World!", "Old World!"), + (b"\xacHello World!", b"\xacHello World!"), + ], +) +def test_multipart_matcher(req_file, match_file): + @responses.activate + def run(): + req_data = {"some": "other", "data": "fields"} + responses.add( + responses.POST, + url="http://httpbin.org/post", + match=[ + matchers.multipart_matcher( + files={"file_name": match_file}, data=req_data + ) + ], + ) + resp = requests.post( + "http://httpbin.org/post", data=req_data, files={"file_name": req_file} + ) + assert resp.status_code == 200 + + with pytest.raises(TypeError): + responses.add( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(files={})], + ) + + run() + assert_reset() + + +def test_multipart_matcher_fail(): + """ + Validate that Exception is raised if request does not match responses.matchers + validate matchers.multipart_matcher + :return: None + """ + + def run(): + # different file contents + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + req_data = {"some": "other", "data": "fields"} + req_files = {"file_name": b"Old World!"} + rsps.add( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(req_files, data=req_data)], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.post( + "http://httpbin.org/post", + data=req_data, + files={"file_name": b"New World!"}, + ) + + msg = str(excinfo.value) + assert "multipart/form-data doesn't match. Request body differs." in msg + + assert ( + r'\r\nContent-Disposition: form-data; name="file_name"; ' + r'filename="file_name"\r\n\r\nOld World!\r\n' + ) in msg + assert ( + r'\r\nContent-Disposition: form-data; name="file_name"; ' + r'filename="file_name"\r\n\r\nNew World!\r\n' + ) in msg + + # x-www-form-urlencoded request + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + req_data = {"some": "other", "data": "fields"} + req_files = {"file_name": b"Old World!"} + rsps.add( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(req_files, data=req_data)], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.post("http://httpbin.org/post", data=req_data) + + msg = str(excinfo.value) + assert ( + "multipart/form-data doesn't match. Request headers['Content-Type'] is different." 
+ in msg + ) + assert ( + "application/x-www-form-urlencoded isn't equal to multipart/form-data; boundary=" + in msg + ) + + # empty body request + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + req_files = {"file_name": b"Old World!"} + rsps.add( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(req_files)], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.post("http://httpbin.org/post") + + msg = str(excinfo.value) + assert "Request is missing the 'Content-Type' header" in msg + + run() + assert_reset() + + +def test_query_string_matcher_raises(): + """ + Validate that Exception is raised if request does not match responses.matchers + validate matchers.query_string_matcher + :return: None + """ + + def run(): + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + rsps.add( + "GET", + "http://111.com", + match=[matchers.query_string_matcher("didi=pro")], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.get("http://111.com", params={"test": "1", "didi": "pro"}) + + msg = str(excinfo.value) + assert ( + "Query string doesn't match. {didi: pro, test: 1} doesn't match {didi: pro}" + in msg + ) + + run() + assert_reset() + + +def test_request_matches_headers(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + method=responses.GET, + url=url, + json={"success": True}, + match=[matchers.header_matcher({"Accept": "application/json"})], + ) + + responses.add( + method=responses.GET, + url=url, + body="success", + match=[matchers.header_matcher({"Accept": "text/plain"})], + ) + + # the actual request can contain extra headers (requests always adds some itself anyway) + resp = requests.get( + url, headers={"Accept": "application/json", "Accept-Charset": "utf-8"} + ) + assert_response(resp, body='{"success": true}', content_type="application/json") + + resp = requests.get(url, headers={"Accept": "text/plain"}) + assert_response(resp, body="success", content_type="text/plain") + + run() + assert_reset() + + +def test_request_matches_headers_no_match(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + method=responses.GET, + url=url, + json={"success": True}, + match=[matchers.header_matcher({"Accept": "application/json"})], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.get(url, headers={"Accept": "application/xml"}) + + msg = str(excinfo.value) + assert ( + "Headers do not match: {Accept: application/xml} doesn't match " + "{Accept: application/json}" + ) in msg + + run() + assert_reset() + + +def test_request_matches_headers_strict_match(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + method=responses.GET, + url=url, + body="success", + match=[ + matchers.header_matcher({"Accept": "text/plain"}, strict_match=True) + ], + ) + + # requests will add some extra headers of its own, so we have to use prepared requests + session = requests.Session() + + # make sure we send *just* the header we're expecting + prepped = session.prepare_request( + requests.Request( + method="GET", + url=url, + ) + ) + prepped.headers.clear() + prepped.headers["Accept"] = "text/plain" + + resp = session.send(prepped) + assert_response(resp, body="success", content_type="text/plain") + + # include the "Accept-Charset" header, which will fail to match + prepped = session.prepare_request( + requests.Request( + method="GET", + url=url, + ) + ) + prepped.headers.clear() +
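+        # clear() strips the defaults requests would otherwise inject
+        # (User-Agent, Accept-Encoding, ...); this time the extra
+        # Accept-Charset header set below makes the strict match fail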
prepped.headers["Accept"] = "text/plain" + prepped.headers["Accept-Charset"] = "utf-8" + + with pytest.raises(ConnectionError) as excinfo: + session.send(prepped) + + msg = str(excinfo.value) + assert ( + "Headers do not match: {Accept: text/plain, Accept-Charset: utf-8} " + "doesn't match {Accept: text/plain}" + ) in msg + + run() + assert_reset() + + +def test_fragment_identifier_matcher(): + @responses.activate + def run(): + responses.add( + responses.GET, + "http://example.com", + match=[matchers.fragment_identifier_matcher("test=1&foo=bar")], + body=b"test", + ) + + resp = requests.get("http://example.com#test=1&foo=bar") + assert_response(resp, "test") + + run() + assert_reset() + + +def test_fragment_identifier_matcher_error(): + @responses.activate + def run(): + responses.add( + responses.GET, + "http://example.com/", + match=[matchers.fragment_identifier_matcher("test=1")], + ) + responses.add( + responses.GET, + "http://example.com/", + match=[matchers.fragment_identifier_matcher(None)], + ) + + with pytest.raises(ConnectionError) as excinfo: + requests.get("http://example.com/#test=2") + + msg = str(excinfo.value) + assert ( + "URL fragment identifier is different: test=1 doesn't match test=2" + ) in msg + assert ( + "URL fragment identifier is different: None doesn't match test=2" + ) in msg + + run() + assert_reset() + + +def test_fragment_identifier_matcher_and_match_querystring(): + @responses.activate + def run(): + url = "http://example.com?ab=xy&zed=qwe#test=1&foo=bar" + responses.add( + responses.GET, + url, + match_querystring=True, + match=[matchers.fragment_identifier_matcher("test=1&foo=bar")], + body=b"test", + ) + + # two requests to check reversed order of fragment identifier + resp = requests.get("http://example.com?ab=xy&zed=qwe#test=1&foo=bar") + assert_response(resp, "test") + resp = requests.get("http://example.com?zed=qwe&ab=xy#foo=bar&test=1") + assert_response(resp, "test") + + run() + assert_reset() + + +def test_matchers_create_key_val_str(): + """ + Test that matchers._create_key_val_str does recursive conversion + """ + data = { + "my_list": [ + 1, + 2, + "a", + {"key1": "val1", "key2": 2, 3: "test"}, + "!", + [["list", "nested"], {"nested": "dict"}], + ], + 1: 4, + "test": "val", + "high": {"nested": "nested_dict"}, + } + conv_str = matchers._create_key_val_str(data) + reference = ( + "{1: 4, high: {nested: nested_dict}, my_list: [!, 1, 2, [[list, nested], {nested: dict}], " + "a, {3: test, key1: val1, key2: 2}], test: val}" + ) + assert conv_str == reference diff --git a/venv/lib/python3.10/site-packages/responses/test_registries.py b/venv/lib/python3.10/site-packages/responses/test_registries.py new file mode 100644 index 0000000000000000000000000000000000000000..b4dd8cc5600403da68c89378646dee33b7e6b7f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses/test_registries.py @@ -0,0 +1,70 @@ +import pytest + +import responses +from responses import registries +from responses.test_responses import assert_reset + + +def test_set_registry_not_empty(): + class CustomRegistry(registries.FirstMatchRegistry): + pass + + @responses.activate + def run(): + url = "http://fizzbuzz/foo" + responses.add(method=responses.GET, url=url) + with pytest.raises(AttributeError) as excinfo: + responses.mock._set_registry(CustomRegistry) + msg = str(excinfo.value) + assert "Cannot replace Registry, current registry has responses" in msg + + run() + assert_reset() + + +def test_set_registry(): + class CustomRegistry(registries.FirstMatchRegistry): + pass + + 
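+    # registry= on the decorator installs CustomRegistry for that one test
+    # only; the plain @responses.activate run() below checks that the
+    # default FirstMatchRegistry is restored afterwards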
@responses.activate(registry=CustomRegistry) + def run_with_registry(): + assert type(responses.mock._get_registry()) == CustomRegistry + + @responses.activate + def run(): + # test that registry does not leak to another test + assert type(responses.mock._get_registry()) == registries.FirstMatchRegistry + + run_with_registry() + run() + assert_reset() + + +def test_set_registry_context_manager(): + def run(): + class CustomRegistry(registries.FirstMatchRegistry): + pass + + with responses.RequestsMock( + assert_all_requests_are_fired=False, registry=CustomRegistry + ) as rsps: + assert type(rsps._get_registry()) == CustomRegistry + assert type(responses.mock._get_registry()) == registries.FirstMatchRegistry + + run() + assert_reset() + + +def test_registry_reset(): + def run(): + class CustomRegistry(registries.FirstMatchRegistry): + pass + + with responses.RequestsMock( + assert_all_requests_are_fired=False, registry=CustomRegistry + ) as rsps: + rsps._get_registry().reset() + assert not rsps.registered() + + run() + assert_reset() diff --git a/venv/lib/python3.10/site-packages/responses/test_responses.py b/venv/lib/python3.10/site-packages/responses/test_responses.py new file mode 100644 index 0000000000000000000000000000000000000000..dd6f62249382a39c06a2cbc086a94a5481d60e4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/responses/test_responses.py @@ -0,0 +1,1927 @@ +# coding: utf-8 + +from __future__ import absolute_import, print_function, division, unicode_literals + +import inspect +import os +import re +from io import BufferedReader, BytesIO + +import pytest +import requests +import responses +from requests.exceptions import ConnectionError, HTTPError, ChunkedEncodingError +from responses import ( + BaseResponse, + Response, + PassthroughResponse, + matchers, + CallbackResponse, +) + + +try: + from mock import patch, Mock +except ImportError: + from unittest.mock import patch, Mock # type: ignore + + +def assert_reset(): + assert len(responses._default_mock.registered()) == 0 + assert len(responses.calls) == 0 + + +def assert_response(resp, body=None, content_type="text/plain"): + assert resp.status_code == 200 + assert resp.reason == "OK" + if content_type is not None: + assert resp.headers["Content-Type"] == content_type + else: + assert "Content-Type" not in resp.headers + assert resp.text == body + + +def assert_params(resp, expected): + assert hasattr(resp, "request"), "Missing request" + assert hasattr( + resp.request, "params" + ), "Missing params on request that responses should add" + assert getattr(resp.request, "params") == expected, "Incorrect parameters" + + +def test_response(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com", body=b"test") + resp = requests.get("http://example.com") + assert_response(resp, "test") + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == "http://example.com/" + assert responses.calls[0].response.content == b"test" + + resp = requests.get("http://example.com?foo=bar") + assert_response(resp, "test") + assert len(responses.calls) == 2 + assert responses.calls[1].request.url == "http://example.com/?foo=bar" + assert responses.calls[1].response.content == b"test" + + run() + assert_reset() + + +def test_response_encoded(): + @responses.activate + def run(): + # Path contains urlencoded =/()[] + url = "http://example.org/foo.bar%3D%2F%28%29%5B%5D" + responses.add(responses.GET, url, body="it works", status=200) + resp = requests.get(url) + assert_response(resp, "it works") + 
+ run() + assert_reset() + + +def test_response_with_instance(): + @responses.activate + def run(): + responses.add( + responses.Response(method=responses.GET, url="http://example.com") + ) + resp = requests.get("http://example.com") + assert_response(resp, "") + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == "http://example.com/" + + resp = requests.get("http://example.com?foo=bar") + assert_response(resp, "") + assert len(responses.calls) == 2 + assert responses.calls[1].request.url == "http://example.com/?foo=bar" + + run() + assert_reset() + + +@pytest.mark.parametrize( + "original,replacement", + [ + ("http://example.com/two", "http://example.com/two"), + ( + Response(method=responses.GET, url="http://example.com/two"), + Response( + method=responses.GET, url="http://example.com/two", body="testtwo" + ), + ), + ( + re.compile(r"http://example\.com/two"), + re.compile(r"http://example\.com/two"), + ), + ], +) +def test_replace(original, replacement): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/one", body="test1") + + if isinstance(original, BaseResponse): + responses.add(original) + else: + responses.add(responses.GET, original, body="test2") + + responses.add(responses.GET, "http://example.com/three", body="test3") + responses.add( + responses.GET, re.compile(r"http://example\.com/four"), body="test3" + ) + + if isinstance(replacement, BaseResponse): + responses.replace(replacement) + else: + responses.replace(responses.GET, replacement, body="testtwo") + + resp = requests.get("http://example.com/two") + assert_response(resp, "testtwo") + + run() + assert_reset() + + +@pytest.mark.parametrize( + "original,replacement", + [ + ("http://example.com/one", re.compile(r"http://example\.com/one")), + (re.compile(r"http://example\.com/one"), "http://example.com/one"), + ], +) +def test_replace_error(original, replacement): + @responses.activate + def run(): + responses.add(responses.GET, original) + with pytest.raises(ValueError) as excinfo: + responses.replace(responses.GET, replacement) + assert "Response is not registered for URL %s" % replacement in str( + excinfo.value + ) + + run() + assert_reset() + + +def test_replace_response_object_error(): + @responses.activate + def run(): + responses.add(Response(method=responses.GET, url="http://example.com/one")) + with pytest.raises(ValueError) as excinfo: + responses.replace( + Response(method=responses.GET, url="http://example.com/two") + ) + assert "Response is not registered for URL http://example.com/two" in str( + excinfo.value + ) + + run() + assert_reset() + + +@pytest.mark.parametrize( + "original,replacement", + [ + ("http://example.com/two", "http://example.com/two"), + ( + Response(method=responses.GET, url="http://example.com/two"), + Response( + method=responses.GET, url="http://example.com/two", body="testtwo" + ), + ), + ( + re.compile(r"http://example\.com/two"), + re.compile(r"http://example\.com/two"), + ), + ], +) +def test_upsert_replace(original, replacement): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/one", body="test1") + + if isinstance(original, BaseResponse): + responses.add(original) + else: + responses.add(responses.GET, original, body="test2") + + if isinstance(replacement, BaseResponse): + responses.upsert(replacement) + else: + responses.upsert(responses.GET, replacement, body="testtwo") + + resp = requests.get("http://example.com/two") + assert_response(resp, "testtwo") + + run() + assert_reset() + 
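+# `upsert` is replace-or-add: the parametrized test above hits the replace
+# path (a response for /two is already registered), while test_upsert_add
+# below hits the add path (nothing registered for /two yet). A minimal
+# sketch of the call, reusing the URL from these tests:
+#
+#     responses.upsert(responses.GET, "http://example.com/two", body="v2")
+#     # replaces the registered GET response for /two when present,
+#     # otherwise registers it as a new response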
+ +@pytest.mark.parametrize( + "original,replacement", + [ + ("http://example.com/two", "http://example.com/two"), + ( + Response(method=responses.GET, url="http://example.com/two"), + Response( + method=responses.GET, url="http://example.com/two", body="testtwo" + ), + ), + ( + re.compile(r"http://example\.com/two"), + re.compile(r"http://example\.com/two"), + ), + ], +) +def test_upsert_add(original, replacement): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/one", body="test1") + + if isinstance(replacement, BaseResponse): + responses.upsert(replacement) + else: + responses.upsert(responses.GET, replacement, body="testtwo") + + resp = requests.get("http://example.com/two") + assert_response(resp, "testtwo") + + run() + assert_reset() + + +def test_remove(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/zero") + responses.add(responses.GET, "http://example.com/one") + responses.add(responses.GET, "http://example.com/two") + responses.add(responses.GET, re.compile(r"http://example\.com/three")) + responses.add(responses.GET, re.compile(r"http://example\.com/four")) + re.purge() + responses.remove(responses.GET, "http://example.com/two") + responses.remove(Response(method=responses.GET, url="http://example.com/zero")) + responses.remove(responses.GET, re.compile(r"http://example\.com/four")) + + with pytest.raises(ConnectionError): + requests.get("http://example.com/zero") + requests.get("http://example.com/one") + with pytest.raises(ConnectionError): + requests.get("http://example.com/two") + requests.get("http://example.com/three") + with pytest.raises(ConnectionError): + requests.get("http://example.com/four") + + run() + assert_reset() + + +@pytest.mark.parametrize( + "args1,kwargs1,args2,kwargs2,expected", + [ + ((responses.GET, "a"), {}, (responses.GET, "a"), {}, True), + ((responses.GET, "a"), {}, (responses.GET, "b"), {}, False), + ((responses.GET, "a"), {}, (responses.POST, "a"), {}, False), + ( + (responses.GET, "a"), + {"match_querystring": True}, + (responses.GET, "a"), + {}, + True, + ), + ], +) +def test_response_equality(args1, kwargs1, args2, kwargs2, expected): + o1 = BaseResponse(*args1, **kwargs1) + o2 = BaseResponse(*args2, **kwargs2) + assert (o1 == o2) is expected + assert (o1 != o2) is not expected + + +def test_response_equality_different_objects(): + o1 = BaseResponse(method=responses.GET, url="a") + o2 = "str" + assert (o1 == o2) is False + assert (o1 != o2) is True + + +def test_connection_error(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com") + + with pytest.raises(ConnectionError): + requests.get("http://example.com/foo") + + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == "http://example.com/foo" + assert type(responses.calls[0].response) is ConnectionError + assert responses.calls[0].response.request + + run() + assert_reset() + + +def test_match_querystring(): + @responses.activate + def run(): + url = "http://example.com?test=1&foo=bar" + responses.add(responses.GET, url, match_querystring=True, body=b"test") + resp = requests.get("http://example.com?test=1&foo=bar") + assert_response(resp, "test") + resp = requests.get("http://example.com?foo=bar&test=1") + assert_response(resp, "test") + resp = requests.get("http://example.com/?foo=bar&test=1") + assert_response(resp, "test") + + run() + assert_reset() + + +def test_match_querystring_empty(): + @responses.activate + def run(): + responses.add( + 
responses.GET, "http://example.com", body=b"test", match_querystring=True + ) + resp = requests.get("http://example.com") + assert_response(resp, "test") + resp = requests.get("http://example.com/") + assert_response(resp, "test") + with pytest.raises(ConnectionError): + requests.get("http://example.com?query=foo") + + run() + assert_reset() + + +def test_match_querystring_error(): + @responses.activate + def run(): + responses.add( + responses.GET, "http://example.com/?test=1", match_querystring=True + ) + + with pytest.raises(ConnectionError): + requests.get("http://example.com/foo/?test=2") + + run() + assert_reset() + + +def test_match_querystring_regex(): + @responses.activate + def run(): + """Note that `match_querystring` value shouldn't matter when passing a + regular expression""" + + responses.add( + responses.GET, + re.compile(r"http://example\.com/foo/\?test=1"), + body="test1", + match_querystring=True, + ) + + resp = requests.get("http://example.com/foo/?test=1") + assert_response(resp, "test1") + + responses.add( + responses.GET, + re.compile(r"http://example\.com/foo/\?test=2"), + body="test2", + match_querystring=False, + ) + + resp = requests.get("http://example.com/foo/?test=2") + assert_response(resp, "test2") + + run() + assert_reset() + + +def test_match_querystring_error_regex(): + @responses.activate + def run(): + """Note that `match_querystring` value shouldn't matter when passing a + regular expression""" + + responses.add( + responses.GET, + re.compile(r"http://example\.com/foo/\?test=1"), + match_querystring=True, + ) + + with pytest.raises(ConnectionError): + requests.get("http://example.com/foo/?test=3") + + responses.add( + responses.GET, + re.compile(r"http://example\.com/foo/\?test=2"), + match_querystring=False, + ) + + with pytest.raises(ConnectionError): + requests.get("http://example.com/foo/?test=4") + + run() + assert_reset() + + +def test_match_querystring_auto_activates(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com?test=1", body=b"test") + resp = requests.get("http://example.com?test=1") + assert_response(resp, "test") + with pytest.raises(ConnectionError): + requests.get("http://example.com/?test=2") + + run() + assert_reset() + + +def test_match_querystring_missing_key(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com?foo=1&bar=2", body=b"test") + with pytest.raises(ConnectionError): + requests.get("http://example.com/?foo=1&baz=2") + + with pytest.raises(ConnectionError): + requests.get("http://example.com/?bar=2&fez=1") + + run() + assert_reset() + + +def test_accept_string_body(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add(responses.GET, url, body="test") + resp = requests.get(url) + assert_response(resp, "test") + + run() + assert_reset() + + +def test_accept_json_body(): + @responses.activate + def run(): + content_type = "application/json" + + url = "http://example.com/" + responses.add(responses.GET, url, json={"message": "success"}) + resp = requests.get(url) + assert_response(resp, '{"message": "success"}', content_type) + + url = "http://example.com/1/" + responses.add(responses.GET, url, json=[]) + resp = requests.get(url) + assert_response(resp, "[]", content_type) + + run() + assert_reset() + + +def test_no_content_type(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add(responses.GET, url, body="test", content_type=None) + resp = requests.get(url) + assert_response(resp, "test", 
content_type=None) + + run() + assert_reset() + + +def test_arbitrary_status_code(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add(responses.GET, url, body="test", status=419) + resp = requests.get(url) + assert resp.status_code == 419 + assert resp.reason is None + + run() + assert_reset() + + +def test_throw_connection_error_explicit(): + @responses.activate + def run(): + url = "http://example.com" + exception = HTTPError("HTTP Error") + responses.add(responses.GET, url, exception) + + with pytest.raises(HTTPError) as HE: + requests.get(url) + + assert str(HE.value) == "HTTP Error" + + run() + assert_reset() + + +def test_callback(): + body = b"test callback" + status = 400 + reason = "Bad Request" + headers = { + "foo": "bar", + "Content-Type": "application/json", + "Content-Length": "13", + } + url = "http://example.com/" + + def request_callback(_request): + return status, headers, body + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert resp.reason == reason + assert "bar" == resp.headers.get("foo") + assert "application/json" == resp.headers.get("Content-Type") + assert "13" == resp.headers.get("Content-Length") + + run() + assert_reset() + + +def test_callback_deprecated_stream_argument(): + with pytest.deprecated_call(): + CallbackResponse(responses.GET, "url", lambda x: x, stream=False) + + +def test_callback_deprecated_match_querystring_argument(): + with pytest.deprecated_call(): + CallbackResponse(responses.GET, "url", lambda x: x, match_querystring=False) + + +def test_callback_match_querystring_default_false(): + """ + Test to ensure that by default 'match_querystring' in 'add_callback' is set to False + and does not raise deprecation + see: https://github.com/getsentry/responses/issues/464 and related PR + """ + body = b"test callback" + status = 200 + params = {"hello": "world", "I am": "a big test"} + headers = {"foo": "bar"} + url = "http://example.com/" + + def request_callback(_request): + return status, headers, body + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback, content_type=None) + resp = requests.get(url, params=params) + assert resp.text == "test callback" + assert resp.status_code == status + assert "foo" in resp.headers + + with pytest.warns(None) as record: + run() + + # check that no deprecation warning was raised + assert not record + + assert_reset() + + +def test_callback_exception_result(): + result = Exception() + url = "http://example.com/" + + def request_callback(request): + return result + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + + with pytest.raises(Exception) as e: + requests.get(url) + + assert e.value is result + + run() + assert_reset() + + +def test_callback_exception_body(): + body = Exception() + url = "http://example.com/" + + def request_callback(request): + return 200, {}, body + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + + with pytest.raises(Exception) as e: + requests.get(url) + + assert e.value is body + + run() + assert_reset() + + +def test_callback_no_content_type(): + body = b"test callback" + status = 400 + reason = "Bad Request" + headers = {"foo": "bar"} + url = "http://example.com/" + + def request_callback(_request): + return status, headers, body + + @responses.activate + 
def run(): + responses.add_callback(responses.GET, url, request_callback, content_type=None) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert resp.reason == reason + assert "foo" in resp.headers + assert "Content-Type" not in resp.headers + + run() + assert_reset() + + +def test_callback_content_type_dict(): + def request_callback(request): + return ( + 200, + {"Content-Type": "application/json"}, + b"foo", + ) + + @responses.activate + def run(): + responses.add_callback("GET", "http://mockhost/.foo", callback=request_callback) + resp = requests.get("http://mockhost/.foo") + assert resp.text == "foo" + assert resp.headers["content-type"] == "application/json" + + run() + assert_reset() + + +def test_callback_matchers(): + def request_callback(request): + return ( + 200, + {"Content-Type": "application/json"}, + b"foo", + ) + + @responses.activate + def run(): + req_data = {"some": "other", "data": "fields"} + req_files = {"file_name": b"Old World!"} + + responses.add_callback( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(req_files, data=req_data)], + callback=request_callback, + ) + resp = requests.post("http://httpbin.org/post", data=req_data, files=req_files) + assert resp.text == "foo" + assert resp.headers["content-type"] == "application/json" + + run() + assert_reset() + + +def test_callback_matchers_fail(): + @responses.activate + def run(): + req_data = {"some": "other", "data": "fields"} + req_files = {"file_name": b"Old World!"} + + responses.add_callback( + responses.POST, + url="http://httpbin.org/post", + match=[matchers.multipart_matcher(req_files, data=req_data)], + callback=lambda x: ( + 0, + {"a": ""}, + "", + ), + ) + with pytest.raises(ConnectionError) as exc: + requests.post( + "http://httpbin.org/post", + data={"some": "other", "data": "wrong"}, + files=req_files, + ) + + assert "multipart/form-data doesn't match." 
in str(exc.value) + + run() + assert_reset() + + +def test_callback_content_type_tuple(): + def request_callback(request): + return ( + 200, + [("Content-Type", "application/json")], + b"foo", + ) + + @responses.activate + def run(): + responses.add_callback("GET", "http://mockhost/.foo", callback=request_callback) + resp = requests.get("http://mockhost/.foo") + assert resp.text == "foo" + assert resp.headers["content-type"] == "application/json" + + run() + assert_reset() + + +def test_regular_expression_url(): + @responses.activate + def run(): + url = re.compile(r"https?://(.*\.)?example.com") + responses.add(responses.GET, url, body=b"test") + + resp = requests.get("http://example.com") + assert_response(resp, "test") + + resp = requests.get("https://example.com") + assert_response(resp, "test") + + resp = requests.get("https://uk.example.com") + assert_response(resp, "test") + + with pytest.raises(ConnectionError): + requests.get("https://uk.exaaample.com") + + run() + assert_reset() + + +def test_base_response_get_response(): + resp = BaseResponse("GET", ".com") + with pytest.raises(NotImplementedError): + resp.get_response(requests.PreparedRequest()) + + +def test_custom_adapter(): + @responses.activate + def run(): + url = "http://example.com" + responses.add(responses.GET, url, body=b"test") + + calls = [0] + + class DummyAdapter(requests.adapters.HTTPAdapter): + def send(self, *a, **k): + calls[0] += 1 + return super(DummyAdapter, self).send(*a, **k) + + # Test that the adapter is actually used + session = requests.Session() + session.mount("http://", DummyAdapter()) + + resp = session.get(url, allow_redirects=False) + assert calls[0] == 1 + + # Test that the response is still correctly emulated + session = requests.Session() + session.mount("http://", DummyAdapter()) + + resp = session.get(url) + assert_response(resp, "test") + + run() + + +def test_responses_as_context_manager(): + def run(): + with responses.mock: + responses.add(responses.GET, "http://example.com", body=b"test") + resp = requests.get("http://example.com") + assert_response(resp, "test") + assert len(responses.calls) == 1 + assert responses.calls[0].request.url == "http://example.com/" + assert responses.calls[0].response.content == b"test" + + resp = requests.get("http://example.com?foo=bar") + assert_response(resp, "test") + assert len(responses.calls) == 2 + assert responses.calls[1].request.url == "http://example.com/?foo=bar" + assert responses.calls[1].response.content == b"test" + + run() + assert_reset() + + +def test_activate_doesnt_change_signature(): + def test_function(a, b=None): + return (a, b) + + decorated_test_function = responses.activate(test_function) + assert inspect.signature(test_function) == inspect.signature( + decorated_test_function + ) + + assert decorated_test_function(1, 2) == test_function(1, 2) + assert decorated_test_function(3) == test_function(3) + + +@pytest.fixture +def my_fruit(): + return "apple" + + +@pytest.fixture +def fruit_basket(my_fruit): + return ["banana", my_fruit] + + +@pytest.mark.usefixtures("my_fruit", "fruit_basket") +class TestFixtures(object): + """ + Test that pytest fixtures work well with 'activate' decorator + """ + + def test_function(self, my_fruit, fruit_basket): + assert my_fruit in fruit_basket + assert my_fruit == "apple" + + test_function_decorated = responses.activate(test_function) + + +def test_activate_mock_interaction(): + @patch("sys.stdout") + def test_function(mock_stdout): + return mock_stdout + + decorated_test_function = 
responses.activate(test_function) + assert inspect.signature(test_function) == inspect.signature( + decorated_test_function + ) + + value = test_function() + assert isinstance(value, Mock) + + value = decorated_test_function() + assert isinstance(value, Mock) + + +def test_activate_doesnt_change_signature_with_return_type(): + def test_function(a, b=None): + return a, b + + # Add type annotations as they are syntax errors in py2. + # Use a class to test for import errors in evaled code. + test_function.__annotations__["return"] = Mock + test_function.__annotations__["a"] = Mock + + decorated_test_function = responses.activate(test_function) + assert inspect.signature(test_function) == inspect.signature( + decorated_test_function + ) + + assert decorated_test_function(1, 2) == test_function(1, 2) + assert decorated_test_function(3) == test_function(3) + + +def test_activate_doesnt_change_signature_for_method(): + class TestCase(object): + def test_function(self, a, b=None): + return (self, a, b) + + decorated_test_function = responses.activate(test_function) + + test_case = TestCase() + assert test_case.decorated_test_function(1, 2) == test_case.test_function(1, 2) + assert test_case.decorated_test_function(3) == test_case.test_function(3) + + +def test_response_cookies(): + body = b"test callback" + status = 200 + headers = {"set-cookie": "session_id=12345; a=b; c=d"} + url = "http://example.com/" + + def request_callback(request): + return (status, headers, body) + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert "session_id" in resp.cookies + assert resp.cookies["session_id"] == "12345" + assert set(resp.cookies.keys()) == set(["session_id"]) + + run() + assert_reset() + + +def test_response_cookies_secure(): + body = b"test callback" + status = 200 + headers = {"set-cookie": "session_id=12345; a=b; c=d; secure"} + url = "http://example.com/" + + def request_callback(request): + return (status, headers, body) + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert "session_id" in resp.cookies + assert resp.cookies["session_id"] == "12345" + assert set(resp.cookies.keys()) == set(["session_id"]) + + run() + assert_reset() + + +def test_response_cookies_multiple(): + body = b"test callback" + status = 200 + headers = [ + ("set-cookie", "1P_JAR=2019-12-31-23; path=/; domain=.example.com; HttpOnly"), + ("set-cookie", "NID=some=value; path=/; domain=.example.com; secure"), + ] + url = "http://example.com/" + + def request_callback(request): + return (status, headers, body) + + @responses.activate + def run(): + responses.add_callback(responses.GET, url, request_callback) + resp = requests.get(url) + assert resp.text == "test callback" + assert resp.status_code == status + assert set(resp.cookies.keys()) == set(["1P_JAR", "NID"]) + assert resp.cookies["1P_JAR"] == "2019-12-31-23" + assert resp.cookies["NID"] == "some=value" + + run() + assert_reset() + + +@pytest.mark.parametrize("request_stream", (True, False, None)) +@pytest.mark.parametrize("responses_stream", (True, False, None)) +def test_response_cookies_session(request_stream, responses_stream): + @responses.activate + def run(): + url = "https://example.com/path" + responses.add( + responses.GET, + url, + headers=[ + 
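+                # headers passed as a list of (name, value) pairs, the same
+                # form test_response_cookies_multiple uses above to send
+                # repeated Set-Cookie headers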
("Set-cookie", "mycookie=cookieval; path=/; secure"), + ], + body="ok", + stream=responses_stream, + ) + session = requests.session() + resp = session.get(url, stream=request_stream) + assert resp.text == "ok" + assert resp.status_code == 200 + + assert "mycookie" in resp.cookies + assert resp.cookies["mycookie"] == "cookieval" + assert set(resp.cookies.keys()) == set(["mycookie"]) + + assert "mycookie" in session.cookies + assert session.cookies["mycookie"] == "cookieval" + assert set(session.cookies.keys()) == set(["mycookie"]) + + run() + assert_reset() + + +def test_response_callback(): + """adds a callback to decorate the response, then checks it""" + + def run(): + def response_callback(resp): + resp._is_mocked = True + return resp + + with responses.RequestsMock(response_callback=response_callback) as m: + m.add(responses.GET, "http://example.com", body=b"test") + resp = requests.get("http://example.com") + assert resp.text == "test" + assert hasattr(resp, "_is_mocked") + assert getattr(resp, "_is_mocked") is True + + run() + assert_reset() + + +def test_response_filebody(): + """ Adds the possibility to use actual (binary) files as responses """ + + def run(): + current_file = os.path.abspath(__file__) + with responses.RequestsMock() as m: + with open(current_file, "r") as out: + m.add(responses.GET, "http://example.com", body=out.read(), stream=True) + resp = requests.get("http://example.com", stream=True) + with open(current_file, "r") as out: + assert resp.text == out.read() + + run() + assert_reset() + + +def test_use_stream_twice_to_double_raw_io(): + @responses.activate + def run(): + url = "http://example.com" + responses.add(responses.GET, url, body=b"42", stream=True) + resp = requests.get(url, stream=True) + assert resp.raw.read() == b"42" + + run() + assert_reset() + + +def test_assert_all_requests_are_fired(): + def request_callback(request): + raise BaseException() + + def run(): + with pytest.raises(AssertionError) as excinfo: + with responses.RequestsMock(assert_all_requests_are_fired=True) as m: + m.add(responses.GET, "http://example.com", body=b"test") + assert "http://example.com" in str(excinfo.value) + assert responses.GET in str(excinfo.value) + + # check that assert_all_requests_are_fired default to True + with pytest.raises(AssertionError): + with responses.RequestsMock() as m: + m.add(responses.GET, "http://example.com", body=b"test") + + # check that assert_all_requests_are_fired doesn't swallow exceptions + with pytest.raises(ValueError): + with responses.RequestsMock() as m: + m.add(responses.GET, "http://example.com", body=b"test") + raise ValueError() + + # check that assert_all_requests_are_fired=True doesn't remove urls + with responses.RequestsMock(assert_all_requests_are_fired=True) as m: + m.add(responses.GET, "http://example.com", body=b"test") + assert len(m.registered()) == 1 + requests.get("http://example.com") + assert len(m.registered()) == 1 + + # check that assert_all_requests_are_fired=True counts mocked errors + with responses.RequestsMock(assert_all_requests_are_fired=True) as m: + m.add(responses.GET, "http://example.com", body=Exception()) + assert len(m.registered()) == 1 + with pytest.raises(Exception): + requests.get("http://example.com") + assert len(m.registered()) == 1 + + with responses.RequestsMock(assert_all_requests_are_fired=True) as m: + m.add_callback(responses.GET, "http://example.com", request_callback) + assert len(m.registered()) == 1 + with pytest.raises(BaseException): + requests.get("http://example.com") + assert 
len(m.registered()) == 1 + + run() + assert_reset() + + +def test_allow_redirects_samehost(): + redirecting_url = "http://example.com" + final_url_path = "/1" + final_url = "{0}{1}".format(redirecting_url, final_url_path) + url_re = re.compile(r"^http://example.com(/)?(\d+)?$") + + def request_callback(request): + # endpoint of chained redirect + if request.url.endswith(final_url_path): + return 200, (), b"test" + + # otherwise redirect to an integer path + else: + if request.url.endswith("/0"): + n = 1 + else: + n = 0 + redirect_headers = {"location": "/{0!s}".format(n)} + return 301, redirect_headers, None + + def run(): + # setup redirect + with responses.mock: + responses.add_callback(responses.GET, url_re, request_callback) + resp_no_redirects = requests.get(redirecting_url, allow_redirects=False) + assert resp_no_redirects.status_code == 301 + assert len(responses.calls) == 1 # 1x300 + assert responses.calls[0][1].status_code == 301 + assert_reset() + + with responses.mock: + responses.add_callback(responses.GET, url_re, request_callback) + resp_yes_redirects = requests.get(redirecting_url, allow_redirects=True) + assert len(responses.calls) == 3 # 2x300 + 1x200 + assert len(resp_yes_redirects.history) == 2 + assert resp_yes_redirects.status_code == 200 + assert final_url == resp_yes_redirects.url + status_codes = [call[1].status_code for call in responses.calls] + assert status_codes == [301, 301, 200] + assert_reset() + + run() + assert_reset() + + +def test_handles_unicode_querystring(): + url = "http://example.com/test?type=2&ie=utf8&query=汉字" + + @responses.activate + def run(): + responses.add(responses.GET, url, body="test", match_querystring=True) + + resp = requests.get(url) + + assert_response(resp, "test") + + run() + assert_reset() + + +def test_handles_unicode_url(): + url = "http://www.संजाल.भारत/hi/वेबसाइट-डिजाइन" + + @responses.activate + def run(): + responses.add(responses.GET, url, body="test") + + resp = requests.get(url) + + assert_response(resp, "test") + + run() + assert_reset() + + +def test_handles_unicode_body(): + url = "http://example.com/test" + + @responses.activate + def run(): + responses.add(responses.GET, url, body="михољско лето") + + resp = requests.get(url) + + assert_response(resp, "михољско лето", content_type="text/plain; charset=utf-8") + + run() + assert_reset() + + +def test_handles_buffered_reader_body(): + url = "http://example.com/test" + + @responses.activate + def run(): + responses.add(responses.GET, url, body=BufferedReader(BytesIO(b"test"))) # type: ignore + + resp = requests.get(url) + + assert_response(resp, "test") + + run() + assert_reset() + + +def test_headers(): + @responses.activate + def run(): + responses.add( + responses.GET, "http://example.com", body="", headers={"X-Test": "foo"} + ) + resp = requests.get("http://example.com") + assert resp.headers["X-Test"] == "foo" + + run() + assert_reset() + + +def test_content_length_error(monkeypatch): + """ + Currently 'requests' does not enforce content length validation, + (validation that body length matches header). 
However, this could + be expected in the next major version, see + https://github.com/psf/requests/pull/3563 + + For now, a user can manually patch urllib3 to achieve the same + + See discussion in + https://github.com/getsentry/responses/issues/394 + """ + + @responses.activate + def run(): + responses.add( + responses.GET, + "http://example.com/api/123", + json={"message": "this body is too large"}, + adding_headers={"content-length": "2"}, + ) + with pytest.raises(ChunkedEncodingError) as exc: + requests.get("http://example.com/api/123") + + assert "IncompleteRead" in str(exc.value) + + # Type errors here and on 1250 are ignored because the stubs for requests + # are off https://github.com/python/typeshed/blob/f8501d33c737482a829c6db557a0be26895c5941 + # /stubs/requests/requests/packages/__init__.pyi#L1 + original_init = getattr(requests.packages.urllib3.HTTPResponse, "__init__") # type: ignore + + def patched_init(self, *args, **kwargs): + kwargs["enforce_content_length"] = True + original_init(self, *args, **kwargs) + + monkeypatch.setattr( + requests.packages.urllib3.HTTPResponse, "__init__", patched_init # type: ignore + ) + + run() + assert_reset() + + +def test_stream_with_none_chunk_size(): + """ + See discussion in + https://github.com/getsentry/responses/issues/438 + """ + + @responses.activate + def run(): + responses.add( + responses.GET, + "https://example.com", + status=200, + content_type="application/octet-stream", + body=b"This is test", + auto_calculate_content_length=True, + ) + res = requests.get("https://example.com", stream=True) + for chunk in res.iter_content(chunk_size=None): + assert chunk == b"This is test" + + run() + assert_reset() + + +def test_legacy_adding_headers(): + @responses.activate + def run(): + responses.add( + responses.GET, + "http://example.com", + body="", + adding_headers={"X-Test": "foo"}, + ) + resp = requests.get("http://example.com") + assert resp.headers["X-Test"] == "foo" + + run() + assert_reset() + + +def test_auto_calculate_content_length_string_body(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + responses.GET, url, body="test", auto_calculate_content_length=True + ) + resp = requests.get(url) + assert_response(resp, "test") + assert resp.headers["Content-Length"] == "4" + + run() + assert_reset() + + +def test_auto_calculate_content_length_bytes_body(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + responses.GET, url, body=b"test bytes", auto_calculate_content_length=True + ) + resp = requests.get(url) + assert_response(resp, "test bytes") + assert resp.headers["Content-Length"] == "10" + + run() + assert_reset() + + +def test_auto_calculate_content_length_json_body(): + @responses.activate + def run(): + content_type = "application/json" + + url = "http://example.com/" + responses.add( + responses.GET, + url, + json={"message": "success"}, + auto_calculate_content_length=True, + ) + resp = requests.get(url) + assert_response(resp, '{"message": "success"}', content_type) + assert resp.headers["Content-Length"] == "22" + + url = "http://example.com/1/" + responses.add(responses.GET, url, json=[], auto_calculate_content_length=True) + resp = requests.get(url) + assert_response(resp, "[]", content_type) + assert resp.headers["Content-Length"] == "2" + + run() + assert_reset() + + +def test_auto_calculate_content_length_unicode_body(): + @responses.activate + def run(): + url = "http://example.com/test" + responses.add( + responses.GET, url, body="михољско лето", 
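+            # "михољско лето" is 13 characters but 25 bytes once UTF-8
+            # encoded, which is the value the Content-Length assertion
+            # below expects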
auto_calculate_content_length=True + ) + resp = requests.get(url) + assert_response(resp, "михољско лето", content_type="text/plain; charset=utf-8") + assert resp.headers["Content-Length"] == "25" + + run() + assert_reset() + + +def test_auto_calculate_content_length_doesnt_work_for_buffered_reader_body(): + @responses.activate + def run(): + url = "http://example.com/test" + responses.add( + responses.GET, + url, + body=BufferedReader(BytesIO(b"testing")), # type: ignore + auto_calculate_content_length=True, + ) + resp = requests.get(url) + assert_response(resp, "testing") + assert "Content-Length" not in resp.headers + + run() + assert_reset() + + +def test_auto_calculate_content_length_doesnt_override_existing_value(): + @responses.activate + def run(): + url = "http://example.com/" + responses.add( + responses.GET, + url, + body="test", + headers={"Content-Length": "2"}, + auto_calculate_content_length=True, + ) + resp = requests.get(url) + assert_response(resp, "test") + assert resp.headers["Content-Length"] == "2" + + run() + assert_reset() + + +def test_multiple_responses(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com", body="test") + responses.add(responses.GET, "http://example.com", body="rest") + responses.add(responses.GET, "http://example.com", body="fest") + responses.add(responses.GET, "http://example.com", body="best") + + resp = requests.get("http://example.com") + assert_response(resp, "test") + + resp = requests.get("http://example.com") + assert_response(resp, "rest") + + resp = requests.get("http://example.com") + assert_response(resp, "fest") + + resp = requests.get("http://example.com") + assert_response(resp, "best") + + # After all responses are used, last response should be repeated + resp = requests.get("http://example.com") + assert_response(resp, "best") + + run() + assert_reset() + + +def test_multiple_responses_intermixed(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com", body="test") + resp = requests.get("http://example.com") + assert_response(resp, "test") + + responses.add(responses.GET, "http://example.com", body="rest") + resp = requests.get("http://example.com") + assert_response(resp, "rest") + + responses.add(responses.GET, "http://example.com", body="best") + resp = requests.get("http://example.com") + assert_response(resp, "best") + + # After all responses are used, last response should be repeated + resp = requests.get("http://example.com") + assert_response(resp, "best") + + run() + assert_reset() + + +def test_multiple_urls(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/one", body="one") + responses.add(responses.GET, "http://example.com/two", body="two") + + resp = requests.get("http://example.com/two") + assert_response(resp, "two") + resp = requests.get("http://example.com/one") + assert_response(resp, "one") + + run() + assert_reset() + + +def test_multiple_methods(): + @responses.activate + def run(): + responses.add(responses.GET, "http://example.com/one", body="gotcha") + responses.add(responses.POST, "http://example.com/one", body="posted") + + resp = requests.get("http://example.com/one") + assert_response(resp, "gotcha") + resp = requests.post("http://example.com/one") + assert_response(resp, "posted") + + run() + assert_reset() + + +def test_passthrough_flag(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + response = Response(responses.GET, httpserver.url, body="MOCK") + + 
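+    # one Response object is shared by both runs: run_mocked() executes
+    # first and serves the stubbed "MOCK" body; flipping
+    # response.passthrough to True afterwards lets run_passthrough() reach
+    # the live httpserver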
@responses.activate + def run_passthrough(): + responses.add(response) + resp = requests.get(httpserver.url) + assert_response(resp, "OK") + + @responses.activate + def run_mocked(): + responses.add(response) + resp = requests.get(httpserver.url) + assert_response(resp, "MOCK") + + run_mocked() + assert_reset() + + response.passthrough = True + run_passthrough() + assert_reset() + + +def test_passthrough_response(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def run(): + responses.add(PassthroughResponse(responses.GET, httpserver.url)) + responses.add(responses.GET, "{}/one".format(httpserver.url), body="one") + responses.add(responses.GET, "http://example.com/two", body="two") + + resp = requests.get("http://example.com/two") + assert_response(resp, "two") + resp = requests.get("{}/one".format(httpserver.url)) + assert_response(resp, "one") + resp = requests.get(httpserver.url) + assert_response(resp, "OK") + + assert len(responses.calls) == 3 + responses.assert_call_count(httpserver.url, 1) + + run() + assert_reset() + + +def test_passthrough_response_stream(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def run(): + responses.add(PassthroughResponse(responses.GET, httpserver.url)) + content_1 = requests.get(httpserver.url).content + with requests.get(httpserver.url, stream=True) as resp: + content_2 = resp.raw.read() + assert content_1 == content_2 + + run() + assert_reset() + + +def test_passthru_prefixes(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def run_constructor_argument(): + with responses.RequestsMock(passthru_prefixes=(httpserver.url,)): + resp = requests.get(httpserver.url) + assert_response(resp, "OK") + + @responses.activate + def run_property_setter(): + with responses.RequestsMock() as m: + m.passthru_prefixes = tuple([httpserver.url]) + resp = requests.get(httpserver.url) + assert_response(resp, "OK") + + run_constructor_argument() + assert_reset() + run_property_setter() + assert_reset() + + +def test_passthru(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def run(): + responses.add_passthru(httpserver.url) + responses.add(responses.GET, "{}/one".format(httpserver.url), body="one") + responses.add(responses.GET, "http://example.com/two", body="two") + + resp = requests.get("http://example.com/two") + assert_response(resp, "two") + resp = requests.get("{}/one".format(httpserver.url)) + assert_response(resp, "one") + resp = requests.get(httpserver.url) + assert_response(resp, "OK") + + run() + assert_reset() + + +def test_passthru_regex(httpserver): + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def run(): + responses.add_passthru(re.compile("{}/\\w+".format(httpserver.url))) + responses.add(responses.GET, "{}/one".format(httpserver.url), body="one") + responses.add(responses.GET, "http://example.com/two", body="two") + + resp = requests.get("http://example.com/two") + assert_response(resp, "two") + resp = requests.get("{}/one".format(httpserver.url)) + assert_response(resp, "one") + resp = requests.get("{}/two".format(httpserver.url)) + assert_response(resp, "OK") + resp = requests.get("{}/three".format(httpserver.url)) + assert_response(resp, "OK") + + run() + assert_reset() + + +def test_passthru_does_not_persist_across_tests(httpserver): + """ + passthru 
should be erased on exit from context manager + see: + https://github.com/getsentry/responses/issues/322 + """ + httpserver.serve_content("OK", headers={"Content-Type": "text/plain"}) + + @responses.activate + def with_a_passthru(): + assert not responses._default_mock.passthru_prefixes + responses.add_passthru(re.compile(".*")) + try: + response = requests.get("https://example.com") + except ConnectionError as err: # pragma: no cover + if "Failed to establish" in str(err): # pragma: no cover + pytest.skip("Cannot resolve DNS for example.com") # pragma: no cover + raise err # pragma: no cover + + assert response.status_code == 200 + + @responses.activate + def without_a_passthru(): + assert not responses._default_mock.passthru_prefixes + with pytest.raises(requests.exceptions.ConnectionError): + requests.get("https://example.com") + + with_a_passthru() + without_a_passthru() + + +def test_method_named_param(): + @responses.activate + def run(): + responses.add(method=responses.GET, url="http://example.com", body="OK") + resp = requests.get("http://example.com") + assert_response(resp, "OK") + + run() + assert_reset() + + +def test_passthru_unicode(): + @responses.activate + def run(): + with responses.RequestsMock() as m: + url = "http://موقع.وزارة-الاتصالات.مصر/" + clean_url = "http://xn--4gbrim.xn----ymcbaaajlc6dj7bxne2c.xn--wgbh1c/" + m.add_passthru(url) + assert m.passthru_prefixes[0] == clean_url + + run() + assert_reset() + + +def test_custom_target(monkeypatch): + requests_mock = responses.RequestsMock(target="something.else") + std_mock_mock = responses.std_mock.MagicMock() + patch_mock = std_mock_mock.patch + monkeypatch.setattr(responses, "std_mock", std_mock_mock) + requests_mock.start() + assert len(patch_mock.call_args_list) == 1 + assert patch_mock.call_args[1]["target"] == "something.else" + + +def test_cookies_from_headers(): + text = "こんにちは/世界" + quoted_text = responses.quote(text) + expected = {"x": "a", "y": quoted_text} + headers = {"set-cookie": "; ".join(k + "=" + v for k, v in expected.items())} + cookiejar = responses._cookies_from_headers(headers) + for k, v in cookiejar.items(): + assert isinstance(v, str) + assert v == expected[k] + + +@pytest.mark.parametrize( + "url", + ( + "http://example.com", + "http://example.com/some/path", + "http://example.com/other/path/", + ), +) +def test_request_param(url): + @responses.activate + def run(): + params = {"hello": "world", "example": "params"} + responses.add( + method=responses.GET, + url="{0}?hello=world".format(url), + body="test", + match_querystring=False, + ) + resp = requests.get(url, params=params) + assert_response(resp, "test") + assert_params(resp, params) + + resp = requests.get(url) + assert_response(resp, "test") + assert_params(resp, {}) + + run() + assert_reset() + + +def test_request_param_with_multiple_values_for_the_same_key(): + @responses.activate + def run(): + url = "http://example.com" + params = {"key1": ["one", "two"], "key2": "three"} + responses.add( + method=responses.GET, + url=url, + body="test", + ) + resp = requests.get(url, params=params) + assert_response(resp, "test") + assert_params(resp, params) + + run() + assert_reset() + + +@pytest.mark.parametrize( + "url", ("http://example.com", "http://example.com?hello=world") +) +def test_assert_call_count(url): + @responses.activate + def run(): + responses.add(responses.GET, url) + responses.add(responses.GET, "http://example1.com") + + assert responses.assert_call_count(url, 0) is True + + with pytest.raises(AssertionError) as excinfo: + 
responses.assert_call_count(url, 2)
+        assert "Expected URL '{0}' to be called 2 times. Called 0 times.".format(
+            url
+        ) in str(excinfo.value)
+
+        requests.get(url)
+        assert responses.assert_call_count(url, 1) is True
+
+        requests.get("http://example1.com")
+        assert responses.assert_call_count(url, 1) is True
+
+        requests.get(url)
+        with pytest.raises(AssertionError) as excinfo:
+            responses.assert_call_count(url, 3)
+        assert "Expected URL '{0}' to be called 3 times. Called 2 times.".format(
+            url
+        ) in str(excinfo.value)
+
+    run()
+    assert_reset()
+
+
+def test_fail_request_error():
+    """
+    Validate that exception is raised if request URL/Method/kwargs don't match
+    :return:
+    """
+
+    def run():
+        with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
+            rsps.add("POST", "http://example1.com")
+            rsps.add("GET", "http://example.com")
+
+            with pytest.raises(ConnectionError) as excinfo:
+                requests.post("http://example.com", data={"id": "bad"})
+
+            msg = str(excinfo.value)
+            assert "- POST http://example1.com/ URL does not match" in msg
+            assert "- GET http://example.com/ Method does not match" in msg
+
+    run()
+    assert_reset()
+
+
+@pytest.mark.parametrize(
+    "response_params, expected_representation",
+    [
+        (
+            {"method": responses.GET, "url": "http://example.com/"},
+            (
+                "<Response(url='http://example.com/' status=200 "
+                "content_type='text/plain' headers='null')>"
+            ),
+        ),
+        (
+            {
+                "method": responses.POST,
+                "url": "http://another-domain.com/",
+                "content_type": "application/json",
+                "status": 404,
+            },
+            (
+                "<Response(url='http://another-domain.com/' status=404 "
+                "content_type='application/json' headers='null')>"
+            ),
+        ),
+        (
+            {
+                "method": responses.PUT,
+                "url": "http://abcd.com/",
+                "content_type": "text/html",
+                "status": 500,
+                "headers": {"X-Test": "foo"},
+                "body": {"it_wont_be": "considered"},
+            },
+            (
+                "<Response(url='http://abcd.com/' status=500 "
+                "content_type='text/html' headers='{\"X-Test\": \"foo\"}')>"
+            ),
+        ),
+    ],
+)
+def test_response_representations(response_params, expected_representation):
+    response = Response(**response_params)
+
+    assert str(response) == expected_representation
+    assert repr(response) == expected_representation
+
+
+def test_mocked_responses_list_registered():
+    @responses.activate
+    def run():
+        first_response = Response(
+            responses.GET,
+            "http://example.com/",
+            body="",
+            headers={"X-Test": "foo"},
+            status=404,
+        )
+        second_response = Response(
+            responses.GET, "http://example.com/", body="", headers={"X-Test": "foo"}
+        )
+        third_response = Response(
+            responses.POST,
+            "http://anotherdomain.com/",
+        )
+        responses.add(first_response)
+        responses.add(second_response)
+        responses.add(third_response)
+
+        mocks_list = responses.registered()
+
+        assert mocks_list == responses.mock.registered()
+        assert mocks_list == [first_response, second_response, third_response]
+
+    run()
+    assert_reset()
+
+
+@pytest.mark.parametrize(
+    "url,other_url",
+    [
+        ("http://service-A/foo?q=fizz", "http://service-a/foo?q=fizz"),
+        ("http://service-a/foo", "http://service-A/foo"),
+        ("http://someHost-AwAy/", "http://somehost-away/"),
+        ("http://fizzbuzz/foo", "http://fizzbuzz/foo"),
+    ],
+)
+def test_rfc_compliance(url, other_url):
+    @responses.activate
+    def run():
+        responses.add(method=responses.GET, url=url)
+        resp = requests.request("GET", other_url)
+        assert_response(resp, "")
+
+    run()
+    assert_reset()
+
+
+def test_requests_between_add():
+    @responses.activate
+    def run():
+        responses.add(responses.GET, "https://example.com/", json={"response": "old"})
+        assert requests.get("https://example.com/").content == b'{"response": "old"}'
+        assert requests.get("https://example.com/").content == b'{"response": "old"}'
+        assert requests.get("https://example.com/").content == b'{"response": "old"}'
+
+        responses.add(responses.GET,
"https://example.com/", json={"response": "new"}) + + assert requests.get("https://example.com/").content == b'{"response": "new"}' + assert requests.get("https://example.com/").content == b'{"response": "new"}' + assert requests.get("https://example.com/").content == b'{"response": "new"}' + + run() + assert_reset() diff --git a/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/LICENSE.txt b/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/METADATA b/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..9c93e69a0fd11f7b218e1c8b4d6c8a897e6ead51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/METADATA @@ -0,0 +1,1124 @@ +Metadata-Version: 2.1 +Name: sacrebleu +Version: 2.4.2 +Summary: Hassle-free computation of shareable, comparable, and reproducible BLEU, chrF, and TER scores +Home-page: https://github.com/mjpost/sacrebleu +Author: Matt Post +Author-email: post@cs.jhu.edu +Maintainer-email: post@cs.jhu.edu +License: Apache License 2.0 +Keywords: machine translation, evaluation, NLP, natural language processing, computational linguistics +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Text Processing +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: POSIX +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Typing :: Typed +Requires-Python: >=3.6 +Description-Content-Type: text/markdown +License-File: LICENSE.txt +Requires-Dist: portalocker +Requires-Dist: regex +Requires-Dist: tabulate (>=0.8.9) +Requires-Dist: numpy (>=1.17) +Requires-Dist: colorama +Requires-Dist: lxml +Provides-Extra: dev +Requires-Dist: wheel ; extra == 'dev' +Requires-Dist: pytest ; extra == 'dev' +Requires-Dist: mypy ; extra == 'dev' +Requires-Dist: types-tabulate ; extra == 'dev' +Requires-Dist: lxml-stubs ; extra == 'dev' +Provides-Extra: ja +Requires-Dist: mecab-python3 (<=1.0.6,>=1.0.5) ; extra == 'ja' +Requires-Dist: ipadic (<2.0,>=1.0) ; extra == 'ja' +Provides-Extra: ko +Requires-Dist: mecab-ko (<=1.0.1,>=1.0.0) ; extra == 'ko' +Requires-Dist: mecab-ko-dic (<2.0,>=1.0) ; extra == 'ko' + +# sacreBLEU + +[![PyPI version](https://img.shields.io/pypi/v/sacrebleu)](https://img.shields.io/pypi/v/sacrebleu) +[![Python 
version](https://img.shields.io/pypi/pyversions/sacrebleu)](https://img.shields.io/pypi/pyversions/sacrebleu)
+[![GitHub issues](https://img.shields.io/github/issues/mjpost/sacreBLEU.svg)](https://github.com/mjpost/sacrebleu/issues)
+
+SacreBLEU ([Post, 2018](http://aclweb.org/anthology/W18-6319)) provides hassle-free computation of shareable, comparable, and reproducible **BLEU** scores.
+Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
+It also knows all the standard test sets and handles downloading, processing, and tokenization for you.
+
+The official version is hosted at <https://github.com/mjpost/sacrebleu>.
+
+# Motivation
+
+Comparing BLEU scores is harder than it should be. Every decoder has its own implementation, often borrowed from Moses, but maybe with subtle changes.
+Moses itself has a number of implementations as standalone scripts, with little indication of how they differ (note: they mostly don't, but `multi-bleu.pl` expects tokenized input). Different flags passed to each of these scripts can produce wide swings in the final score. All of these may handle tokenization in different ways. On top of this, downloading and managing test sets is a moderate annoyance.
+
+Sacre bleu! What a mess.
+
+**SacreBLEU** aims to solve these problems by wrapping the original reference implementation ([Papineni et al., 2002](https://www.aclweb.org/anthology/P02-1040.pdf)) together with other useful features.
+The defaults are set the way that BLEU should be computed, and furthermore, the script outputs a short version string that allows others to know exactly what you did.
+As an added bonus, it automatically downloads and manages test sets for you, so that you can simply tell it to score against `wmt14`, without having to hunt down a path on your local file system.
+It is all designed to take BLEU a little more seriously.
+After all, even with all its problems, BLEU is the default and---admit it---well-loved metric of our entire research community.
+Sacre BLEU.
+
+# Features
+
+- It automatically downloads common WMT test sets and processes them to plain text
+- It produces a short version string that facilitates cross-paper comparisons
+- It properly computes scores on detokenized outputs, using WMT ([Conference on Machine Translation](http://statmt.org/wmt17)) standard tokenization
+- It produces the same values as the official script (`mteval-v13a.pl`) used by WMT
+- It outputs the BLEU score without the comma, so you don't have to remove it with `sed` (Looking at you, `multi-bleu.perl`)
+- It supports different tokenizers for BLEU including support for Japanese and Chinese
+- It supports **chrF, chrF++** and **Translation error rate (TER)** metrics
+- It performs paired bootstrap resampling and paired approximate randomization tests for statistical significance reporting
+
+# Breaking Changes
+
+## v2.0.0
+
+As of v2.0.0, the default output format is changed to `json` for a less painful parsing experience. This means that software that parses the output of sacreBLEU should be modified to either (i) parse the JSON using for example the `jq` utility or (ii) pass `-f text` to sacreBLEU to preserve the old textual output. The latter change can also be made **persistently** by exporting `SACREBLEU_FORMAT=text` in relevant shell configuration files.
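+
+If the consumer is a Python script rather than a shell pipeline, the JSON output can be read with the standard library alone. A minimal sketch (the flags mirror the examples in this README; `output.detok.txt` is a placeholder for your own system output):
+
+```python
+import json
+import subprocess
+
+# Run sacreBLEU with its default JSON output and pull out the "score" key.
+proc = subprocess.run(
+    ["sacrebleu", "-i", "output.detok.txt", "-t", "wmt17", "-l", "en-de"],
+    check=True, capture_output=True, text=True,
+)
+result = json.loads(proc.stdout)
+print(result["score"])  # e.g. 20.8
+```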
+
+Here's an example of parsing the `score` key of the JSON output using `jq`:
+
+```
+$ sacrebleu -i output.detok.txt -t wmt17 -l en-de | jq -r .score
+20.8
+```
+
+# Installation
+
+Install the official Python module from PyPI (**Python>=3.6 only**):
+
+    pip install sacrebleu
+
+In order to install Japanese tokenizer support through `mecab-python3`, you need to run the
+following command instead, to perform a full installation with dependencies:
+
+    pip install "sacrebleu[ja]"
+
+In order to install Korean tokenizer support through `pymecab-ko`, you need to run the
+following command instead, to perform a full installation with dependencies:
+
+    pip install "sacrebleu[ko]"
+
+# Command-line Usage
+
+You can get a list of available test sets with `sacrebleu --list`. Please see [DATASETS.md](DATASETS.md)
+for an up-to-date list of supported datasets. You can also list available test sets for a given language pair
+with `sacrebleu --list -l en-fr`.
+
+## Basics
+
+### Downloading test sets
+
+Downloading is triggered when you request a test set. If the dataset is not available, it is downloaded
+and unpacked.
+
+E.g., you can use the following commands to download the source, pass it through your translation system
+in `translate.sh`, and then score it:
+
+```
+$ sacrebleu -t wmt17 -l en-de --echo src > wmt17.en-de.en
+$ cat wmt17.en-de.en | translate.sh | sacrebleu -t wmt17 -l en-de
+```
+
+Some test sets also have the outputs of systems that were submitted to the task.
+For example, the `wmt21/systems` test set.
+
+```bash
+$ sacrebleu -t wmt21/systems -l zh-en --echo NiuTrans
+```
+
+This provides a convenient way to score:
+
+```bash
+$ sacrebleu -t wmt21/systems -l zh-en --echo NiuTrans | sacrebleu -t wmt21/systems -l zh-en
+```
+
+You can see a list of the available outputs by passing an invalid value to `--echo`.
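+
+The same download-and-score flow can also be driven from Python. A minimal sketch; `get_source_file`/`get_reference_files` in `sacrebleu.utils` are the helpers the CLI uses internally, so treat the exact names as an assumption rather than a stable public API:
+
+```python
+from sacrebleu.utils import get_source_file, get_reference_files  # assumption: CLI-internal helpers
+
+# Requesting the files triggers the download and unpacking, as with the CLI.
+src_path = get_source_file("wmt17", "en-de")
+ref_paths = get_reference_files("wmt17", "en-de")
+
+with open(src_path, encoding="utf-8") as f:
+    sources = [line.rstrip("\n") for line in f]
+print(f"{len(sources)} source segments, {len(ref_paths)} reference file(s)")
+```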
+
+### JSON output
+
+As of version `>=2.0.0`, sacreBLEU prints the computed scores in JSON format to make parsing less painful:
+
+```
+$ sacrebleu -i output.detok.txt -t wmt17 -l en-de
+```
+
+```json
+{
+ "name": "BLEU",
+ "score": 20.8,
+ "signature": "nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0",
+ "verbose_score": "54.4/26.6/14.9/8.7 (BP = 1.000 ratio = 1.026 hyp_len = 62880 ref_len = 61287)",
+ "nrefs": "1",
+ "case": "mixed",
+ "eff": "no",
+ "tok": "13a",
+ "smooth": "exp",
+ "version": "2.0.0"
+}
+```
+
+If you want to keep the old behavior, you can pass `-f text` or export `SACREBLEU_FORMAT=text`:
+
+```
+$ sacrebleu -i output.detok.txt -t wmt17 -l en-de -f text
+BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 20.8 54.4/26.6/14.9/8.7 (BP = 1.000 ratio = 1.026 hyp_len = 62880 ref_len = 61287)
+```
+
+### Scoring
+
+(All examples below assume old-style text output for a compact representation that saves space)
+
+Let's say that you just translated the `en-de` test set of WMT17 with your fancy MT system and the **detokenized** translations are in a file called `output.detok.txt`:
+
+```
+# Option 1: Redirect system output to STDIN
+$ cat output.detok.txt | sacrebleu -t wmt17 -l en-de
+BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 20.8 54.4/26.6/14.9/8.7 (BP = 1.000 ratio = 1.026 hyp_len = 62880 ref_len = 61287)
+
+# Option 2: Use the --input/-i argument
+$ sacrebleu -t wmt17 -l en-de -i output.detok.txt
+BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 20.8 54.4/26.6/14.9/8.7 (BP = 1.000 ratio = 1.026 hyp_len = 62880 ref_len = 61287)
+```
+
+You can obtain a short version of the signature with `--short/-sh`:
+
+```
+$ sacrebleu -t wmt17 -l en-de -i output.detok.txt -sh
+BLEU|#:1|c:mixed|e:no|tok:13a|s:exp|v:2.0.0 = 20.8 54.4/26.6/14.9/8.7 (BP = 1.000 ratio = 1.026 hyp_len = 62880 ref_len = 61287)
+```
+
+If you only want the score to be printed, you can use the `--score-only/-b` flag:
+
+```
+$ sacrebleu -t wmt17 -l en-de -i output.detok.txt -b
+20.8
+```
+
+The precision of the scores can be configured via the `--width/-w` flag:
+
+```
+$ sacrebleu -t wmt17 -l en-de -i output.detok.txt -b -w 4
+20.7965
+```
+
+### Using your own reference file
+
+SacreBLEU knows about common test sets (as detailed in the `--list` example above), but you can also use it to score system outputs with arbitrary references. In this case, do not forget to provide **detokenized** reference and hypotheses files:
+
+```
+# Let's save the reference to a text file
+$ sacrebleu -t wmt17 -l en-de --echo ref > ref.detok.txt
+
+# Option 1: Pass the reference file as a positional argument to sacreBLEU
+$ sacrebleu ref.detok.txt -i output.detok.txt -m bleu -b -w 4
+20.7965
+
+# Option 2: Redirect the system into STDIN (Compatible with multi-bleu.perl way of doing things)
+$ cat output.detok.txt | sacrebleu ref.detok.txt -m bleu -b -w 4
+20.7965
+```
+
+### Using multiple metrics
+
+Let's first compute BLEU, chrF and TER with the default settings:
+
+```
+$ sacrebleu -t wmt17 -l en-de -i output.detok.txt -m bleu chrf ter
+ BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 20.8
+chrF2|nrefs:1|case:mixed|eff:yes|nc:6|nw:0|space:no|version:2.0.0 = 52.0
+TER|nrefs:1|case:lc|tok:tercom|norm:no|punct:yes|asian:no|version:2.0.0 = 69.0
+```
+
+Let's now enable `chrF++`, which is a revised version of chrF that takes into account word n-grams.
+Observe how the `nw:0` gets changed into `nw:2` in the signature:
+
+```
+$ sacrebleu -t wmt17 -l en-de -i output.detok.txt -m bleu chrf ter --chrf-word-order 2
+ BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 20.8
+chrF2++|nrefs:1|case:mixed|eff:yes|nc:6|nw:2|space:no|version:2.0.0 = 49.0
+TER|nrefs:1|case:lc|tok:tercom|norm:no|punct:yes|asian:no|version:2.0.0 = 69.0
+```
+
+Metric-specific arguments are detailed in the output of `--help`:
+
+```
+BLEU related arguments:
+  --smooth-method {none,floor,add-k,exp}, -s {none,floor,add-k,exp}
+                        Smoothing method: exponential decay, floor (increment zero counts), add-k (increment num/denom by k for n>1), or none. (Default: exp)
+  --smooth-value BLEU_SMOOTH_VALUE, -sv BLEU_SMOOTH_VALUE
+                        The smoothing value. Only valid for floor and add-k. (Defaults: floor: 0.1, add-k: 1)
+  --tokenize {none,zh,13a,char,intl,ja-mecab,ko-mecab}, -tok {none,zh,13a,char,intl,ja-mecab,ko-mecab}
+                        Tokenization method to use for BLEU. If not provided, defaults to `zh` for Chinese, `ja-mecab` for Japanese, `ko-mecab` for Korean and `13a` (mteval) otherwise.
+  --lowercase, -lc      If True, enables case-insensitivity. (Default: False)
+  --force               Insist that your tokenized input is actually detokenized.
+
+chrF related arguments:
+  --chrf-char-order CHRF_CHAR_ORDER, -cc CHRF_CHAR_ORDER
+                        Character n-gram order. (Default: 6)
+  --chrf-word-order CHRF_WORD_ORDER, -cw CHRF_WORD_ORDER
+                        Word n-gram order (Default: 0). If equals to 2, the metric is referred to as chrF++.
+  --chrf-beta CHRF_BETA
+                        Determine the importance of recall w.r.t precision. (Default: 2)
+  --chrf-whitespace     Include whitespaces when extracting character n-grams. (Default: False)
+  --chrf-lowercase      Enable case-insensitivity. (Default: False)
+  --chrf-eps-smoothing  Enables epsilon smoothing similar to chrF++.py, NLTK and Moses; instead of effective order smoothing. (Default: False)
+
+TER related arguments (The defaults replicate TERCOM's behavior):
+  --ter-case-sensitive  Enables case sensitivity (Default: False)
+  --ter-asian-support   Enables special treatment of Asian characters (Default: False)
+  --ter-no-punct        Removes punctuation. (Default: False)
+  --ter-normalized      Applies basic normalization and tokenization. (Default: False)
+```
+
+### Version Signatures
+As you may have noticed, sacreBLEU generates version strings such as `BLEU|nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0` for reproducibility reasons. It's strongly recommended to share these signatures in your papers!
+
+### Outputting other metadata
+
+sacreBLEU knows about metadata for some test sets, and you can output it like this:
+
+```
+$ sacrebleu -t wmt21 -l en-de --echo src docid ref | head -n 2
+Couple MACED at California dog park for not wearing face masks while having lunch (VIDEO) - RT USA News rt.com.131279 Paar in Hundepark in Kalifornien mit Pfefferspray besprüht, weil es beim Mittagessen keine Masken trug (VIDEO) - RT USA News
+There's mask-shaming and then there's full on assault. rt.com.131279 Masken-Shaming ist eine Sache, Körperverletzung eine andere.
+```
+
+If multiple fields are requested, they are output as tab-separated columns (a TSV).
+
+To see the available fields, add `--echo asdf` (or some other garbage data):
+
+```
+$ sacrebleu -t wmt21 -l en-de --echo asdf
+sacreBLEU: No such field asdf in test set wmt21 for language pair en-de.
+sacreBLEU: available fields for wmt21/en-de: src, ref:A, ref, docid, origlang
+```
+
+## Translationese Support
+
+If you are interested in the translationese effect, you can evaluate BLEU on a subset of sentences
+with a given original language (identified based on the `origlang` tag in the raw SGM files).
+E.g., to evaluate only against originally German sentences translated to English use:
+
+    $ sacrebleu -t wmt13 -l de-en --origlang=de -i my-wmt13-output.txt
+
+and to evaluate against the complement (in this case, every `origlang` other than de) use:
+
+    $ sacrebleu -t wmt13 -l de-en --origlang=non-de -i my-wmt13-output.txt
+
+**Please note** that the evaluator will return a BLEU score only on the requested subset,
+but it expects that you pass through the entire translated test set.
+
+## Languages & Preprocessing
+
+### BLEU
+
+- You can compute case-insensitive BLEU by passing `--lowercase` to sacreBLEU
+- The default tokenizer for BLEU is `13a` which mimics the `mteval-v13a` script from Moses.
+- Other tokenizers are:
+   - `none` which will not apply any kind of tokenization at all
+   - `char` for language-agnostic character-level tokenization
+   - `intl` applies international tokenization and mimics the `mteval-v14` script from Moses
+   - `zh` separates out **Chinese** characters and tokenizes the non-Chinese parts using `13a` tokenizer
+   - `ja-mecab` tokenizes **Japanese** inputs using the [MeCab](https://pypi.org/project/mecab-python3) morphological analyzer
+   - `ko-mecab` tokenizes **Korean** inputs using the [MeCab-ko](https://pypi.org/project/mecab-ko) morphological analyzer
+   - `flores101` and `flores200` use the SentencePiece models built from the Flores-101 and [Flores-200](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200) datasets, respectively. Note: the canonical .spm file will be automatically fetched if not found locally.
+- You can switch tokenizers using the `--tokenize` flag of sacreBLEU. Alternatively, if you provide language-pair strings
+  using `--language-pair/-l`, `zh`, `ja-mecab` and `ko-mecab` tokenizers will be used if the target language is `zh` or `ja` or `ko`, respectively.
+- **Note that** there's no automatic language detection from the hypotheses, so you need to make sure that you are correctly
+  selecting the tokenizer for **Japanese**, **Korean** and **Chinese**.
+
+
+The default `13a` tokenizer will produce poor results for Japanese:
+
+```
+$ sacrebleu kyoto-test.ref.ja -i kyoto-test.hyp.ja -b
+2.1
+```
+
+Let's use the `ja-mecab` tokenizer:
+```
+$ sacrebleu kyoto-test.ref.ja -i kyoto-test.hyp.ja --tokenize ja-mecab -b
+14.5
+```
+
+If you provide the language-pair, sacreBLEU will use ja-mecab automatically:
+
+```
+$ sacrebleu kyoto-test.ref.ja -i kyoto-test.hyp.ja -l en-ja -b
+14.5
+```
+
+### chrF / chrF++
+
+chrF applies minimal to no pre-processing, as it deals with character n-grams:
+
+- If you pass `--chrf-whitespace`, whitespace characters will be preserved when computing character n-grams.
+- If you pass `--chrf-lowercase`, sacreBLEU will compute case-insensitive chrF.
+- If you enable non-zero `--chrf-word-order` (pass `2` for `chrF++`), a very simple punctuation tokenization will be internally applied.
+
+
+### TER
+
+Translation Error Rate (TER) has its own special tokenizer that you can configure through the command line.
+The defaults provided are **compatible with the upstream TER implementation (TERCOM)** but you can nevertheless modify the +behavior through the command-line: + +- TER is by default case-insensitive. Pass `--ter-case-sensitive` to enable case-sensitivity. +- Pass `--ter-normalize` to apply a general Western tokenization +- Pass `--ter-asian-support` to enable the tokenization of Asian characters. If provided with `--ter-normalize`, + both will be applied. +- Pass `--ter-no-punct` to strip punctuation. + +## Multi-reference Evaluation + +All three metrics support the use of multiple references during evaluation. Let's first pass all references as positional arguments: + +``` +$ sacrebleu ref1 ref2 -i system -m bleu chrf ter + BLEU|nrefs:2|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 61.8 + chrF2|nrefs:2|case:mixed|eff:yes|nc:6|nw:0|space:no|version:2.0.0 = 75.0 +TER|nrefs:2|case:lc|tok:tercom|norm:no|punct:yes|asian:no|version:2.0.0 = 31.2 +``` + +Alternatively (less recommended), we can concatenate references using tabs as delimiters as well. Don't forget to pass `--num-refs/-nr` in this case! + +``` +$ paste ref1 ref2 > refs.tsv + +$ sacrebleu refs.tsv --num-refs 2 -i system -m bleu +BLEU|nrefs:2|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 = 61.8 +``` + +## Multi-system Evaluation +As of version `>=2.0.0`, SacreBLEU supports evaluation of an arbitrary number of systems for a particular +test set and language-pair. This has the advantage of seeing all results in a +nicely formatted table. + +Let's pass all system output files that match the shell glob `newstest2017.online-*` to sacreBLEU for evaluation: + +``` +$ sacrebleu -t wmt17 -l en-de -i newstest2017.online-* -m bleu chrf +╒═══════════════════════════════╤════════╤═════════╕ +│ System │ BLEU │ chrF2 │ +╞═══════════════════════════════╪════════╪═════════╡ +│ newstest2017.online-A.0.en-de │ 20.8 │ 52.0 │ +├───────────────────────────────┼────────┼─────────┤ +│ newstest2017.online-B.0.en-de │ 26.7 │ 56.3 │ +├───────────────────────────────┼────────┼─────────┤ +│ newstest2017.online-F.0.en-de │ 15.5 │ 49.3 │ +├───────────────────────────────┼────────┼─────────┤ +│ newstest2017.online-G.0.en-de │ 18.2 │ 51.6 │ +╘═══════════════════════════════╧════════╧═════════╛ + +----------------- +Metric signatures +----------------- + - BLEU nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 + - chrF2 nrefs:1|case:mixed|eff:yes|nc:6|nw:0|space:no|version:2.0.0 +``` + +You can also change the output format to `latex`: + +``` +$ sacrebleu -t wmt17 -l en-de -i newstest2017.online-* -m bleu chrf -f latex +\begin{tabular}{rcc} +\toprule + System & BLEU & chrF2 \\ +\midrule + newstest2017.online-A.0.en-de & 20.8 & 52.0 \\ + newstest2017.online-B.0.en-de & 26.7 & 56.3 \\ + newstest2017.online-F.0.en-de & 15.5 & 49.3 \\ + newstest2017.online-G.0.en-de & 18.2 & 51.6 \\ +\bottomrule +\end{tabular} + +... +``` + +## Confidence Intervals for Single System Evaluation + +When enabled with the `--confidence` flag, SacreBLEU will print +(1) the actual system score, (2) the true mean estimated from bootstrap resampling and (3), +the 95% [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval) around the mean. +By default, the number of bootstrap resamples is 1000 (`bs:1000` in the signature) +and can be changed with `--confidence-n`: + +``` +$ sacrebleu -t wmt17 -l en-de -i output.detok.txt -m bleu chrf --confidence -f text --short + BLEU|#:1|bs:1000|rs:12345|c:mixed|e:no|tok:13a|s:exp|v:2.0.0 = 22.675 (μ = 22.669 ± 0.598) ... 
+chrF2|#:1|bs:1000|rs:12345|c:mixed|e:yes|nc:6|nw:0|s:no|v:2.0.0 = 51.953 (μ = 51.953 ± 0.462)
+```
+
+**NOTE:** Although provided as a functionality, having access to confidence intervals for just one system
+may not reveal much information about the underlying model. It often makes more sense to perform
+**paired statistical tests** across multiple systems.
+
+**NOTE:** When resampling, the seed of `numpy`'s random number generator (RNG)
+is fixed to `12345`. If you want to relax this and set your own seed, you can
+export the environment variable `SACREBLEU_SEED` to an integer. Alternatively, you can export
+`SACREBLEU_SEED=None` to skip initializing the RNG's seed and allow for non-deterministic
+behavior.
+
+## Paired Significance Tests for Multi System Evaluation
+Ideally, one would have access to many systems in cases such as (1) investigating
+whether a newly added feature yields significantly different scores than the baseline or
+(2) evaluating submissions for a particular shared task. SacreBLEU offers two different paired significance tests that are widely used in MT research.
+
+### Paired bootstrap resampling (--paired-bs)
+
+This is an efficient implementation of the paper [Statistical Significance Tests for Machine Translation Evaluation](https://www.aclweb.org/anthology/W04-3250.pdf) and is result-compliant with the [reference Moses implementation](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/analysis/bootstrap-hypothesis-difference-significance.pl). The number of bootstrap resamples can be changed with the `--paired-bs-n` flag and its default is 1000.
+
+When launched, paired bootstrap resampling will perform:
+ - Bootstrap resampling to estimate 95% CI for all systems and the baseline
+ - A significance test between the **baseline** and each **system** to compute a [p-value](https://en.wikipedia.org/wiki/P-value).
+
+### Paired approximate randomization (--paired-ar)
+
+Paired approximate randomization (AR) is another type of paired significance test that is claimed to be more accurate than paired bootstrap resampling when it comes to Type-I errors ([Riezler and Maxwell III, 2005](https://www.aclweb.org/anthology/W05-0908.pdf)). A Type-I error is a false positive, i.e. rejecting the null hypothesis when it is actually true. In other words, AR should in theory be more robust to subtle changes across systems.
+
+Our implementation is verified to be result-compliant with the [Multeval toolkit](https://github.com/jhclark/multeval), which also uses the paired AR test for pairwise comparison. The number of approximate randomization trials is set to 10,000 by default. This can be changed with the `--paired-ar-n` flag.
+
+### Running the tests
+
+- The **first system** provided to `--input/-i` will be automatically taken as the **baseline system** against which you want to compare **other systems.**
+- When `--input/-i` is used, the system output files will be automatically named according to the file paths. For the sake of simplicity, SacreBLEU will automatically discard the **baseline system** if it also appears amongst **other systems**. This is useful if you would like to run the tool by passing `-i systems/baseline.txt systems/*.txt`. Here, the `baseline.txt` file will not also be considered as a candidate system.
+- Alternatively, you can also use a tab-separated input file redirected to SacreBLEU. In this case, the hypotheses in the first column will be taken as the **baseline system**.
However, this method is **not recommended** as it won't allow naming your systems in a human-readable way. It will instead enumerate the systems from 1 to N following the column order in the tab-separated input.
+- On Linux and Mac OS X, you can launch the tests on multiple CPUs by passing the flag `--paired-jobs N`. If `N == 0`, SacreBLEU will launch one worker for each pairwise comparison. If `N > 0`, `N` worker processes will be spawned. This feature will substantially speed up the runtime, especially if you want the **TER** metric to be computed.
+
+#### Example: Paired bootstrap resampling
+In the example below, we select `newstest2017.LIUM-NMT.4900.en-de` as the baseline and compare it to 4 other WMT17 submissions using paired bootstrap resampling. According to the results, the null hypothesis (i.e. the two systems being essentially the same) could not be rejected (at the significance level of 0.05) for the following comparisons:
+
+- 0.1 BLEU difference between the baseline and the online-B system (p = 0.3077)
+
+```
+$ sacrebleu -t wmt17 -l en-de -i newstest2017.LIUM-NMT.4900.en-de newstest2017.online-* -m bleu chrf --paired-bs
+╒════════════════════════════════════════════╤═════════════════════╤══════════════════════╕
+│ System │ BLEU (μ ± 95% CI) │ chrF2 (μ ± 95% CI) │
+╞════════════════════════════════════════════╪═════════════════════╪══════════════════════╡
+│ Baseline: newstest2017.LIUM-NMT.4900.en-de │ 26.6 (26.6 ± 0.6) │ 55.9 (55.9 ± 0.5) │
+├────────────────────────────────────────────┼─────────────────────┼──────────────────────┤
+│ newstest2017.online-A.0.en-de │ 20.8 (20.8 ± 0.6) │ 52.0 (52.0 ± 0.4) │
+│ │ (p = 0.0010)* │ (p = 0.0010)* │
+├────────────────────────────────────────────┼─────────────────────┼──────────────────────┤
+│ newstest2017.online-B.0.en-de │ 26.7 (26.6 ± 0.7) │ 56.3 (56.3 ± 0.5) │
+│ │ (p = 0.3077) │ (p = 0.0240)* │
+├────────────────────────────────────────────┼─────────────────────┼──────────────────────┤
+│ newstest2017.online-F.0.en-de │ 15.5 (15.4 ± 0.5) │ 49.3 (49.3 ± 0.4) │
+│ │ (p = 0.0010)* │ (p = 0.0010)* │
+├────────────────────────────────────────────┼─────────────────────┼──────────────────────┤
+│ newstest2017.online-G.0.en-de │ 18.2 (18.2 ± 0.5) │ 51.6 (51.6 ± 0.4) │
+│ │ (p = 0.0010)* │ (p = 0.0010)* │
+╘════════════════════════════════════════════╧═════════════════════╧══════════════════════╛
+
+------------------------------------------------------------
+Paired bootstrap resampling test with 1000 resampling trials
+------------------------------------------------------------
+ - Each system is pairwise compared to Baseline: newstest2017.LIUM-NMT.4900.en-de.
+   Actual system score / bootstrap estimated true mean / 95% CI are provided for each metric.
+
+ - Null hypothesis: the system and the baseline translations are essentially
+   generated by the same underlying process. For a given system and the baseline,
+   the p-value is roughly the probability of the absolute score difference (delta)
+   or higher occurring due to chance, under the assumption that the null hypothesis is correct.
+
+ - Assuming a significance threshold of 0.05, the null hypothesis can be rejected
+   for p-values < 0.05 (marked with "*"). This means that the delta is unlikely to be attributed
+   to chance, hence the system is significantly "different" than the baseline.
+   Otherwise, the p-values are highlighted in red.
+ + - NOTE: Significance does not tell whether a system is "better" than the baseline but rather + emphasizes the "difference" of the systems in terms of the replicability of the delta. + +----------------- +Metric signatures +----------------- + - BLEU nrefs:1|bs:1000|seed:12345|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 + - chrF2 nrefs:1|bs:1000|seed:12345|case:mixed|eff:yes|nc:6|nw:0|space:no|version:2.0.0 +``` + +#### Example: Paired approximate randomization + +Let's now run the paired approximate randomization test for the same comparison. According to the results, the findings are compatible with the paired bootstrap resampling test. However, the p-value for the `baseline vs. online-B` comparison is much higher (`0.8066`) than the paired bootstrap resampling test. + +(**Note that** the AR test does not provide confidence intervals around the true mean as it does not perform bootstrap resampling.) + +``` +$ sacrebleu -t wmt17 -l en-de -i newstest2017.LIUM-NMT.4900.en-de newstest2017.online-* -m bleu chrf --paired-ar +╒════════════════════════════════════════════╤═══════════════╤═══════════════╕ +│ System │ BLEU │ chrF2 │ +╞════════════════════════════════════════════╪═══════════════╪═══════════════╡ +│ Baseline: newstest2017.LIUM-NMT.4900.en-de │ 26.6 │ 55.9 │ +├────────────────────────────────────────────┼───────────────┼───────────────┤ +│ newstest2017.online-A.0.en-de │ 20.8 │ 52.0 │ +│ │ (p = 0.0001)* │ (p = 0.0001)* │ +├────────────────────────────────────────────┼───────────────┼───────────────┤ +│ newstest2017.online-B.0.en-de │ 26.7 │ 56.3 │ +│ │ (p = 0.8066) │ (p = 0.0385)* │ +├────────────────────────────────────────────┼───────────────┼───────────────┤ +│ newstest2017.online-F.0.en-de │ 15.5 │ 49.3 │ +│ │ (p = 0.0001)* │ (p = 0.0001)* │ +├────────────────────────────────────────────┼───────────────┼───────────────┤ +│ newstest2017.online-G.0.en-de │ 18.2 │ 51.6 │ +│ │ (p = 0.0001)* │ (p = 0.0001)* │ +╘════════════════════════════════════════════╧═══════════════╧═══════════════╛ + +------------------------------------------------------- +Paired approximate randomization test with 10000 trials +------------------------------------------------------- + - Each system is pairwise compared to Baseline: newstest2017.LIUM-NMT.4900.en-de. + Actual system score is provided for each metric. + + - Null hypothesis: the system and the baseline translations are essentially + generated by the same underlying process. For a given system and the baseline, + the p-value is roughly the probability of the absolute score difference (delta) + or higher occurring due to chance, under the assumption that the null hypothesis is correct. + + - Assuming a significance threshold of 0.05, the null hypothesis can be rejected + for p-values < 0.05 (marked with "*"). This means that the delta is unlikely to be attributed + to chance, hence the system is significantly "different" than the baseline. + Otherwise, the p-values are highlighted in red. + + - NOTE: Significance does not tell whether a system is "better" than the baseline but rather + emphasizes the "difference" of the systems in terms of the replicability of the delta. + +----------------- +Metric signatures +----------------- + - BLEU nrefs:1|ar:10000|seed:12345|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0 + - chrF2 nrefs:1|ar:10000|seed:12345|case:mixed|eff:yes|nc:6|nw:0|space:no|version:2.0.0 +``` + +# Using SacreBLEU from Python + +For evaluation, it may be useful to compute BLEU, chrF or TER from a Python script. 
The recommended
+way of doing this is to use the object-oriented API, by creating an instance of the `metrics.BLEU` class,
+for example:
+
+```python
+In [1]: from sacrebleu.metrics import BLEU, CHRF, TER
+   ...:
+   ...: refs = [ # First set of references
+   ...:     ['The dog bit the man.', 'It was not unexpected.', 'The man bit him first.'],
+   ...:     # Second set of references
+   ...:     ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.'],
+   ...: ]
+   ...: sys = ['The dog bit the man.', "It wasn't surprising.", 'The man had just bitten him.']
+
+In [2]: bleu = BLEU()
+
+In [3]: bleu.corpus_score(sys, refs)
+Out[3]: BLEU = 48.53 82.4/50.0/45.5/37.5 (BP = 0.943 ratio = 0.944 hyp_len = 17 ref_len = 18)
+
+In [4]: bleu.get_signature()
+Out[4]: nrefs:2|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0
+
+In [5]: chrf = CHRF()
+
+In [6]: chrf.corpus_score(sys, refs)
+Out[6]: chrF2 = 59.73
+```
+
+### Variable Number of References
+
+Let's now remove the first reference sentence for the first system sentence `The dog bit the man.` by replacing it with either `None` or the empty string `''`.
+This allows using a variable number of reference segments per hypothesis. Observe how the signature changes from `nrefs:2` to `nrefs:var`:
+
+```python
+In [1]: from sacrebleu.metrics import BLEU, CHRF, TER
+   ...:
+   ...: refs = [ # First set of references
+        # 1st sentence does not have a ref here
+   ...:     ['', 'It was not unexpected.', 'The man bit him first.'],
+   ...:     # Second set of references
+   ...:     ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.'],
+   ...: ]
+   ...: sys = ['The dog bit the man.', "It wasn't surprising.", 'The man had just bitten him.']
+
+In [2]: bleu = BLEU()
+
+In [3]: bleu.corpus_score(sys, refs)
+Out[3]: BLEU = 29.44 82.4/42.9/27.3/12.5 (BP = 0.889 ratio = 0.895 hyp_len = 17 ref_len = 19)
+
+In [4]: bleu.get_signature()
+Out[4]: nrefs:var|case:mixed|eff:no|tok:13a|smooth:exp|version:2.0.0
+```
+
+## Compatibility API
+
+You can also use the compatibility API that provides wrapper functions around the object-oriented API to
+compute sentence-level and corpus-level BLEU, chrF and TER. (Note that this API may be
+removed in future releases.)
+
+```python
+In [1]: import sacrebleu
+   ...:
+   ...: refs = [ # First set of references
+   ...:     ['The dog bit the man.', 'It was not unexpected.', 'The man bit him first.'],
+   ...:     # Second set of references
+   ...:     ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.'],
+   ...: ]
+   ...: sys = ['The dog bit the man.', "It wasn't surprising.", 'The man had just bitten him.']
+
+In [2]: sacrebleu.corpus_bleu(sys, refs)
+Out[2]: BLEU = 48.53 82.4/50.0/45.5/37.5 (BP = 0.943 ratio = 0.944 hyp_len = 17 ref_len = 18)
+```
+
+# License
+
+SacreBLEU is licensed under the [Apache 2.0 License](LICENSE.txt).
+
+# Credits
+
+This was all [Rico Sennrich's idea](https://twitter.com/RicoSennrich/status/883246242763026433).
+Originally written by Matt Post.
+New features and ongoing support provided by Martin Popel (@martinpopel) and Ozan Caglayan (@ozancaglayan).
+ +If you use SacreBLEU, please cite the following: + +``` +@inproceedings{post-2018-call, + title = "A Call for Clarity in Reporting {BLEU} Scores", + author = "Post, Matt", + booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", + month = oct, + year = "2018", + address = "Belgium, Brussels", + publisher = "Association for Computational Linguistics", + url = "https://www.aclweb.org/anthology/W18-6319", + pages = "186--191", +} +``` + +# Release Notes + +- 2.4.2 (2024-04-12) + Added: + - The CLI "--echo" now will return the domain for WMT22 and WMT23. + + Fixed: + - Default reference for wmt23:en-de + +- 2.4.1 (2024-03-12) + Fixed: + - Add exports to package __init__.py + +- 2.4.0 (2023-12-11) + Added: + - WMT23 test sets (test set `wmt23`) + +- 2.3.3 (2023-11-28) + Fixed: + - Typing issues (#249, #250) + - Improved builds (#252) + +- 2.3.2 (2023-11-06) + Fixed: + - Special treatment of empty references in TER (#232) + - Bump in mecab version for JA (#234) + + Added: + - Warning if `-tok spm` is used (use explicit `flores101` instead) (#238) + +- 2.3.1 (2022-10-18) + Bugfix: + - Set lru_cache to 2^16 for SPM tokenizer (was set to infinite) + +- 2.3.0 (2022-10-18) + Features: + - (#203) Added `-tok flores101` and `-tok flores200`, a.k.a. `spbleu`. + These are multilingual tokenizations that make use of the + multilingual SPM models released by Facebook and described in the + following papers: + * Flores-101: https://arxiv.org/abs/2106.03193 + * Flores-200: https://arxiv.org/abs/2207.04672 + - (#213) Added JSON formatting for multi-system output (thanks to Manikanta Inugurthi @me-manikanta) + - (#211) You can now list all test sets for a language pair with `--list SRC-TRG`. + Thanks to Jaume Zaragoza (@ZJaume) for adding this feature. + - Added WMT22 test sets (test set `wmt22`) + - System outputs: include with wmt22. Also added wmt21/systems which will produce WMT21 submitted systems. + To see available systems, give a dummy system to `--echo`, e.g., `sacrebleu -t wmt22 -l en-de --echo ?` + +- 2.2.1 (2022-09-13) + Bugfix: Standard usage was returning (and using) each reference twice. + +- 2.2.0 (2022-07-25) + Features: + - Added WMT21 datasets (thanks to @BrighXiaoHan) + - `--echo` now exposes document metadata where available (e.g., docid, genre, origlang) + - Bugfix: allow empty references (#161) + - Adds a Korean tokenizer (thanks to @NoUnique) + + Under the hood: + - Moderate code refactoring + - Processed files have adopted a more sensible internal naming scheme under ~/.sacrebleu + (e.g., wmt17_ms.zh-en.src instead of zh-en.zh) + - Processed file extensions correspond to the values passed to `--echo` (e.g., "src") + - Now explicitly representing NoneTokenizer + - Got rid of the ".lock" lockfile for downloading (using the tarball itself) + + Many thanks to @BrightXiaoHan (https://github.com/BrightXiaoHan) for the bulk of + the code contributions in this release. + +- 2.1.0 (2022-05-19) + Features: + - Added `-tok spm` for multilingual SPM tokenization (#168) + (thanks to Naman Goyal and James Cross at Facebook) + + Fixes: + - Handle potential memory usage issues due to LRU caching in tokenizers (#167) + - Bugfix: BLEU.corpus_score() now using max_ngram_order (#173) + - Upgraded ja-mecab to 1.0.5 (#196) + +- 2.0.0 (2021-07-18) + - Build: Add Windows and OS X testing to Travis CI. + - Improve documentation and type annotations. + - Drop `Python < 3.6` support and migrate to f-strings. 
  - Relax `portalocker` version pinning; add `regex`, `tabulate`, `numpy` dependencies.
  - Drop input type manipulation through `isinstance` checks. If the user does not follow the expected annotations, exceptions will be raised. Robustness attempts led to confusion and obfuscated score errors in the past (#121).
  - A variable number of references per segment is supported for all metrics by default. It is still only available through the API.
  - Use colored strings in tabular outputs (multi-system evaluation mode) with the help of the `colorama` package.
  - tokenizers: Add caching to tokenizers, which seems to speed things up a bit.
  - `intl` tokenizer: Use the `regex` module. Speed goes from ~4 seconds to ~0.6 seconds for a particular test set evaluation. (#46)
  - Signature: Formatting changed (mostly to remove the '+' separator, as it was interfering with chrF++). The field separator is now '|' and key values are separated with ':' rather than '.'.
  - Signature: Boolean true/false values are shortened to yes/no.
  - Signature: Number of references is `var` if a variable number of references is used.
  - Signature: Add effective order (yes/no) to BLEU and chrF signatures.
  - Metrics: Scale all metrics into the [0, 100] range (#140)
  - Metrics API: Use explicit argument names and defaults for the metrics instead of passing obscure `argparse.Namespace` objects.
  - Metrics API: A base abstract `Metric` class is introduced to guide further metric development. This class defines the methods that should be implemented in the derived classes and offers boilerplate methods for the common functionality. A new metric implemented this way will automatically support significance testing.
  - Metrics API: All metrics now receive an optional `references` argument at initialization time to process and cache the references. Further evaluations of different systems against the same references become faster this way, for example when using significance testing.
  - BLEU: In case of no n-gram matches at all, skip smoothing and return 0.0 BLEU (#141).
  - CHRF: Added multi-reference support, verified the scores against chrF++.py, added a test case.
  - CHRF: Added chrF+ support through the `word_order` argument. Added test cases against chrF++.py. Exposed it through the CLI (--chrf-word-order) (#124)
  - CHRF: Added the possibility to disable effective order smoothing (pass --chrf-eps-smoothing). This way, the scores obtained are exactly the same as the chrF++, Moses and NLTK implementations. We keep the effective ordering as the default for compatibility, since this only affects sentence-level scoring with very short sentences. (#144)
  - CLI: `--input/-i` can now ingest multiple systems. For this reason, the positional `references` should always precede the `-i` flag.
  - CLI: Allow modifying TER arguments through the CLI. We still keep the TERCOM defaults.
  - CLI: Prefix metric-specific arguments with --chrf and --ter. To maintain compatibility, BLEU argument names are kept the same.
  - CLI: Separate metric-specific arguments for clarity when `--help` is printed.
  - CLI: Added the `--format/-f` flag. The single-system output mode is now `json` by default. If you want to keep the old text format persistently, you can export `SACREBLEU_FORMAT=text` in your shell.
  - CLI: For multi-system mode, `json` falls back to plain text. `latex` output can only be generated for multi-system mode.
  - CLI: sacreBLEU now supports evaluating multiple systems for a given test set in an efficient way. Through the use of the `tabulate` package, the results are nicely rendered into a plain text table, LaTeX, HTML or RST (cf. the --format/-f argument). The systems can be given either as a list of plain text files to `-i/--input` or as a tab-separated single stream redirected into `STDIN`. In the former case, the basenames of the files will automatically be used as system names.
  - Statistical tests: sacreBLEU now supports confidence interval estimation through bootstrap resampling for single-system evaluation (the `--confidence` flag) as well as paired bootstrap resampling (`--paired-bs`) and paired approximate randomization tests (`--paired-ar`) when evaluating multiple systems (#40 and #78).

- 1.5.1 (2021-03-05)
  - Fix extraction error for the WMT18 extra test sets (test-ts) (#142)
  - Added validation and test datasets for multilingual TEDx

- 1.5.0 (2021-01-15)
  - Fix an assertion error in chrF (#121)
  - Add missing `__repr__()` methods for BLEU and TER
  - TER: Fix exception when `--short` is used (#131)
  - Pin Mecab version to 1.0.3 for Python 3.5 support
  - [API Change]: Default value for `floor` smoothing is now 0.1 instead of 0.
  - [API Change]: `sacrebleu.sentence_bleu()` now uses the `exp` smoothing method, exactly the same as the CLI's --sentence-level behavior. This was mainly done to make the two methods behave the same.
  - Add smoothing value to BLEU signature (#98)
  - dataset: Fix IWSLT links (#128)
  - Allow a variable number of references for BLEU (only via the API) (#130). Thanks to Ondrej Dusek (@tuetschek)

- 1.4.14 (2020-09-13)
  - Added character-based tokenization (`-tok char`). Thanks to Christian Federmann.
  - Added TER (`-m ter`). Thanks to Ales Tamchyna! (fixes #90)
  - Allow calling the script as a standalone utility (fixes #86)
  - Fix type annotation issues (fixes #100) and mark sacrebleu as supporting mypy
  - Added WMT20 robustness test sets:
    - wmt20/robust/set1 (en-ja, en-de)
    - wmt20/robust/set2 (en-ja, ja-en)
    - wmt20/robust/set3 (de-en)

- 1.4.13 (2020-07-30)
  - Added WMT20 newstest test sets (#103)
  - Made mecab3-python an extra dependency and adapted the code to the new mecab3-python. This fixes the recent Windows installation issues as well (#104). Japanese support should now be explicitly installed through the sacrebleu[ja] package.
  - Fix the return type annotation of corpus_bleu()
  - Improve sentence_score's documentation; do not allow a single ref string (#98)

- 1.4.12 (2020-07-03)
  - Fix a deployment bug (#96)

- 1.4.11 (2020-07-03)
  - Added Multi30k multimodal MT test set metadata
  - Refactored all tokenizers into respective classes (fixes #85)
  - Refactored all metrics into respective classes
  - Moved utility functions into `utils.py`
  - Implemented signatures using the `BLEUSignature` and `CHRFSignature` classes
  - Simplified checking of Chinese characters (fixes #5)
  - Unified common regexp tokenization codes for tokenizers (fixes #27)
  - Fixed --detail failing when no test sets are provided
  - Fixed multi-reference BLEU failing when a tab-delimited reference stream is used
  - Removed the lowercase option for ChrF, which was not functional (#85)
  - Simplified ChrF and used the same I/O logic as BLEU to allow for future multi-reference reading
  - Added score regression tests for chrF using the reference chrF++ implementation
  - Added multi-reference & tokenizer & signature tests

- 1.4.10 (2020-05-30)
  - Fixed a bug in the signature with the mecab tokenizer
  - Cleaned up deprecation warnings (thanks to Karthikeyan Singaravelan @tirkarthi)
  - Now only lists the external [typing](https://pypi.org/project/typing/) module as a dependency for Python `<= 3.4`, as it was integrated into the standard library in Python 3.5 (thanks to Erwan de Lépinau @ErwanDL).
  - Added LICENSE to pypi (thanks to Mark Harfouche @hmaarrfk)

- 1.4.9 (2020-04-30)
  - Changed `get_available_testsets()` to return a list
  - Removed the Japanese MeCab tokenizer from requirements. (Must be installed manually to avoid Windows incompatibility.) Many thanks to Makoto Morishita (@MorinoseiMorizo).

- 1.4.8 (2020-04-26)
  - Added to the API:
    - get_source_file()
    - get_reference_files()
    - get_available_testsets()
    - get_langpairs_for_testset()
  - Some internal refactoring
  - Fixed descriptions of some WMT19/google test sets
  - Added an API test case (test/test_apy.py)

- 1.4.7 (2020-04-19)
  - Added Google's extra wmt19/en-de refs (-t wmt19/google/{ar,arp,hqall,hqp,hqr,wmtp}) (Freitag, Grangier, & Caswell, "BLEU might be Guilty but References are not Innocent", https://arxiv.org/abs/2004.06063)
  - Restored SACREBLEU_DIR and smart_open to exports (thanks to Thomas Liao @tholiao)

- 1.4.6 (2020-03-28)
  - Large internal reorganization as a module (thanks to Thamme Gowda @thammegowda)

- 1.4.5 (2020-03-28)
  - Added the Japanese MeCab tokenizer (`-tok ja-mecab`) (thanks to Makoto Morishita @MorinoseiMorizo)
  - Added wmt20/dev test sets (thanks to Martin Popel @martinpopel)

- 1.4.4 (2020-03-10)
  - Smoothing changes (Sebastian Nickels @sn1c)
    - Fixed a bug that applied smoothing only to n-grams for n > 2
    - Added default smoothing values for methods "floor" (0) and "add-k" (1)
  - `--list` now returns a list of all language pairs for a task when combined with `-t` (e.g., `sacrebleu -t wmt19 --list`)
  - Added missing languages for IWSLT17
  - Minor code improvements (Thomas Liao @tholiao)

- 1.4.3 (2019-12-02)
  - Bugfix: handling of the result object for CHRF
  - Improved API example

- 1.4.2 (2019-10-11)
  - Tokenization variant omitted from the chrF signature; it is relevant only for BLEU (thanks to Martin Popel)
  - Bugfix: call to sentence_bleu (thanks to Rachel Bawden)
  - Documentation example for the Python API (thanks to Vlad Lyalin)
  - Calls to corpus_chrf and sentence_chrf now return an object instead of a float (use `result.score`)
- 1.4.1 (2019-09-11)
  - Added sentence-level scoring via -sl (--sentence-level)

- 1.4.0 (2019-09-10)
  - Many thanks to Martin Popel for all the changes below!
  - Added evaluation on concatenated test sets (e.g., `-t wmt17,wmt18`). Works as long as they all have the same language pair.
  - Added `sacrebleu --origlang` (both for evaluation on a subset and for `--echo`). Note that while echoing prints just the subset, evaluation expects the complete test set (and just skips the irrelevant parts).
  - Added `sacrebleu --detail` for a breakdown by domain-specific subsets of the test sets. (Available for WMT19.)
  - Minor changes
    - Improved display of `sacrebleu -h`
    - Added `sacrebleu --list`
    - Code refactoring
    - Documentation and tests updates
    - Fixed a race condition bug (`os.makedirs(outdir, exist_ok=True)` instead of `if os.path.exists`)

- 1.3.7 (2019-07-12)
  - Lazy loading of regexes cuts import time from ~1s to nearly nothing (thanks, @louismartin!)
  - Added a simple (non-atomic) lock on downloading
  - Can now read multiple refs from a single tab-delimited file. You need to pass `--num-refs N` to tell it to run the split. Only works with a single reference file passed from the command line.

- 1.3.6 (2019-06-10)
  - Removed another f-string for Python 3.5 compatibility

- 1.3.5 (2019-06-07)
  - Restored Python 3.5 compatibility

- 1.3.4 (2019-05-28)
  - Added MTNT 2019 test sets
  - Added a BLEU object

- 1.3.3 (2019-05-08)
  - Added WMT'19 test sets

- 1.3.2 (2019-04-24)
  - Bugfix in test case (thanks to Adam Roberts, @adarob)
  - Passing smoothing method through `sentence_bleu`

- 1.3.1 (2019-03-20)
  - Added another smoothing approach (add-k) and a command-line option for choosing the smoothing method (`--smooth exp|floor|add-k|none`) and the associated value (`--smooth-value`), when relevant.
  - Changed interface to some functions (backwards incompatible)
    - 'smooth' is now 'smooth_method'
    - 'smooth_floor' is now 'smooth_value'

- 1.2.21 (19 March 2019)
  - Ctrl-M characters are now treated as normal characters; previously they were treated as newlines.

- 1.2.20 (28 February 2019)
  - Tokenization now defaults to "zh" when the language pair is known

- 1.2.19 (19 February 2019)
  - Updated checksum for wmt19/dev (seems to have changed)

- 1.2.18 (19 February 2019)
  - Fixed checksum for wmt17/dev (copy-paste error)

- 1.2.17 (6 February 2019)
  - Added kk-en and en-kk to wmt19/dev

- 1.2.16 (4 February 2019)
  - Added gu-en and en-gu to wmt19/dev

- 1.2.15 (30 January 2019)
  - Added MD5 checksumming of downloaded files for all datasets.

- 1.2.14 (22 January 2019)
  - Added mtnt1.1/train mtnt1.1/valid mtnt1.1/test data from [MTNT](http://www.cs.cmu.edu/~pmichel1/mtnt/)

- 1.2.13 (22 January 2019)
  - Added 'wmt19/dev' task for 'lt-en' and 'en-lt' (development data for new tasks).
  - Added MD5 checksum for downloaded tarballs.
- 1.2.12 (8 November 2018)
  - Now outputs only one digit after the decimal

- 1.2.11 (29 August 2018)
  - Added a function for sentence-level, smoothed BLEU

- 1.2.10 (23 May 2018)
  - Added wmt18 test set (with references)

- 1.2.9 (15 May 2018)
  - Added zh-en, en-zh, tr-en, and en-tr datasets for wmt18/test-ts

- 1.2.8 (14 May 2018)
  - Added wmt18/test-ts, the test sources (only) for [WMT18](http://statmt.org/wmt18/translation-task.html)
  - Moved the README out of `sacrebleu.py` and the CHANGELOG into a separate file

- 1.2.7 (10 April 2018)
  - Fixed another locale issue (with --echo)
  - Grudgingly enabled `-tok none` from the command line

- 1.2.6 (22 March 2018)
  - Added wmt17/ms (Microsoft's [additional ZH-EN references](https://github.com/MicrosoftTranslator/Translator-HumanParityData)). Try `sacrebleu -t wmt17/ms --cite`.
  - `--echo ref` now pastes together all references, if there is more than one

- 1.2.5 (13 March 2018)
  - Added wmt18/dev datasets (en-et and et-en)
  - Fixed logic with --force
  - Locale-independent installation
  - Added "--echo both" (tab-delimited)

- 1.2.3 (28 January 2018)
  - Metrics (`-m`) are now printed in the order requested
  - chrF now prints a version string (including, importantly, the beta parameter)
  - Attempted to remove dependence on the locale setting

- 1.2 (17 January 2018)
  - Added the chrF metric (`-m chrf`, or `-m bleu chrf` for both). See "chrF: character n-gram F-score for automatic MT evaluation" by Maja Popovic (WMT 2015) [http://www.statmt.org/wmt15/pdf/WMT49.pdf]
  - Added IWSLT 2017 test and tuning sets for DE, FR, and ZH (thanks to Mauro Cettolo and Marcello Federico).
  - Added `--cite` to produce the citation for easy inclusion in papers
  - Added `--input` (`-i`) to set input to a file instead of STDIN
  - Removed the accent mark after an objection from a UN official

- 1.1.7 (27 November 2017)
  - corpus_bleu() now raises an exception if input streams are of different lengths
  - Thanks to Martin Popel for:
    - a small bugfix in tokenization_13a (not affecting WMT references)
    - adding `--tok intl` (international tokenization)
  - Added wmt16/dev and wmt17/dev sets (for languages introduced in those years)

- 1.1.6 (15 November 2017)
  - Bugfix for tokenization warning

- 1.1.5 (12 November 2017)
  - Added the -b option (only output the BLEU score)
  - Removed fi-en from the list of WMT16/17 systems with more than one reference
  - Added WMT16/tworefs and WMT17/tworefs for scoring with both en-fi references

- 1.1.4 (10 November 2017)
  - Added effective order for sentence-level BLEU computation
  - Added unit tests from sockeye

- 1.1.3 (8 November 2017)
  - Factored code a bit to facilitate the API:
    - compute_bleu: works from raw stats
    - corpus_bleu: for use from the command line
    - raw_corpus_bleu: turns off tokenization, command-line sanity checks, floor smoothing
  - Smoothing (type 'exp', now the default) fixed to produce mteval-v13a.pl results
  - Added 'floor' smoothing (adds 0.01 to 0 counts, more versatile via the API) and 'none' smoothing (via the API)
  - Small bugfixes, Windows compatibility (H/T Christian Federmann)

- 1.0.3 (4 November 2017)
  - Contributions from Christian Federmann:
    - Added explicit support for encoding
    - Fixed Windows support
    - Bugfix in handling reference length with multiple refs

- 1.0.1 (1 November 2017)
  - Small bugfix affecting some versions of Python.
  - Code reformatting thanks to Ozan Çağlayan.

- 1.0 (23 October 2017)
  - Support for WMT 2008--2017.
+ - Single tokenization (v13a) with lowercase fix (proper lower() instead of just A-Z). + - Chinese tokenization. + - Tested to match all WMT17 scores on all arcs. + diff --git a/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/RECORD b/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..7a5c0f98831b19408a2b9625bc09d57f23ed85c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/RECORD @@ -0,0 +1,75 @@ +../../../bin/sacrebleu,sha256=JUTqOpJiIYrEHdUWGB94WzI084w2SHhGWLCGCXeSQTc,247 +sacrebleu-2.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +sacrebleu-2.4.2.dist-info/LICENSE.txt,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357 +sacrebleu-2.4.2.dist-info/METADATA,sha256=nqHo74uPqdWFe4CAQXRARYbVcrPNfYjt8pCnOV6yzEM,58040 +sacrebleu-2.4.2.dist-info/RECORD,, +sacrebleu-2.4.2.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 +sacrebleu-2.4.2.dist-info/entry_points.txt,sha256=oacOmm24wUB3Xd7hB1dBABMzlZh9ReRJRNhaKu1fDlw,55 +sacrebleu-2.4.2.dist-info/top_level.txt,sha256=H3cpHXPQtQtVbKIwDN8Au2t8nwrTQlA9omGuoF7tJOA,10 +sacrebleu/__init__.py,sha256=UUPHuhC7GiZVAAQCArR_33Tgk-kBShN07vaS233ye1k,1706 +sacrebleu/__main__.py,sha256=Eim2Tft9Xcoh5PJ4n23mVnpP36UmecOtCAH1Upl5kao,1062 +sacrebleu/__pycache__/__init__.cpython-310.pyc,, +sacrebleu/__pycache__/__main__.cpython-310.pyc,, +sacrebleu/__pycache__/compat.cpython-310.pyc,, +sacrebleu/__pycache__/sacrebleu.cpython-310.pyc,, +sacrebleu/__pycache__/significance.cpython-310.pyc,, +sacrebleu/__pycache__/utils.cpython-310.pyc,, +sacrebleu/compat.py,sha256=rq8s6SgH9xgBc2uK6JXoH0HWZ6CbcpP_4_X66nQhyCs,9100 +sacrebleu/dataset/__init__.py,sha256=1KRahpk98bOFnIQaZLGDy0XgndrHjQXotCvogSvZ0RQ,106142 +sacrebleu/dataset/__main__.py,sha256=4zJ7F7mtk2LKCGorTJe0YCtZ0al5lvrJNMrfrxl3_OQ,1250 +sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc,, +sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc,, +sacrebleu/dataset/__pycache__/base.cpython-310.pyc,, +sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc,, +sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc,, +sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc,, +sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc,, +sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc,, +sacrebleu/dataset/base.py,sha256=TZGsir4PvdcO8YF_MWevd3Qb95ycQB2rvtAOkYIR0XM,6724 +sacrebleu/dataset/fake_sgml.py,sha256=SW00xrlhdc9Sr9Z8Q7RF8mXVuLo3s1aNLC-zu3Crevo,4098 +sacrebleu/dataset/iwslt_xml.py,sha256=nAwIXBfbcQLbbI3Eoe2DqdjGfD1V-429_qrwuZZqmj0,210 +sacrebleu/dataset/plain_text.py,sha256=AKYCHFRtVLCoFod5pKBrqvKRxYmkqoPwUpcPup1Jg_8,1237 +sacrebleu/dataset/tsv.py,sha256=m__O5lc8GmPvKl_901bndLAOuh9a1fK1koHOKWX6h90,2179 +sacrebleu/dataset/wmt_xml.py,sha256=6xyzgctfaFWSxsUjKseWPy6YwHZekB-Ci-aUQrZFHP0,7828 +sacrebleu/metrics/__init__.py,sha256=MzC5hSbprlwwvbeu6_6_FOz3L7c1KvtBkmkgOaGVTDk,260 +sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc,, +sacrebleu/metrics/__pycache__/base.cpython-310.pyc,, +sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc,, +sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc,, +sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc,, +sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc,, +sacrebleu/metrics/__pycache__/ter.cpython-310.pyc,, +sacrebleu/metrics/base.py,sha256=xTWUzNfXJIjnbiIYBlwhSoRgfiFYLqe84rIhYH1nSXM,16559 +sacrebleu/metrics/bleu.py,sha256=TqGdxEDu3H0P1uGKi-Y_BMNYK0xtpqtEGe7T0dJpgbs,17485 
+sacrebleu/metrics/chrf.py,sha256=lhkOB3nwuSOvsub17Mct9gQ0oR038gEj5vj-1IFQhXU,10674 +sacrebleu/metrics/helpers.py,sha256=VWngO3F_9gUa4uQFH1WJm4xZqpq3CLYeAWEOeMuECx4,2339 +sacrebleu/metrics/lib_ter.py,sha256=OBJjJHmDBzq_supZW68Vma-tkSVy1tibSLw_R093ncE,16477 +sacrebleu/metrics/ter.py,sha256=TuS52VLjayllPf-EB6-8Sf7EdkIGdJ0L1QfXzCSOcBo,7769 +sacrebleu/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sacrebleu/sacrebleu.py,sha256=ZwXF3KtqBb-NGDVYclj5gkO7IlfV8-T7gM7kBDPX1VM,28458 +sacrebleu/significance.py,sha256=PDO7mCu5zL-7XNc9tA7QKs0C3TkcfrJgbKqYXwnXTp4,18329 +sacrebleu/tokenizers/__init__.py,sha256=V1unPdEJPrfDKNDYR1VSLu1bXCX0Fr8Uu8y9pNcZXGA,89 +sacrebleu/tokenizers/__pycache__/__init__.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_13a.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_ko_mecab.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_none.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_spm.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_ter.cpython-310.pyc,, +sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc,, +sacrebleu/tokenizers/tokenizer_13a.py,sha256=_1ClpQPIGqRj6uaklsFegAvSZUtlbE-yztmr51dLirU,985 +sacrebleu/tokenizers/tokenizer_base.py,sha256=YNvqL3oW3rsCtUbMNat6RhlYSn2VlZuPHgYHBj6jfJg,461 +sacrebleu/tokenizers/tokenizer_char.py,sha256=ubPsBjzNXqFPJ7WEpHQ2XD2ZWWqPgeOli9ErHOxmjTw,458 +sacrebleu/tokenizers/tokenizer_intl.py,sha256=EGayKRqqcY6oV-Zstah4mrYw979W5jK0AWK8Uo3C55Q,1869 +sacrebleu/tokenizers/tokenizer_ja_mecab.py,sha256=WYHc7xtAruICGfe-D6RYmnyrXxrlHlyNXVqQVs-6l1o,1420 +sacrebleu/tokenizers/tokenizer_ko_mecab.py,sha256=BWTEkHp8d2dJOAmlqdBtLt2MHJtpOdM78CWztQ-qS00,1455 +sacrebleu/tokenizers/tokenizer_none.py,sha256=0VX-qoUM3HzuLpF59L-gHjNgmoXXXpco23zYJtwmEmw,236 +sacrebleu/tokenizers/tokenizer_re.py,sha256=OGck3pPhwibf48Dssy-6cZObsHNetwb3IIJnUYGRiAg,1271 +sacrebleu/tokenizers/tokenizer_spm.py,sha256=otBUswldtMcpE95vvIDf-74eddy35_9p88mgv9J5T9g,2096 +sacrebleu/tokenizers/tokenizer_ter.py,sha256=37KPsx0jwsSZsqZ7UJKWAYyttvqS11SxzkHZIjrZBdA,6219 +sacrebleu/tokenizers/tokenizer_zh.py,sha256=GmBe3E29uECf61dFsR_qAHNzATEkgwwp0XAqb8de9wU,4724 +sacrebleu/utils.py,sha256=lwyJiKniOQYhX5Yb2Dfe2kTROa0Fs-q-weO2kIVAz00,22530 diff --git a/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/WHEEL b/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..1f37c02f2eb2e26b306202feaccb31e522b8b169 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.40.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..454a21c03558b3210da0efe42581688e9cfc4b1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/top_level.txt @@ -0,0 +1 @@ +sacrebleu diff --git a/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/INSTALLER 
b/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..640b66e9f96cf8920639b5a109aa7fc951252a8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/METADATA @@ -0,0 +1,209 @@ +Metadata-Version: 2.3 +Name: tokenizers +Version: 0.19.1 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Dist: huggingface-hub >=0.16.4, <1.0 +Requires-Dist: pytest ; extra == 'testing' +Requires-Dist: requests ; extra == 'testing' +Requires-Dist: numpy ; extra == 'testing' +Requires-Dist: datasets ; extra == 'testing' +Requires-Dist: black ==22.3 ; extra == 'testing' +Requires-Dist: ruff ; extra == 'testing' +Requires-Dist: sphinx ; extra == 'docs' +Requires-Dist: sphinx-rtd-theme ; extra == 'docs' +Requires-Dist: setuptools-rust ; extra == 'docs' +Requires-Dist: tokenizers[testing] ; extra == 'dev' +Provides-Extra: testing +Provides-Extra: docs +Provides-Extra: dev +Keywords: NLP,tokenizer,BPE,transformer,deep learning +Author: Anthony MOI +Author-email: Nicolas Patry , Anthony Moi +Requires-Python: >=3.7 +Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM +Project-URL: Homepage, https://github.com/huggingface/tokenizers +Project-URL: Source, https://github.com/huggingface/tokenizers + +

# Tokenizers

Provides an implementation of today's most used tokenizers, with a focus on performance and versatility.

Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation. If you are interested in the high-level design, you can check it out there.

Otherwise, let's dive in!

## Main features:

 - Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3 most common BPE versions).
 - Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes less than 20 seconds to tokenize a GB of text on a server's CPU.
 - Easy to use, but also extremely versatile.
 - Designed for research and production.
 - Normalization comes with alignment tracking. It's always possible to get the part of the original sentence that corresponds to a given token.
 - Does all the pre-processing: truncate, pad, add the special tokens your model needs.

### Installation

#### With pip:

```bash
pip install tokenizers
```

#### From sources:

To use this method, you need to have Rust installed:

```bash
# Install with:
curl https://sh.rustup.rs -sSf | sh -s -- -y
export PATH="$HOME/.cargo/bin:$PATH"
```

Once Rust is installed, you can compile by doing the following:

```bash
git clone https://github.com/huggingface/tokenizers
cd tokenizers/bindings/python

# Create a virtual env (you can use yours as well)
python -m venv .env
source .env/bin/activate

# Install `tokenizers` in the current virtual env
pip install -e .
```

### Load a pretrained tokenizer from the Hub

```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-cased")
```

### Using the provided Tokenizers

We provide some pre-built tokenizers to cover the most common cases. You can easily load one of these using some `vocab.json` and `merges.txt` files:

```python
from tokenizers import CharBPETokenizer

# Initialize a tokenizer
vocab = "./path/to/vocab.json"
merges = "./path/to/merges.txt"
tokenizer = CharBPETokenizer(vocab, merges)

# And then encode:
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.ids)
print(encoded.tokens)
```

And you can train them just as simply:

```python
from tokenizers import CharBPETokenizer

# Initialize a tokenizer
tokenizer = CharBPETokenizer()

# Then train it!
tokenizer.train([ "./path/to/files/1.txt", "./path/to/files/2.txt" ])

# Now, let's use it:
encoded = tokenizer.encode("I can feel the magic, can you?")

# And finally save it somewhere
tokenizer.save("./path/to/directory/my-bpe.tokenizer.json")
```

#### Provided Tokenizers

 - `CharBPETokenizer`: The original BPE
 - `ByteLevelBPETokenizer`: The byte-level version of the BPE
 - `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece
 - `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece

All of these can be used and trained as explained above!

### Build your own

Whenever these provided tokenizers don't give you enough freedom, you can build your own tokenizer by putting all the different parts you need together. You can check how we implemented the [provided tokenizers](https://github.com/huggingface/tokenizers/tree/master/bindings/python/py_src/tokenizers/implementations) and adapt them easily to your own needs.
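To make the idea concrete before the fuller byte-level BPE walk-through below, here is a minimal sketch that wires together one possible combination of parts: a whitespace pre-tokenizer with a simple `WordLevel` model (the training file paths are placeholders):

```python
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import WordLevelTrainer

# A word-level model: every whitespace-separated token becomes a vocabulary entry
tokenizer = Tokenizer(WordLevel(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

# Train on your own files (placeholder paths)
trainer = WordLevelTrainer(special_tokens=["[UNK]", "[PAD]"])
tokenizer.train(["./path/to/files/1.txt"], trainer=trainer)

encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.tokens)
# Alignment tracking: each token maps back to a (start, end) character
# span in the original sentence
print(encoded.offsets)
```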
+ +#### Building a byte-level BPE + +Here is an example showing how to build your own byte-level BPE by putting all the different pieces +together, and then saving it to a single file: + +```python +from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors + +# Initialize a tokenizer +tokenizer = Tokenizer(models.BPE()) + +# Customize pre-tokenization and decoding +tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True) +tokenizer.decoder = decoders.ByteLevel() +tokenizer.post_processor = processors.ByteLevel(trim_offsets=True) + +# And then train +trainer = trainers.BpeTrainer( + vocab_size=20000, + min_frequency=2, + initial_alphabet=pre_tokenizers.ByteLevel.alphabet() +) +tokenizer.train([ + "./path/to/dataset/1.txt", + "./path/to/dataset/2.txt", + "./path/to/dataset/3.txt" +], trainer=trainer) + +# And Save it +tokenizer.save("byte-level-bpe.tokenizer.json", pretty=True) +``` + +Now, when you want to use this tokenizer, this is as simple as: + +```python +from tokenizers import Tokenizer + +tokenizer = Tokenizer.from_file("byte-level-bpe.tokenizer.json") + +encoded = tokenizer.encode("I can feel the magic, can you?") +``` + diff --git a/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..88de5d48c791917c7be67052af626dddf5ce3967 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/RECORD @@ -0,0 +1,45 @@ +tokenizers-0.19.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tokenizers-0.19.1.dist-info/METADATA,sha256=rCAgg9DA_ZsETxHzh_dz0hPeRKTvoj9m9kUNZe14vxc,6719 +tokenizers-0.19.1.dist-info/RECORD,, +tokenizers-0.19.1.dist-info/WHEEL,sha256=JL8sd1C0RQ2f7cmwbAn1Jp257v_vSS2r0VvTBpJeZwA,129 +tokenizers/__init__.py,sha256=ZE5ZagUvobBScrHBQdEobhx4wqM0bsq9F9aLYkBNjYQ,2615 +tokenizers/__init__.pyi,sha256=YBIWZCSN4Rs_-yKdEwhVv77bgHRE36hX9iwFrWGMJ8E,38536 +tokenizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/decoders/__init__.py,sha256=lGp32h8qerE0F48gyZL8wGmeQVlmjVpeIsRb1SM9kf4,335 +tokenizers/decoders/__init__.pyi,sha256=xsReo7OFRCiQ4bBZY9ogYb1iLJ5DTgI5elNB-Uggocs,7244 +tokenizers/decoders/__pycache__/__init__.cpython-310.pyc,, +tokenizers/implementations/__init__.py,sha256=VzAsplaIo7rl4AFO8Miu7ig7MfZjvonwVblZw01zR6M,310 +tokenizers/implementations/__pycache__/__init__.cpython-310.pyc,, +tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc,, +tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc,, +tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc,, +tokenizers/implementations/base_tokenizer.py,sha256=2TFZhLupaJiMDYGJuUNmxYJv-cnR8bDHmbMzaYpFROs,14206 +tokenizers/implementations/bert_wordpiece.py,sha256=sKCum0FKPYdSgJFJN8LDerVBoTDRSqyqSdrcm-lvQqI,5520 +tokenizers/implementations/byte_level_bpe.py,sha256=OA_jyy3EQmYTa6hnf-EKwLOFuyroqFYOJz25ysM2BUk,4289 +tokenizers/implementations/char_level_bpe.py,sha256=Q2ZEAW0xMQHF7YCUtmplwaxbU-J0P2NK4PJGMxUb-_c,5466 +tokenizers/implementations/sentencepiece_bpe.py,sha256=LwrofoohnUfME2lK2lQYoyQIhP84RP0CIlHRaj0hyNs,3738 
+tokenizers/implementations/sentencepiece_unigram.py,sha256=SYiVXL8ZtqLXKpuqwnwmrfxgGotu8yAkOu7dLztEXIo,7580 +tokenizers/models/__init__.py,sha256=eJZ4HTAQZpxnKILNylWaTFqxXy-Ba6OKswWN47feeV8,176 +tokenizers/models/__init__.pyi,sha256=wH4M-ZZprw3UQ98fxWrF3MpivuNVY3s3pv4pGY0A_kE,16932 +tokenizers/models/__pycache__/__init__.cpython-310.pyc,, +tokenizers/normalizers/__init__.py,sha256=hKOwnqWM-IlcVv7HDWT9SYhlczevuCNDQJY05ZFxkzk,808 +tokenizers/normalizers/__init__.pyi,sha256=5SGm-u896MZht6TXMS9sWv1lCATnwNqbC2Udl5aP4dg,19597 +tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/pre_tokenizers/__init__.py,sha256=wd6KYQA_RsGSQK-HeG9opTRhv4ttSRkyno2dk6az-PM,557 +tokenizers/pre_tokenizers/__init__.pyi,sha256=IhF7dZt9_9_WM2ESKwEIvN59uW_YzS2PzmWBUScysWU,23258 +tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/processors/__init__.py,sha256=xM2DEKwKtHIumHsszM8AMkq-AlaqvBZFXWgLU8SNhOY,307 +tokenizers/processors/__init__.pyi,sha256=hx767ZY8SHhxb_hiXPRxm-f_KcoR4XDx7vfK2c0lR-Q,11357 +tokenizers/processors/__pycache__/__init__.cpython-310.pyc,, +tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so,sha256=Pkcy8QzpfJ9ekDd03LlTwPFRj0Cyfih5kgdTEYNc784,11815960 +tokenizers/tools/__init__.py,sha256=xG8caB9OHC8cbB01S5vYV14HZxhO6eWbLehsb70ppio,55 +tokenizers/tools/__pycache__/__init__.cpython-310.pyc,, +tokenizers/tools/__pycache__/visualizer.cpython-310.pyc,, +tokenizers/tools/visualizer-styles.css,sha256=zAydq1oGWD8QEll4-eyL8Llw0B1sty_hpIE3tYxL02k,4850 +tokenizers/tools/visualizer.py,sha256=gi-E2NCP7FuG6ujpQOdalSTXUlaV85V6NI-ZPPTvA_4,14625 +tokenizers/trainers/__init__.py,sha256=UTu22AGcp76IvpW45xLRbJWET04NxPW6NfCb2YYz0EM,248 +tokenizers/trainers/__init__.pyi,sha256=3TwFKts4me7zQfVRcSTmtXYiP4XwcRjfAYtwqoZVtoQ,5382 +tokenizers/trainers/__pycache__/__init__.cpython-310.pyc,, diff --git a/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..efc3fe1c2443c99179e12648847b89e865477d5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: maturin (1.5.1) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64