diff --git a/.gitattributes b/.gitattributes index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..307ea240cc444a1ecc687b9c045ff6022ffcf1d0 100644 --- a/.gitattributes +++ b/.gitattributes @@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text # Video files - compressed *.mp4 filter=lfs diff=lfs merge=lfs -text *.webm filter=lfs diff=lfs merge=lfs -text +venv/bin/python3.10 filter=lfs diff=lfs merge=lfs -text +venv/bin/python3 filter=lfs diff=lfs merge=lfs -text diff --git a/ckpts/universal/global_step20/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..635f18fc0749784ff5f2ec8e925f0dfd9270d6b9 --- /dev/null +++ b/ckpts/universal/global_step20/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb6b6446bdae29355f1293778938dfb08693eef71ab527eccbce78cfe31ab26a +size 9387 diff --git a/ckpts/universal/global_step20/zero/25.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/25.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..1d014e752e6d2ec82cd2c5959d2c99875c6998d5 --- /dev/null +++ b/ckpts/universal/global_step20/zero/25.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6df689bb6d26916b7766779abb5d2afa813b1523c2559c25ab1aca442c6aa2ba +size 33555612 diff --git a/ckpts/universal/global_step20/zero/25.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step20/zero/25.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..d101f78ced23016351df66544cecf9c34081404c --- /dev/null +++ b/ckpts/universal/global_step20/zero/25.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:dd4cc5ad276cc1d8fbcd9bfb728fee63af0888c0dae3e53d5f56b86cc2a7a7e4 +size 33555533 diff --git a/ckpts/universal/global_step20/zero/9.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/9.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..2064baaa702d05ff84b5ac5bd2d3134f4628fd8e --- /dev/null +++ b/ckpts/universal/global_step20/zero/9.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa886f1d5c04fa81c652caa99724300fa7451e94319e7320596da284226665cb +size 9387 diff --git a/ckpts/universal/global_step20/zero/9.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/9.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..19852d308cdf5a632091ff3484067569d760db5d --- /dev/null +++ b/ckpts/universal/global_step20/zero/9.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c558cd1a5b68b5b2202cd86c94de386491908bb53050f375c441804a51133b8 +size 9293 diff --git a/lm-evaluation-harness/tests/testdata/textsynth_test_abcbcba648d89e5d81a50511a6d24ddeb538de2ffe108c1370dd74ce6ac8038d.pkl b/lm-evaluation-harness/tests/testdata/textsynth_test_abcbcba648d89e5d81a50511a6d24ddeb538de2ffe108c1370dd74ce6ac8038d.pkl new file mode 100644 index 0000000000000000000000000000000000000000..6b3a1d3f57a5a19f012439a4eb611af6a7f22ea7 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/textsynth_test_abcbcba648d89e5d81a50511a6d24ddeb538de2ffe108c1370dd74ce6ac8038d.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9278fa1ee2540397f38cc755be8cad1277c51dc92d91aeea8c4ba1a26eb8490 +size 1773 diff --git a/lm-evaluation-harness/tests/testdata/textsynth_test_f4bfe4beb605bd52a8ab6be3c9293639e7e2261d98de58159d15ccb83131bf4e.pkl 
b/lm-evaluation-harness/tests/testdata/textsynth_test_f4bfe4beb605bd52a8ab6be3c9293639e7e2261d98de58159d15ccb83131bf4e.pkl new file mode 100644 index 0000000000000000000000000000000000000000..03838a9bcf5b523ebbea661a3265fd5d05867143 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/textsynth_test_f4bfe4beb605bd52a8ab6be3c9293639e7e2261d98de58159d15ccb83131bf4e.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:beb2dc04ba4d834a434169fa0fe6dc5b6a20bb2b1144f25caf48703edb821ce5 +size 1911 diff --git a/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f3977aa97ee069000d256c58943796e970a33fc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715682598 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..67ac065ee7e4e922b3d16aaaa92074fc5a3bfbe5 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:10:29:58,840 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:10:30:03,656 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:10:30:03,659 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy 
seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:10:30:03,660 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +[2024-05-14 10:30:12,810] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +2024-05-14:10:30:13,204 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:10:30:13,204 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. 
using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:10:30:15,041 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:10:30:15,041 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:10:30:15,060 INFO [task.py:395] Building contexts for indiccopa-hi on rank 5... +100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 103517.42it/s] +Passed argument batch_size = auto:1. Detecting largest batch size +Determined largest batch size: 64 +2024-05-14:10:30:16,730 INFO [evaluator.py:379] Running loglikelihood requests +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. 
+ warnings.warn( \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 
+oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..b4cd255ac8e017b5b5f489ba7bd13cb25efd8ee6 --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T10:29:58.688518", + "startedAt": "2024-05-14T10:29:58.322047", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3396.287855263158, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.925, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3286.727, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, 
+ "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3287.915, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { 
+ "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 76.92151641845703 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e6011967adf661d00f7d085970391223077a0c73 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 25}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..5be0c67fc94bdb62aacb6023c4aaa37db89bc265 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/logs/debug-internal.log @@ -0,0 +1,194 @@ +2024-05-14 10:29:58,334 INFO StreamThr :1117 [internal.py:wandb_internal():85] W&B internal server running at pid: 1117, started at: 2024-05-14 10:29:58.333979 +2024-05-14 10:29:58,336 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: status +2024-05-14 10:29:58,338 INFO WriterThread:1117 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/run-w6h4b5nk.wandb +2024-05-14 10:29:58,339 DEBUG SenderThread:1117 
[sender.py:send():378] send: header +2024-05-14 10:29:58,350 DEBUG SenderThread:1117 [sender.py:send():378] send: run +2024-05-14 10:29:58,562 INFO SenderThread:1117 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files +2024-05-14 10:29:58,562 INFO SenderThread:1117 [sender.py:_start_run_threads():1123] run started: w6h4b5nk with start time 1715682598.333776 +2024-05-14 10:29:58,574 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 10:29:58,574 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: check_version +2024-05-14 10:29:58,656 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 10:29:58,658 DEBUG HandlerThread:1117 [system_info.py:__init__():26] System info init +2024-05-14 10:29:58,658 DEBUG HandlerThread:1117 [system_info.py:__init__():41] System info init done +2024-05-14 10:29:58,658 INFO HandlerThread:1117 [system_monitor.py:start():194] Starting system monitor +2024-05-14 10:29:58,658 INFO SystemMonitor:1117 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 10:29:58,658 INFO HandlerThread:1117 [system_monitor.py:probe():214] Collecting system info +2024-05-14 10:29:58,659 INFO SystemMonitor:1117 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 10:29:58,659 INFO SystemMonitor:1117 [interfaces.py:start():188] Started disk monitoring +2024-05-14 10:29:58,660 INFO SystemMonitor:1117 [interfaces.py:start():188] Started memory monitoring +2024-05-14 10:29:58,660 INFO SystemMonitor:1117 [interfaces.py:start():188] Started network monitoring +2024-05-14 10:29:58,688 DEBUG HandlerThread:1117 [system_info.py:probe():150] Probing system +2024-05-14 10:29:58,696 DEBUG HandlerThread:1117 [system_info.py:_probe_git():135] Probing git +2024-05-14 10:29:58,716 ERROR HandlerThread:1117 [gitlib.py:root():92] git root error: 
Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 10:29:58,716 DEBUG HandlerThread:1117 [system_info.py:_probe_git():143] Probing git done +2024-05-14 10:29:58,716 DEBUG HandlerThread:1117 [system_info.py:probe():198] Probing system done +2024-05-14 10:29:58,716 DEBUG HandlerThread:1117 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T10:29:58.688518', 'startedAt': '2024-05-14T10:29:58.322047', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3396.287855263158, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.925, 'min': 800.0, 'max': 3400.0}, {'current': 3286.727, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3287.915, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 76.92151641845703}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 10:29:58,716 INFO HandlerThread:1117 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 10:29:58,716 INFO HandlerThread:1117 [system_monitor.py:probe():227] Publishing system info +2024-05-14 10:29:58,718 INFO HandlerThread:1117 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 10:29:58,721 DEBUG SenderThread:1117 [sender.py:send():378] send: files +2024-05-14 10:29:58,721 INFO SenderThread:1117 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 10:29:58,834 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 10:29:58,834 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: python_packages +2024-05-14 10:29:58,834 DEBUG HandlerThread:1117 
[handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:29:58,835 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:29:58,936 DEBUG SenderThread:1117 [sender.py:send():378] send: telemetry +2024-05-14 10:29:59,236 INFO wandb-upload_0:1117 [upload_job.py:push():130] Uploaded file /tmp/tmpl6uzl4hfwandb/kkg3dbs7-wandb-metadata.json +2024-05-14 10:29:59,570 INFO Thread-12 :1117 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-metadata.json +2024-05-14 10:29:59,570 INFO Thread-12 :1117 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/requirements.txt +2024-05-14 10:29:59,570 INFO Thread-12 :1117 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log +2024-05-14 10:30:01,570 INFO Thread-12 :1117 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log +2024-05-14 10:30:03,658 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:05,573 INFO Thread-12 :1117 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log +2024-05-14 10:30:08,661 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:09,585 INFO Thread-12 :1117 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log +2024-05-14 10:30:13,588 INFO Thread-12 :1117 [dir_watcher.py:_on_file_modified():288] file/dir modified: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log +2024-05-14 10:30:13,835 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:30:13,835 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:30:13,956 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:15,590 INFO Thread-12 :1117 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log +2024-05-14 10:30:17,591 INFO Thread-12 :1117 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log +2024-05-14 10:30:19,238 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:19,593 INFO Thread-12 :1117 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log +2024-05-14 10:30:23,941 DEBUG SenderThread:1117 [sender.py:send():378] send: exit +2024-05-14 10:30:23,941 INFO SenderThread:1117 [sender.py:send_exit():585] handling exit code: 0 +2024-05-14 10:30:23,941 INFO SenderThread:1117 [sender.py:send_exit():587] handling runtime: 25 +2024-05-14 10:30:23,942 INFO SenderThread:1117 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:30:23,943 INFO SenderThread:1117 [sender.py:send_exit():593] send defer +2024-05-14 10:30:23,943 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,943 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 10:30:23,943 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,943 INFO SenderThread:1117 
[sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 10:30:23,943 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 1 +2024-05-14 10:30:23,943 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,943 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 10:30:23,943 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,944 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 10:30:23,944 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 2 +2024-05-14 10:30:23,944 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,944 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 10:30:23,944 INFO HandlerThread:1117 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 10:30:23,944 DEBUG SystemMonitor:1117 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 10:30:23,945 DEBUG SystemMonitor:1117 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 10:30:23,945 INFO HandlerThread:1117 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 10:30:23,945 DEBUG SystemMonitor:1117 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 10:30:23,945 INFO HandlerThread:1117 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 10:30:23,947 INFO HandlerThread:1117 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 10:30:23,947 INFO HandlerThread:1117 [interfaces.py:finish():200] Joined network monitor +2024-05-14 10:30:23,947 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,947 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 10:30:23,947 INFO 
SenderThread:1117 [sender.py:transition_state():613] send defer: 3 +2024-05-14 10:30:23,947 DEBUG SenderThread:1117 [sender.py:send():378] send: stats +2024-05-14 10:30:23,947 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,948 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 10:30:23,948 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,948 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 10:30:23,948 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 4 +2024-05-14 10:30:23,948 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,948 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 10:30:23,949 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,949 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 10:30:23,949 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 5 +2024-05-14 10:30:23,949 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:23,949 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 10:30:23,949 DEBUG SenderThread:1117 [sender.py:send():378] send: summary +2024-05-14 10:30:23,950 INFO SenderThread:1117 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:30:23,950 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,950 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 10:30:23,950 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 6 +2024-05-14 10:30:23,950 DEBUG HandlerThread:1117 [handler.py:handle_request():158] 
handle_request: defer +2024-05-14 10:30:23,950 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 10:30:23,950 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:23,950 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 10:30:23,953 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:24,102 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 7 +2024-05-14 10:30:24,103 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:24,103 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 10:30:24,103 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:24,103 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 10:30:24,597 INFO Thread-12 :1117 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/config.yaml +2024-05-14 10:30:24,597 INFO Thread-12 :1117 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-summary.json +2024-05-14 10:30:24,941 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:26,827 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 8 +2024-05-14 10:30:26,827 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:26,827 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:26,827 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 10:30:26,827 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 
10:30:26,827 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 10:30:26,828 INFO SenderThread:1117 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 10:30:26,828 INFO SenderThread:1117 [job_builder.py:_get_source_type():576] no source found +2024-05-14 10:30:26,828 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 9 +2024-05-14 10:30:26,828 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:26,828 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 10:30:26,828 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:26,828 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 10:30:26,828 INFO SenderThread:1117 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 10:30:26,942 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:27,598 INFO SenderThread:1117 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log +2024-05-14 10:30:27,598 INFO SenderThread:1117 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files +2024-05-14 10:30:27,599 INFO SenderThread:1117 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-summary.json wandb-summary.json +2024-05-14 10:30:27,599 INFO SenderThread:1117 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/config.yaml config.yaml +2024-05-14 10:30:27,600 INFO SenderThread:1117 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/requirements.txt 
requirements.txt +2024-05-14 10:30:27,600 INFO SenderThread:1117 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log output.log +2024-05-14 10:30:27,600 INFO SenderThread:1117 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-metadata.json wandb-metadata.json +2024-05-14 10:30:27,601 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 10 +2024-05-14 10:30:27,601 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:27,602 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:27,603 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 10:30:27,605 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:27,605 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 10:30:27,605 INFO SenderThread:1117 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:30:27,843 INFO wandb-upload_0:1117 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/wandb-summary.json +2024-05-14 10:30:27,942 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:27,943 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:28,116 INFO wandb-upload_1:1117 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/config.yaml +2024-05-14 10:30:28,132 INFO wandb-upload_3:1117 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/output.log +2024-05-14 10:30:28,133 INFO wandb-upload_2:1117 [upload_job.py:push():130] Uploaded 
file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/files/requirements.txt +2024-05-14 10:30:28,333 INFO Thread-11 (_thread_body):1117 [sender.py:transition_state():613] send defer: 11 +2024-05-14 10:30:28,333 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:28,334 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 10:30:28,334 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:28,334 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 10:30:28,334 INFO SenderThread:1117 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 10:30:28,334 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 12 +2024-05-14 10:30:28,334 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:28,334 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 10:30:28,335 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:28,335 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 10:30:28,335 INFO SenderThread:1117 [file_stream.py:finish():601] file stream finish called +2024-05-14 10:30:28,408 INFO SenderThread:1117 [file_stream.py:finish():605] file stream finish is done +2024-05-14 10:30:28,408 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 13 +2024-05-14 10:30:28,409 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:28,409 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 10:30:28,409 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:28,409 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 
10:30:28,409 INFO SenderThread:1117 [sender.py:transition_state():613] send defer: 14 +2024-05-14 10:30:28,409 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:28,409 DEBUG SenderThread:1117 [sender.py:send():378] send: final +2024-05-14 10:30:28,409 INFO HandlerThread:1117 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 10:30:28,409 DEBUG SenderThread:1117 [sender.py:send():378] send: footer +2024-05-14 10:30:28,409 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:28,409 INFO SenderThread:1117 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 10:30:28,410 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:28,410 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:28,410 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:28,410 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:28,411 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 10:30:28,411 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 10:30:28,411 DEBUG SenderThread:1117 [sender.py:send_request():405] send_request: server_info +2024-05-14 10:30:28,412 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 10:30:28,412 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 10:30:28,464 INFO MainThread:1117 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 10:30:28,464 INFO MainThread:1117 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 10:30:28,464 INFO MainThread:1117 [wandb_run.py:_footer_sync_info():3953] logging synced 
files +2024-05-14 10:30:28,464 DEBUG HandlerThread:1117 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 10:30:28,464 INFO HandlerThread:1117 [handler.py:finish():882] shutting down handler +2024-05-14 10:30:29,411 INFO WriterThread:1117 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/run-w6h4b5nk.wandb +2024-05-14 10:30:29,463 INFO SenderThread:1117 [sender.py:finish():1545] shutting down sender +2024-05-14 10:30:29,463 INFO SenderThread:1117 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:30:29,463 INFO SenderThread:1117 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..61943ff7ae57aa9e059fc7f07cd1760790134251 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 10:29:58,330 INFO MainThread:485 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 10:29:58,330 INFO MainThread:485 [wandb_setup.py:_flush():76] Configure stats pid to 485 +2024-05-14 10:29:58,330 INFO MainThread:485 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 10:29:58,330 INFO MainThread:485 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 10:29:58,330 INFO MainThread:485 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 10:29:58,330 INFO MainThread:485 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 10:29:58,330 WARNING MainThread:485 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 10:29:58,330 INFO MainThread:485 [wandb_setup.py:_flush():76] Inferring 
run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 10:29:58,330 INFO MainThread:485 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 10:29:58,331 INFO MainThread:485 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/logs/debug.log +2024-05-14 10:29:58,331 INFO MainThread:485 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/logs/debug-internal.log +2024-05-14 10:29:58,331 INFO MainThread:485 [wandb_init.py:init():560] calling init triggers +2024-05-14 10:29:58,331 INFO MainThread:485 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 10:29:58,331 INFO MainThread:485 [wandb_init.py:init():610] starting backend +2024-05-14 10:29:58,331 INFO MainThread:485 [wandb_init.py:init():614] setting up manager +2024-05-14 10:29:58,332 INFO MainThread:485 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 10:29:58,333 INFO MainThread:485 [wandb_init.py:init():622] backend started and connected +2024-05-14 10:29:58,338 INFO MainThread:485 [wandb_init.py:init():711] updated telemetry +2024-05-14 10:29:58,349 INFO MainThread:485 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 10:29:58,573 INFO MainThread:485 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 10:29:58,652 INFO MainThread:485 [wandb_run.py:_on_init():2405] got version response +2024-05-14 10:29:58,652 INFO MainThread:485 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 10:29:58,835 INFO MainThread:485 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 10:29:58,835 INFO MainThread:485 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 10:29:58,835 INFO MainThread:485 
[wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 10:29:58,835 INFO MainThread:485 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 10:29:58,837 INFO MainThread:485 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 10:30:29,465 WARNING MsgRouterThr:485 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/run-w6h4b5nk.wandb b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/run-w6h4b5nk.wandb new file mode 100644 index 0000000000000000000000000000000000000000..c0a62696a63f0bae633902eabf8fe8f32dbd719e Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_102958-w6h4b5nk/run-w6h4b5nk.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-b0385ab4/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_103002-b0385ab4/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..a2e80451b7a383cb63a827f05c3609e8a27c25be --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103002-b0385ab4/files/wandb-summary.json @@ -0,0 +1 @@ +{"indiccopa-hi/alias": "indiccopa-hi", "indiccopa-hi/acc": 0.534521158129176, "indiccopa-hi/acc_stderr": 0.023566409283576152, "_timestamp": 1715682625.0031326, "_runtime": 22.88487458229065, "_step": 1, "evaluation/eval_results": {"_type": "table-file", "sha256": "c78c57917f215d296d9d79a9c088f03a0c26963e80eec711a3cc1777734a1c37", "size": 169, "artifact_path": "wandb-client-artifact://nuzl2uw6upej9umtg8ll62hduox5ul1dbunphsgzs76lygv8us1enpg4azbmgchu1mx9pxkblhqdj3kfoijvhszff1wfrftwiwh46y9zc3f4taijyw1ergr6g2hzeccw/evaluation/eval_results.table.json", "_latest_artifact_path": "wandb-client-artifact://vlri63xsffx2z4ze5gth7td5x2ceo4imj416bffuivyvaxzsvjg9bq6yzbtoke5zkcv9evow2o5uffw9dxhys09e56daxv1rn40uaw71fxjb1zdk9i66678jcw3np99t:latest/evaluation/eval_results.table.json", "path": 
"media/table/evaluation/eval_results_1_c78c57917f215d296d9d.table.json", "ncols": 7, "nrows": 1}, "_wandb": {"runtime": 22}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-b0385ab4/run-b0385ab4.wandb b/lm-evaluation-harness/wandb/run-20240514_103002-b0385ab4/run-b0385ab4.wandb new file mode 100644 index 0000000000000000000000000000000000000000..4bc6ea6c6243f86876acf655848381b4009b75e5 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103002-b0385ab4/run-b0385ab4.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9400ed6fb4b71cbea320fefea3535c49fbdff733 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715704566 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/output.log b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..77f6864b658fef06e3abacb19a48339e1baa8556 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/output.log @@ -0,0 +1,33 @@ + +2024-05-14:16:36:07,330 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:36:11,963 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:16:36:11,964 INFO [evaluator.py:131] Setting random seed to 
0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:36:11,965 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step120'} +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File 
"/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 369, in cached_file + raise EnvironmentError( +OSError: /data/cronscript/ckpts//hf_ckpt//global_step120 does not appear to have a file named config.json. Checkout 'https://huggingface.co//data/cronscript/ckpts//hf_ckpt//global_step120/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 
+lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git 
a/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..c00e66be40dd80bd55d8605c68a24d983d5c5289 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:36:07.130401", + "startedAt": "2024-05-14T16:36:06.652925", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3397.368427631579, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, 
+ "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3298.85, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3267.015, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3312.534, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3312.515, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3241.438, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3312.184, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, 
+ { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3234.72, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3335.344, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3236.947, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3297.462, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3222.149, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4253349304199 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..74ae82ca002f26112178f0cd636ac5b92bf8e035 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 6}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..c8c4b6d70fa83f72b35493d84448a9d95b731761 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-14 16:36:06,665 INFO StreamThr :125129 [internal.py:wandb_internal():85] W&B internal server running at pid: 125129, started at: 2024-05-14 16:36:06.664430 +2024-05-14 16:36:06,667 DEBUG 
HandlerThread:125129 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:36:06,668 INFO WriterThread:125129 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/run-rpovsc5l.wandb +2024-05-14 16:36:06,669 DEBUG SenderThread:125129 [sender.py:send():378] send: header +2024-05-14 16:36:06,678 DEBUG SenderThread:125129 [sender.py:send():378] send: run +2024-05-14 16:36:06,948 INFO SenderThread:125129 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files +2024-05-14 16:36:06,948 INFO SenderThread:125129 [sender.py:_start_run_threads():1123] run started: rpovsc5l with start time 1715704566.664129 +2024-05-14 16:36:06,955 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:36:06,956 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:36:07,039 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:36:07,041 DEBUG HandlerThread:125129 [system_info.py:__init__():26] System info init +2024-05-14 16:36:07,041 DEBUG HandlerThread:125129 [system_info.py:__init__():41] System info init done +2024-05-14 16:36:07,041 INFO HandlerThread:125129 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:36:07,041 INFO SystemMonitor:125129 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:36:07,042 INFO HandlerThread:125129 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:36:07,042 INFO SystemMonitor:125129 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:36:07,042 INFO SystemMonitor:125129 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:36:07,043 INFO SystemMonitor:125129 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:36:07,043 INFO SystemMonitor:125129 
[interfaces.py:start():188] Started network monitoring +2024-05-14 16:36:07,130 DEBUG HandlerThread:125129 [system_info.py:probe():150] Probing system +2024-05-14 16:36:07,139 DEBUG HandlerThread:125129 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:36:07,159 ERROR HandlerThread:125129 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:36:07,159 DEBUG HandlerThread:125129 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:36:07,159 DEBUG HandlerThread:125129 [system_info.py:probe():198] Probing system done +2024-05-14 16:36:07,159 DEBUG HandlerThread:125129 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:36:07.130401', 'startedAt': '2024-05-14T16:36:06.652925', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3397.368427631579, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3298.85, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3267.015, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3312.534, 'min': 800.0, 'max': 3400.0}, {'current': 3312.515, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3241.438, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3312.184, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3234.72, 'min': 800.0, 'max': 3400.0}, {'current': 3335.344, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3236.947, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3297.462, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3222.149, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 
'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4253349304199}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:36:07,159 INFO HandlerThread:125129 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:36:07,159 INFO HandlerThread:125129 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:36:07,160 INFO HandlerThread:125129 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:36:07,164 DEBUG SenderThread:125129 [sender.py:send():378] send: files +2024-05-14 16:36:07,164 INFO 
SenderThread:125129 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:36:07,323 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:36:07,323 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:36:07,324 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:36:07,325 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:36:07,403 DEBUG SenderThread:125129 [sender.py:send():378] send: telemetry +2024-05-14 16:36:07,670 INFO wandb-upload_0:125129 [upload_job.py:push():130] Uploaded file /tmp/tmpx7o30jn3wandb/fd4k7gum-wandb-metadata.json +2024-05-14 16:36:07,949 INFO Thread-12 :125129 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-metadata.json +2024-05-14 16:36:07,950 INFO Thread-12 :125129 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/requirements.txt +2024-05-14 16:36:07,950 INFO Thread-12 :125129 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/output.log +2024-05-14 16:36:09,950 INFO Thread-12 :125129 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/output.log +2024-05-14 16:36:11,964 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:36:13,083 DEBUG SenderThread:125129 [sender.py:send():378] send: exit +2024-05-14 16:36:13,083 INFO SenderThread:125129 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 16:36:13,083 INFO SenderThread:125129 [sender.py:send_exit():587] handling runtime: 6 +2024-05-14 
16:36:13,084 INFO SenderThread:125129 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:36:13,084 INFO SenderThread:125129 [sender.py:send_exit():593] send defer +2024-05-14 16:36:13,084 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:13,084 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:36:13,084 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:13,084 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:36:13,084 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:36:13,084 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:13,084 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:36:13,084 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:13,084 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:36:13,084 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:36:13,084 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:13,084 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:36:13,084 INFO HandlerThread:125129 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:36:13,085 DEBUG SystemMonitor:125129 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:36:13,085 INFO HandlerThread:125129 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:36:13,085 DEBUG SystemMonitor:125129 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:36:13,085 INFO HandlerThread:125129 
[interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:36:13,085 DEBUG SystemMonitor:125129 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:36:13,085 INFO HandlerThread:125129 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:36:13,087 INFO HandlerThread:125129 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:36:13,087 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:13,087 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:36:13,087 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:36:13,087 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:13,087 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:36:13,087 DEBUG SenderThread:125129 [sender.py:send():378] send: stats +2024-05-14 16:36:13,088 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:13,088 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:36:13,088 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:36:13,088 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:13,088 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:36:13,088 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:13,088 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:36:13,088 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:36:13,088 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:13,088 INFO HandlerThread:125129 
[handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:36:13,088 DEBUG SenderThread:125129 [sender.py:send():378] send: summary +2024-05-14 16:36:13,089 INFO SenderThread:125129 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:36:13,089 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:13,089 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:36:13,089 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:36:13,089 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:13,089 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:36:13,089 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:13,089 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:36:13,092 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:36:13,156 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:36:13,156 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:13,156 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:36:13,156 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:13,156 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:36:13,953 INFO Thread-12 :125129 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/output.log +2024-05-14 16:36:13,953 INFO Thread-12 :125129 [dir_watcher.py:_on_file_modified():288] file/dir modified: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/config.yaml +2024-05-14 16:36:13,953 INFO Thread-12 :125129 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-summary.json +2024-05-14 16:36:14,083 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:36:15,417 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:36:15,417 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:36:15,417 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:15,417 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:36:15,417 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:15,417 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:36:15,417 INFO SenderThread:125129 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:36:15,418 INFO SenderThread:125129 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:36:15,418 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:36:15,418 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:15,418 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:36:15,418 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:15,418 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 16:36:15,418 INFO SenderThread:125129 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:36:15,954 INFO SenderThread:125129 [dir_watcher.py:_on_file_modified():288] file/dir 
modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/output.log +2024-05-14 16:36:15,955 INFO SenderThread:125129 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files +2024-05-14 16:36:15,955 INFO SenderThread:125129 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:36:15,955 INFO SenderThread:125129 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/config.yaml config.yaml +2024-05-14 16:36:15,955 INFO SenderThread:125129 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/requirements.txt requirements.txt +2024-05-14 16:36:15,955 INFO SenderThread:125129 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/output.log output.log +2024-05-14 16:36:15,955 INFO SenderThread:125129 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-summary.json wandb-summary.json +2024-05-14 16:36:15,955 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:36:15,958 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:15,958 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:36:15,960 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:15,960 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:36:15,960 INFO SenderThread:125129 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:36:16,083 DEBUG HandlerThread:125129 
[handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:36:16,083 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:36:16,194 INFO wandb-upload_0:125129 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/config.yaml +2024-05-14 16:36:16,474 INFO wandb-upload_2:125129 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/output.log +2024-05-14 16:36:16,481 INFO wandb-upload_3:125129 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/wandb-summary.json +2024-05-14 16:36:16,529 INFO wandb-upload_1:125129 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/files/requirements.txt +2024-05-14 16:36:16,729 INFO Thread-11 (_thread_body):125129 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:36:16,729 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:16,729 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:36:16,729 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:16,730 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:36:16,730 INFO SenderThread:125129 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 16:36:16,730 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:36:16,730 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:16,730 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:36:16,730 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 
16:36:16,730 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:36:16,730 INFO SenderThread:125129 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:36:16,945 INFO SenderThread:125129 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:36:16,945 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:36:16,945 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:16,945 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:36:16,945 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:16,945 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:36:16,945 INFO SenderThread:125129 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:36:16,945 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:36:16,945 INFO HandlerThread:125129 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:36:16,946 DEBUG SenderThread:125129 [sender.py:send():378] send: final +2024-05-14 16:36:16,946 DEBUG SenderThread:125129 [sender.py:send():378] send: footer +2024-05-14 16:36:16,946 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: defer +2024-05-14 16:36:16,946 INFO SenderThread:125129 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:36:16,946 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:36:16,946 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:36:16,946 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:36:16,947 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 
16:36:16,947 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:36:16,947 DEBUG SenderThread:125129 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:36:16,948 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:36:16,948 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:36:16,948 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:36:17,011 INFO MainThread:125129 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:36:17,011 INFO MainThread:125129 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:36:17,011 INFO MainThread:125129 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:36:17,011 DEBUG HandlerThread:125129 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:36:17,012 INFO HandlerThread:125129 [handler.py:finish():882] shutting down handler +2024-05-14 16:36:17,947 INFO WriterThread:125129 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/run-rpovsc5l.wandb +2024-05-14 16:36:18,011 INFO SenderThread:125129 [sender.py:finish():1545] shutting down sender +2024-05-14 16:36:18,011 INFO SenderThread:125129 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:36:18,011 INFO SenderThread:125129 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..0440e870ae8c5ddfc154626a9e4ae3ba0140b60d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:36:06,661 
INFO MainThread:123907 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_setup.py:_flush():76] Configure stats pid to 123907 +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:36:06,661 WARNING MainThread:123907 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/logs/debug.log +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/logs/debug-internal.log +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_init.py:init():610] starting backend +2024-05-14 16:36:06,661 INFO MainThread:123907 [wandb_init.py:init():614] setting up manager +2024-05-14 16:36:06,663 INFO MainThread:123907 
[backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:36:06,663 INFO MainThread:123907 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:36:06,666 INFO MainThread:123907 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:36:06,678 INFO MainThread:123907 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:36:06,955 INFO MainThread:123907 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:36:07,035 INFO MainThread:123907 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:36:07,035 INFO MainThread:123907 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:36:07,323 INFO MainThread:123907 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:36:07,324 INFO MainThread:123907 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:36:07,324 INFO MainThread:123907 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:36:07,324 INFO MainThread:123907 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 16:36:07,326 INFO MainThread:123907 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:36:18,013 WARNING MsgRouterThr:123907 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/run-rpovsc5l.wandb b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/run-rpovsc5l.wandb new file mode 100644 index 0000000000000000000000000000000000000000..de254c5d8372d35fca39268eefe6a8c0f9fda63d Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_163606-rpovsc5l/run-rpovsc5l.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c9f6c8298d7f6388c49b502891050f1a4aef6d7c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715705031 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..de0319699fd2ecad7558812d88590153244ff3b5 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:16:43:51,891 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:43:56,523 INFO [__main__.py:335] Selected Tasks: 
['indiccopa-hi'] +2024-05-14:16:43:56,526 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:43:56,526 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:16:44:05,258 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:16:44:05,258 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. 
using default higher_is_better=True +[2024-05-14 16:44:04,841] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:16:44:06,586 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:16:44:06,586 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:16:44:06,610 INFO [task.py:395] Building contexts for indiccopa-hi on rank 2... +100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 79191.17it/s] +2024-05-14:16:44:08,634 INFO [evaluator.py:379] Running loglikelihood requests +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +Passed argument batch_size = auto:1. 
Detecting largest batch size +Determined largest batch size: 64 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 
+nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..6ee4d18ad120d3883203aff716535496ae709ca0 --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:43:51.749811", + "startedAt": "2024-05-14T16:43:51.332078", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3391.0238092105265, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3312.457, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3312.456, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3219.594, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3210.825, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, 
+ "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 
+ }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.430591583252 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..91df0012cef27fbd76437f2803da1fd4192acd69 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 24}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..8b3c5111b348587ed9c7ca486e83212045aef90d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/logs/debug-internal.log @@ -0,0 +1,193 @@ +2024-05-14 16:43:51,346 INFO StreamThr :130096 [internal.py:wandb_internal():85] W&B internal server running at pid: 130096, started at: 2024-05-14 16:43:51.345807 +2024-05-14 16:43:51,348 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:43:51,349 INFO WriterThread:130096 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/run-utdjldr2.wandb +2024-05-14 16:43:51,350 DEBUG 
SenderThread:130096 [sender.py:send():378] send: header +2024-05-14 16:43:51,360 DEBUG SenderThread:130096 [sender.py:send():378] send: run +2024-05-14 16:43:51,584 INFO SenderThread:130096 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files +2024-05-14 16:43:51,584 INFO SenderThread:130096 [sender.py:_start_run_threads():1123] run started: utdjldr2 with start time 1715705031.345584 +2024-05-14 16:43:51,592 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:43:51,592 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:43:51,676 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:43:51,677 DEBUG HandlerThread:130096 [system_info.py:__init__():26] System info init +2024-05-14 16:43:51,677 DEBUG HandlerThread:130096 [system_info.py:__init__():41] System info init done +2024-05-14 16:43:51,677 INFO HandlerThread:130096 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:43:51,678 INFO SystemMonitor:130096 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:43:51,678 INFO HandlerThread:130096 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:43:51,678 INFO SystemMonitor:130096 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:43:51,680 INFO SystemMonitor:130096 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:43:51,681 INFO SystemMonitor:130096 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:43:51,681 INFO SystemMonitor:130096 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:43:51,749 DEBUG HandlerThread:130096 [system_info.py:probe():150] Probing system +2024-05-14 16:43:51,758 DEBUG HandlerThread:130096 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:43:51,778 ERROR 
HandlerThread:130096 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:43:51,778 DEBUG HandlerThread:130096 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:43:51,778 DEBUG HandlerThread:130096 [system_info.py:probe():198] Probing system done +2024-05-14 16:43:51,778 DEBUG HandlerThread:130096 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:43:51.749811', 'startedAt': '2024-05-14T16:43:51.332078', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3391.0238092105265, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3312.457, 'min': 800.0, 'max': 3400.0}, {'current': 3312.456, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3219.594, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3210.825, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 
'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.002, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 
3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 
3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.430591583252}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:43:51,778 INFO HandlerThread:130096 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:43:51,778 INFO HandlerThread:130096 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:43:51,780 INFO HandlerThread:130096 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:43:51,783 DEBUG SenderThread:130096 [sender.py:send():378] send: files +2024-05-14 16:43:51,783 INFO SenderThread:130096 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:43:51,888 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:43:51,888 DEBUG HandlerThread:130096 
[handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:43:51,888 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:43:51,889 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:43:52,074 DEBUG SenderThread:130096 [sender.py:send():378] send: telemetry +2024-05-14 16:43:52,285 INFO wandb-upload_0:130096 [upload_job.py:push():130] Uploaded file /tmp/tmpxkdokf7nwandb/d8iz9a7e-wandb-metadata.json +2024-05-14 16:43:52,585 INFO Thread-12 :130096 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/requirements.txt +2024-05-14 16:43:52,585 INFO Thread-12 :130096 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-metadata.json +2024-05-14 16:43:52,585 INFO Thread-12 :130096 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:43:54,585 INFO Thread-12 :130096 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:43:56,524 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:43:58,587 INFO Thread-12 :130096 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:44:01,527 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:04,592 INFO Thread-12 :130096 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:44:06,587 DEBUG 
HandlerThread:130096 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:06,593 INFO Thread-12 :130096 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:44:06,888 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:44:06,889 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:44:07,594 INFO Thread-12 :130096 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:44:08,595 INFO Thread-12 :130096 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:44:10,596 INFO Thread-12 :130096 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:44:11,597 INFO Thread-12 :130096 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:44:12,153 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:15,881 DEBUG SenderThread:130096 [sender.py:send():378] send: exit +2024-05-14 16:44:15,881 INFO SenderThread:130096 [sender.py:send_exit():585] handling exit code: 0 +2024-05-14 16:44:15,881 INFO SenderThread:130096 [sender.py:send_exit():587] handling runtime: 24 +2024-05-14 16:44:15,884 INFO SenderThread:130096 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:44:15,884 INFO SenderThread:130096 [sender.py:send_exit():593] send defer +2024-05-14 16:44:15,884 DEBUG HandlerThread:130096 
[handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,884 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:44:15,884 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,884 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:44:15,884 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:44:15,885 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,885 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:44:15,885 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,885 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:44:15,885 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:44:15,885 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,885 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:44:15,885 INFO HandlerThread:130096 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:44:15,886 DEBUG SystemMonitor:130096 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:44:15,886 INFO HandlerThread:130096 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:44:15,886 DEBUG SystemMonitor:130096 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:44:15,886 INFO HandlerThread:130096 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:44:15,886 DEBUG SystemMonitor:130096 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:44:15,886 INFO HandlerThread:130096 [interfaces.py:finish():200] Joined memory monitor 
+2024-05-14 16:44:15,888 INFO HandlerThread:130096 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:44:15,888 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,888 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:44:15,888 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:44:15,888 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,888 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:44:15,888 DEBUG SenderThread:130096 [sender.py:send():378] send: stats +2024-05-14 16:44:15,889 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,889 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:44:15,889 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:44:15,889 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,889 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:44:15,889 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,890 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:44:15,890 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:44:15,890 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,890 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:44:15,890 DEBUG SenderThread:130096 [sender.py:send():378] send: summary +2024-05-14 16:44:15,890 INFO SenderThread:130096 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 
16:44:15,891 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,891 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:44:15,891 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:44:15,891 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,891 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:44:15,891 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,891 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:44:15,894 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:44:15,960 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:44:15,960 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:15,960 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:44:15,960 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:15,960 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:44:16,601 INFO Thread-12 :130096 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/config.yaml +2024-05-14 16:44:16,601 INFO Thread-12 :130096 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-summary.json +2024-05-14 16:44:16,855 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:44:16,855 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 
16:44:16,855 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:44:16,856 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:16,856 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:44:16,856 INFO SenderThread:130096 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:44:16,856 INFO SenderThread:130096 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:44:16,856 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:44:16,856 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:16,856 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:44:16,856 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:16,856 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 16:44:16,857 INFO SenderThread:130096 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:44:16,881 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:17,601 INFO Thread-12 :130096 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:44:17,602 INFO SenderThread:130096 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files +2024-05-14 16:44:17,602 INFO SenderThread:130096 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-summary.json wandb-summary.json +2024-05-14 16:44:17,602 INFO SenderThread:130096 [dir_watcher.py:finish():402] scan save: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log output.log +2024-05-14 16:44:17,602 INFO SenderThread:130096 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/requirements.txt requirements.txt +2024-05-14 16:44:17,602 INFO SenderThread:130096 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/config.yaml config.yaml +2024-05-14 16:44:17,602 INFO SenderThread:130096 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:44:17,602 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:44:17,602 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:17,605 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:17,606 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:44:17,608 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:17,609 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:44:17,609 INFO SenderThread:130096 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:44:17,841 INFO wandb-upload_1:130096 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/wandb-summary.json +2024-05-14 16:44:17,881 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:17,882 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:18,011 INFO wandb-upload_0:130096 [upload_job.py:push():130] Uploaded file 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/output.log +2024-05-14 16:44:18,083 INFO wandb-upload_2:130096 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/requirements.txt +2024-05-14 16:44:18,087 INFO wandb-upload_3:130096 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/files/config.yaml +2024-05-14 16:44:18,287 INFO Thread-11 (_thread_body):130096 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:44:18,287 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,287 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:44:18,288 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,288 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:44:18,288 INFO SenderThread:130096 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 16:44:18,288 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:44:18,288 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,288 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:44:18,288 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,288 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:44:18,288 INFO SenderThread:130096 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:44:18,362 INFO SenderThread:130096 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:44:18,362 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:44:18,362 DEBUG 
HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,363 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:44:18,363 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,363 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:44:18,363 INFO SenderThread:130096 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:44:18,363 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:44:18,363 INFO HandlerThread:130096 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:44:18,363 DEBUG SenderThread:130096 [sender.py:send():378] send: final +2024-05-14 16:44:18,363 DEBUG SenderThread:130096 [sender.py:send():378] send: footer +2024-05-14 16:44:18,363 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: defer +2024-05-14 16:44:18,363 INFO SenderThread:130096 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:44:18,364 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:18,364 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:18,364 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:44:18,365 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:44:18,365 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:44:18,365 DEBUG SenderThread:130096 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:44:18,366 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:44:18,366 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: sampled_history 
+2024-05-14 16:44:18,367 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:44:18,428 INFO MainThread:130096 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:44:18,428 INFO MainThread:130096 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:44:18,428 INFO MainThread:130096 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:44:18,429 DEBUG HandlerThread:130096 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:44:18,429 INFO HandlerThread:130096 [handler.py:finish():882] shutting down handler +2024-05-14 16:44:19,365 INFO WriterThread:130096 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/run-utdjldr2.wandb +2024-05-14 16:44:19,428 INFO SenderThread:130096 [sender.py:finish():1545] shutting down sender +2024-05-14 16:44:19,428 INFO SenderThread:130096 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:44:19,428 INFO SenderThread:130096 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..74d1b5bcce036f67e3db251f1c5c6b5b7418ebd2 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:43:51,341 INFO MainThread:128889 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_setup.py:_flush():76] Configure stats pid to 128889 +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_setup.py:_flush():76] Loading settings from 
/data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:43:51,342 WARNING MainThread:128889 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/logs/debug.log +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164351-utdjldr2/logs/debug-internal.log +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_init.py:init():610] starting backend +2024-05-14 16:43:51,342 INFO MainThread:128889 [wandb_init.py:init():614] setting up manager +2024-05-14 16:43:51,344 INFO MainThread:128889 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:43:51,345 INFO MainThread:128889 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:43:51,348 INFO MainThread:128889 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:43:51,359 INFO MainThread:128889 [wandb_init.py:init():744] communicating run to backend with 90.0 
second timeout +2024-05-14 16:43:51,591 INFO MainThread:128889 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:43:51,670 INFO MainThread:128889 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:43:51,670 INFO MainThread:128889 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:43:51,888 INFO MainThread:128889 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:43:51,888 INFO MainThread:128889 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:43:51,889 INFO MainThread:128889 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:43:51,889 INFO MainThread:128889 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 16:43:51,890 INFO MainThread:128889 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:44:19,429 WARNING MsgRouterThr:128889 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..46cf6f00b3bd7716effe94e46d60c979920d0178 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715705121 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/output.log b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/output.log new file mode 100644 
index 0000000000000000000000000000000000000000..1ef0b7be6464e3520331e1ea6679ef8886d2c941 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/output.log @@ -0,0 +1,33 @@ + +2024-05-14:16:45:22,394 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:45:26,934 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:16:45:26,938 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:45:26,938 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step120'} +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = 
PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 369, in cached_file + raise EnvironmentError( +OSError: /data/cronscript/ckpts//hf_ckpt//global_step120 does not appear to have a file named config.json. Checkout 'https://huggingface.co//data/cronscript/ckpts//hf_ckpt//global_step120/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 
+grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 
+torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..31ce0effbbf73eb31ff7e868d42b5811d592421e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:45:22.262380", + "startedAt": "2024-05-14T16:45:21.835353", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3392.987164473684, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3219.841, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + 
}, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4300231933594 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/logs/debug-internal.log new file mode 100644 index 
0000000000000000000000000000000000000000..a28078dfc56d79356054955d254ca80392df09c3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/logs/debug-internal.log @@ -0,0 +1,179 @@ +2024-05-14 16:45:21,846 INFO StreamThr :137233 [internal.py:wandb_internal():85] W&B internal server running at pid: 137233, started at: 2024-05-14 16:45:21.845534 +2024-05-14 16:45:21,847 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:45:21,848 INFO WriterThread:137233 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/run-3ekply3a.wandb +2024-05-14 16:45:21,849 DEBUG SenderThread:137233 [sender.py:send():378] send: header +2024-05-14 16:45:21,859 DEBUG SenderThread:137233 [sender.py:send():378] send: run +2024-05-14 16:45:22,086 INFO SenderThread:137233 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files +2024-05-14 16:45:22,086 INFO SenderThread:137233 [sender.py:_start_run_threads():1123] run started: 3ekply3a with start time 1715705121.845464 +2024-05-14 16:45:22,093 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:45:22,093 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:45:22,177 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:45:22,179 DEBUG HandlerThread:137233 [system_info.py:__init__():26] System info init +2024-05-14 16:45:22,179 DEBUG HandlerThread:137233 [system_info.py:__init__():41] System info init done +2024-05-14 16:45:22,179 INFO HandlerThread:137233 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:45:22,179 INFO SystemMonitor:137233 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:45:22,179 INFO HandlerThread:137233 
[system_monitor.py:probe():214] Collecting system info +2024-05-14 16:45:22,179 INFO SystemMonitor:137233 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:45:22,179 INFO SystemMonitor:137233 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:45:22,180 INFO SystemMonitor:137233 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:45:22,180 INFO SystemMonitor:137233 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:45:22,262 DEBUG HandlerThread:137233 [system_info.py:probe():150] Probing system +2024-05-14 16:45:22,270 DEBUG HandlerThread:137233 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:45:22,290 ERROR HandlerThread:137233 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:45:22,290 DEBUG HandlerThread:137233 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:45:22,290 DEBUG HandlerThread:137233 [system_info.py:probe():198] Probing system done +2024-05-14 16:45:22,290 DEBUG HandlerThread:137233 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:45:22.262380', 'startedAt': '2024-05-14T16:45:21.835353', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 
'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3392.987164473684, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.002, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3219.841, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 
'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 
'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4300231933594}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:45:22,290 INFO HandlerThread:137233 
[system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:45:22,290 INFO HandlerThread:137233 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:45:22,291 INFO HandlerThread:137233 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:45:22,295 DEBUG SenderThread:137233 [sender.py:send():378] send: files +2024-05-14 16:45:22,295 INFO SenderThread:137233 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:45:22,390 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:45:22,391 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:45:22,391 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:45:22,392 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:45:22,608 DEBUG SenderThread:137233 [sender.py:send():378] send: telemetry +2024-05-14 16:45:22,799 INFO wandb-upload_0:137233 [upload_job.py:push():130] Uploaded file /tmp/tmpmqx5sk3ewandb/kox9jhkg-wandb-metadata.json +2024-05-14 16:45:23,087 INFO Thread-12 :137233 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/requirements.txt +2024-05-14 16:45:23,088 INFO Thread-12 :137233 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/output.log +2024-05-14 16:45:23,088 INFO Thread-12 :137233 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-metadata.json +2024-05-14 16:45:25,088 INFO Thread-12 :137233 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/output.log 
+2024-05-14 16:45:26,936 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:45:28,063 DEBUG SenderThread:137233 [sender.py:send():378] send: exit +2024-05-14 16:45:28,063 INFO SenderThread:137233 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 16:45:28,063 INFO SenderThread:137233 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 16:45:28,064 INFO SenderThread:137233 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:45:28,064 INFO SenderThread:137233 [sender.py:send_exit():593] send defer +2024-05-14 16:45:28,065 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,065 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:45:28,065 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,065 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:45:28,065 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:45:28,065 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,065 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:45:28,065 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,065 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:45:28,065 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:45:28,065 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,065 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:45:28,065 INFO HandlerThread:137233 [system_monitor.py:finish():203] Stopping system monitor 
+2024-05-14 16:45:28,065 DEBUG SystemMonitor:137233 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:45:28,066 INFO HandlerThread:137233 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:45:28,066 DEBUG SystemMonitor:137233 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:45:28,066 INFO HandlerThread:137233 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:45:28,066 DEBUG SystemMonitor:137233 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:45:28,066 INFO HandlerThread:137233 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:45:28,067 INFO HandlerThread:137233 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:45:28,068 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,068 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:45:28,068 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:45:28,068 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,068 DEBUG SenderThread:137233 [sender.py:send():378] send: stats +2024-05-14 16:45:28,068 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:45:28,069 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,069 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:45:28,069 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:45:28,069 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,069 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:45:28,069 DEBUG SenderThread:137233 [sender.py:send_request():405] 
send_request: defer +2024-05-14 16:45:28,069 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:45:28,069 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:45:28,069 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,069 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:45:28,069 DEBUG SenderThread:137233 [sender.py:send():378] send: summary +2024-05-14 16:45:28,070 INFO SenderThread:137233 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:45:28,070 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,070 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 16:45:28,070 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:45:28,070 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,070 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:45:28,070 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,070 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:45:28,073 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:45:28,089 INFO Thread-12 :137233 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-summary.json +2024-05-14 16:45:28,144 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:45:28,144 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,144 INFO HandlerThread:137233 
[handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:45:28,144 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,144 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:45:28,624 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:45:28,624 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,625 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:45:28,625 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,625 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:45:28,625 INFO SenderThread:137233 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:45:28,625 INFO SenderThread:137233 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:45:28,625 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:45:28,625 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,625 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:45:28,625 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,625 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 16:45:28,625 INFO SenderThread:137233 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:45:29,063 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:45:29,090 INFO SenderThread:137233 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/output.log +2024-05-14 16:45:29,090 INFO 
SenderThread:137233 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/config.yaml +2024-05-14 16:45:29,090 INFO SenderThread:137233 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files +2024-05-14 16:45:29,090 INFO SenderThread:137233 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/config.yaml config.yaml +2024-05-14 16:45:29,090 INFO SenderThread:137233 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/output.log output.log +2024-05-14 16:45:29,091 INFO SenderThread:137233 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:45:29,092 INFO SenderThread:137233 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-summary.json wandb-summary.json +2024-05-14 16:45:29,092 INFO SenderThread:137233 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/requirements.txt requirements.txt +2024-05-14 16:45:29,092 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:45:29,092 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:45:29,094 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:29,094 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:45:29,095 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:29,095 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 
16:45:29,095 INFO SenderThread:137233 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:45:29,320 INFO wandb-upload_1:137233 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/output.log +2024-05-14 16:45:29,484 INFO wandb-upload_0:137233 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/config.yaml +2024-05-14 16:45:29,572 INFO wandb-upload_3:137233 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/requirements.txt +2024-05-14 16:45:29,577 INFO wandb-upload_2:137233 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/files/wandb-summary.json +2024-05-14 16:45:29,777 INFO Thread-11 (_thread_body):137233 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:45:29,778 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:29,778 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:45:29,778 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:29,778 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:45:29,778 INFO SenderThread:137233 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 16:45:29,778 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:45:29,778 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:29,778 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:45:29,779 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:29,779 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle 
sender defer: 12 +2024-05-14 16:45:29,779 INFO SenderThread:137233 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:45:30,031 INFO SenderThread:137233 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:45:30,031 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:45:30,031 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:30,031 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:45:30,032 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:30,032 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:45:30,032 INFO SenderThread:137233 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:45:30,032 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:30,032 INFO HandlerThread:137233 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:45:30,032 DEBUG SenderThread:137233 [sender.py:send():378] send: final +2024-05-14 16:45:30,032 DEBUG SenderThread:137233 [sender.py:send():378] send: footer +2024-05-14 16:45:30,032 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:30,032 INFO SenderThread:137233 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:45:30,033 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:45:30,033 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:45:30,033 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:45:30,033 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:45:30,033 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: 
server_info +2024-05-14 16:45:30,033 DEBUG SenderThread:137233 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:45:30,034 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:45:30,035 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:45:30,035 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:45:30,085 INFO MainThread:137233 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:45:30,085 INFO MainThread:137233 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:45:30,085 INFO MainThread:137233 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:45:30,086 DEBUG HandlerThread:137233 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:45:30,086 INFO HandlerThread:137233 [handler.py:finish():882] shutting down handler +2024-05-14 16:45:31,033 INFO WriterThread:137233 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/run-3ekply3a.wandb +2024-05-14 16:45:31,085 INFO SenderThread:137233 [sender.py:finish():1545] shutting down sender +2024-05-14 16:45:31,085 INFO SenderThread:137233 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:45:31,085 INFO SenderThread:137233 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..70b7f0d47b11266a395f15fa28ae831bf4d42916 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:45:21,842 INFO MainThread:136013 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 
+2024-05-14 16:45:21,842 INFO MainThread:136013 [wandb_setup.py:_flush():76] Configure stats pid to 136013 +2024-05-14 16:45:21,842 INFO MainThread:136013 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:45:21,842 INFO MainThread:136013 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:45:21,842 INFO MainThread:136013 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:45:21,842 INFO MainThread:136013 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:45:21,842 WARNING MainThread:136013 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:45:21,842 INFO MainThread:136013 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:45:21,842 INFO MainThread:136013 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:45:21,843 INFO MainThread:136013 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/logs/debug.log +2024-05-14 16:45:21,843 INFO MainThread:136013 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/logs/debug-internal.log +2024-05-14 16:45:21,843 INFO MainThread:136013 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:45:21,843 INFO MainThread:136013 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:45:21,843 INFO MainThread:136013 [wandb_init.py:init():610] starting backend +2024-05-14 16:45:21,843 INFO MainThread:136013 [wandb_init.py:init():614] setting up manager +2024-05-14 16:45:21,844 INFO MainThread:136013 [backend.py:_multiprocessing_setup():105] multiprocessing 
start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:45:21,845 INFO MainThread:136013 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:45:21,848 INFO MainThread:136013 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:45:21,858 INFO MainThread:136013 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:45:22,093 INFO MainThread:136013 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:45:22,172 INFO MainThread:136013 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:45:22,173 INFO MainThread:136013 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:45:22,391 INFO MainThread:136013 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:45:22,391 INFO MainThread:136013 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:45:22,391 INFO MainThread:136013 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:45:22,391 INFO MainThread:136013 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 16:45:22,392 INFO MainThread:136013 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:45:31,086 WARNING MsgRouterThr:136013 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/run-3ekply3a.wandb b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/run-3ekply3a.wandb new file mode 100644 index 0000000000000000000000000000000000000000..f1565fc89f41beca64ec1cf45a2b51a1e4f98e79 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_164521-3ekply3a/run-3ekply3a.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/config.yaml b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..02b49a55397f194da5dfa2331ae21bbacf9ecbeb --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.0 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716404243 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.0 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/output.log b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..90e1e2bafed289dbdea3c98a80516971f4be6a4e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/output.log @@ -0,0 +1,34 @@ + +2024-05-22:18:57:23,840 INFO [__main__.py:251] Verbosity set to INFO +2024-05-22:18:57:32,340 INFO [__main__.py:335] Selected Tasks: 
['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-22:18:57:32,341 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-22:18:57:32,342 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000'} +2024-05-22:18:57:34,633 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File 
"/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3573b3bbfd5b190fed4ccaed4ac2846002aec22 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 
+habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.0 +typepy==1.3.2 
+typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..899717e14f078c805e6a9aaf9bb3023fd924d056 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-22T18:57:23.629801", + "startedAt": "2024-05-22T18:57:23.098899", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2337.18309375, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + 
}, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.64057540893555 + } + }, + "memory": { + "total": 1007.4379997253418 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-summary.json new file mode 100644 index 
0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..2a06cf565fa1d9486255c86dffa4c9b7c6a8a62f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-22 18:57:23,123 INFO StreamThr :3088 [internal.py:wandb_internal():85] W&B internal server running at pid: 3088, started at: 2024-05-22 18:57:23.122114 +2024-05-22 18:57:23,128 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: status +2024-05-22 18:57:23,131 INFO WriterThread:3088 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/run-4wdzvb14.wandb +2024-05-22 18:57:23,132 DEBUG SenderThread:3088 [sender.py:send():378] send: header +2024-05-22 18:57:23,135 DEBUG SenderThread:3088 [sender.py:send():378] send: run +2024-05-22 18:57:23,436 INFO SenderThread:3088 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files +2024-05-22 18:57:23,436 INFO SenderThread:3088 [sender.py:_start_run_threads():1123] run started: 4wdzvb14 with start time 1716404243.122311 +2024-05-22 18:57:23,438 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: check_version +2024-05-22 18:57:23,439 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: check_version +2024-05-22 18:57:23,554 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: run_start +2024-05-22 18:57:23,556 DEBUG 
HandlerThread:3088 [system_info.py:__init__():26] System info init +2024-05-22 18:57:23,556 DEBUG HandlerThread:3088 [system_info.py:__init__():41] System info init done +2024-05-22 18:57:23,556 INFO HandlerThread:3088 [system_monitor.py:start():194] Starting system monitor +2024-05-22 18:57:23,556 INFO SystemMonitor:3088 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-22 18:57:23,556 INFO HandlerThread:3088 [system_monitor.py:probe():214] Collecting system info +2024-05-22 18:57:23,563 INFO SystemMonitor:3088 [interfaces.py:start():188] Started cpu monitoring +2024-05-22 18:57:23,564 INFO SystemMonitor:3088 [interfaces.py:start():188] Started disk monitoring +2024-05-22 18:57:23,565 INFO SystemMonitor:3088 [interfaces.py:start():188] Started memory monitoring +2024-05-22 18:57:23,570 INFO SystemMonitor:3088 [interfaces.py:start():188] Started network monitoring +2024-05-22 18:57:23,629 DEBUG HandlerThread:3088 [system_info.py:probe():150] Probing system +2024-05-22 18:57:23,633 DEBUG HandlerThread:3088 [system_info.py:_probe_git():135] Probing git +2024-05-22 18:57:23,642 ERROR HandlerThread:3088 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-22 18:57:23,642 DEBUG HandlerThread:3088 [system_info.py:_probe_git():143] Probing git done +2024-05-22 18:57:23,642 DEBUG HandlerThread:3088 [system_info.py:probe():198] Probing system done +2024-05-22 18:57:23,642 DEBUG HandlerThread:3088 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T18:57:23.629801', 'startedAt': '2024-05-22T18:57:23.098899', 'docker': None, 
'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2337.18309375, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.64057540893555}}, 'memory': {'total': 1007.4379997253418}} +2024-05-22 18:57:23,643 INFO HandlerThread:3088 [system_monitor.py:probe():224] Finished collecting system info +2024-05-22 18:57:23,643 INFO HandlerThread:3088 [system_monitor.py:probe():227] Publishing system info +2024-05-22 18:57:23,646 INFO HandlerThread:3088 [system_monitor.py:probe():229] Finished publishing system info +2024-05-22 18:57:23,651 DEBUG SenderThread:3088 [sender.py:send():378] send: files +2024-05-22 18:57:23,651 INFO SenderThread:3088 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-22 18:57:23,832 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: python_packages +2024-05-22 18:57:23,832 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: python_packages +2024-05-22 18:57:23,833 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: stop_status +2024-05-22 18:57:23,836 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: stop_status +2024-05-22 18:57:23,946 DEBUG SenderThread:3088 [sender.py:send():378] send: telemetry +2024-05-22 18:57:24,290 INFO 
wandb-upload_0:3088 [upload_job.py:push():130] Uploaded file /tmp/tmphz3y2r4iwandb/bmd4kkxi-wandb-metadata.json +2024-05-22 18:57:24,437 INFO Thread-12 :3088 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/output.log +2024-05-22 18:57:24,437 INFO Thread-12 :3088 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/requirements.txt +2024-05-22 18:57:24,438 INFO Thread-12 :3088 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-metadata.json +2024-05-22 18:57:26,437 INFO Thread-12 :3088 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/output.log +2024-05-22 18:57:28,948 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:57:34,342 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:57:34,444 INFO Thread-12 :3088 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/output.log +2024-05-22 18:57:34,640 DEBUG SenderThread:3088 [sender.py:send():378] send: exit +2024-05-22 18:57:34,640 INFO SenderThread:3088 [sender.py:send_exit():585] handling exit code: 1 +2024-05-22 18:57:34,640 INFO SenderThread:3088 [sender.py:send_exit():587] handling runtime: 11 +2024-05-22 18:57:34,642 INFO SenderThread:3088 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:57:34,642 INFO SenderThread:3088 [sender.py:send_exit():593] send defer +2024-05-22 18:57:34,642 DEBUG HandlerThread:3088 [handler.py:handle_request():158] 
handle_request: defer +2024-05-22 18:57:34,642 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-22 18:57:34,642 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:34,642 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-22 18:57:34,642 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 1 +2024-05-22 18:57:34,642 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:34,642 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-22 18:57:34,642 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:34,643 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-22 18:57:34,643 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 2 +2024-05-22 18:57:34,643 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:34,643 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-22 18:57:34,643 INFO HandlerThread:3088 [system_monitor.py:finish():203] Stopping system monitor +2024-05-22 18:57:34,643 DEBUG SystemMonitor:3088 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-22 18:57:34,643 DEBUG SystemMonitor:3088 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-22 18:57:34,644 INFO HandlerThread:3088 [interfaces.py:finish():200] Joined cpu monitor +2024-05-22 18:57:34,644 DEBUG SystemMonitor:3088 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-22 18:57:34,644 INFO HandlerThread:3088 [interfaces.py:finish():200] Joined disk monitor +2024-05-22 18:57:34,645 INFO HandlerThread:3088 [interfaces.py:finish():200] Joined memory monitor +2024-05-22 18:57:34,645 INFO HandlerThread:3088 
[interfaces.py:finish():200] Joined network monitor +2024-05-22 18:57:34,646 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:34,646 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-22 18:57:34,646 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 3 +2024-05-22 18:57:34,646 DEBUG SenderThread:3088 [sender.py:send():378] send: stats +2024-05-22 18:57:34,647 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:34,647 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-22 18:57:34,647 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:34,647 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-22 18:57:34,647 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 4 +2024-05-22 18:57:34,647 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:34,647 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-22 18:57:34,648 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:34,648 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-22 18:57:34,648 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 5 +2024-05-22 18:57:34,648 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:34,648 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-22 18:57:34,648 DEBUG SenderThread:3088 [sender.py:send():378] send: summary +2024-05-22 18:57:34,649 INFO SenderThread:3088 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:57:34,649 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer 
+2024-05-22 18:57:34,649 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-22 18:57:34,649 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 6 +2024-05-22 18:57:34,649 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:34,649 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-22 18:57:34,649 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:34,649 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-22 18:57:34,654 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:57:34,738 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 7 +2024-05-22 18:57:34,738 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:34,738 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-22 18:57:34,738 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:34,738 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-22 18:57:35,446 INFO Thread-12 :3088 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/config.yaml +2024-05-22 18:57:35,446 INFO Thread-12 :3088 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-summary.json +2024-05-22 18:57:35,640 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:57:35,970 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 8 +2024-05-22 18:57:35,970 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: poll_exit 
+2024-05-22 18:57:35,970 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:35,970 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-22 18:57:35,970 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:35,970 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-22 18:57:35,970 INFO SenderThread:3088 [job_builder.py:build():432] Attempting to build job artifact +2024-05-22 18:57:35,971 INFO SenderThread:3088 [job_builder.py:_get_source_type():576] no source found +2024-05-22 18:57:35,971 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 9 +2024-05-22 18:57:35,971 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:35,971 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-22 18:57:35,971 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:35,972 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-22 18:57:35,972 INFO SenderThread:3088 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-22 18:57:36,447 INFO SenderThread:3088 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/output.log +2024-05-22 18:57:36,448 INFO SenderThread:3088 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files +2024-05-22 18:57:36,448 INFO SenderThread:3088 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/config.yaml config.yaml +2024-05-22 18:57:36,448 INFO SenderThread:3088 [dir_watcher.py:finish():402] scan save: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-summary.json wandb-summary.json +2024-05-22 18:57:36,450 INFO SenderThread:3088 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/requirements.txt requirements.txt +2024-05-22 18:57:36,451 INFO SenderThread:3088 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-metadata.json wandb-metadata.json +2024-05-22 18:57:36,451 INFO SenderThread:3088 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/output.log output.log +2024-05-22 18:57:36,451 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 10 +2024-05-22 18:57:36,451 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:36,451 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-22 18:57:36,453 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:36,453 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-22 18:57:36,453 INFO SenderThread:3088 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:57:36,640 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:57:36,641 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:57:36,719 INFO wandb-upload_0:3088 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/config.yaml +2024-05-22 18:57:37,053 INFO wandb-upload_1:3088 [upload_job.py:push():130] Uploaded file 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/wandb-summary.json +2024-05-22 18:57:37,061 INFO wandb-upload_2:3088 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/requirements.txt +2024-05-22 18:57:37,061 INFO wandb-upload_3:3088 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/files/output.log +2024-05-22 18:57:37,261 INFO Thread-11 (_thread_body):3088 [sender.py:transition_state():613] send defer: 11 +2024-05-22 18:57:37,261 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:37,261 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-22 18:57:37,262 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:37,262 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-22 18:57:37,262 INFO SenderThread:3088 [file_pusher.py:join():175] waiting for file pusher +2024-05-22 18:57:37,262 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 12 +2024-05-22 18:57:37,262 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:37,262 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-22 18:57:37,262 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:37,262 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-22 18:57:37,262 INFO SenderThread:3088 [file_stream.py:finish():601] file stream finish called +2024-05-22 18:57:37,326 INFO SenderThread:3088 [file_stream.py:finish():605] file stream finish is done +2024-05-22 18:57:37,326 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 13 +2024-05-22 18:57:37,326 
DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:37,326 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-22 18:57:37,327 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:37,327 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-22 18:57:37,327 INFO SenderThread:3088 [sender.py:transition_state():613] send defer: 14 +2024-05-22 18:57:37,327 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:57:37,327 INFO HandlerThread:3088 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-22 18:57:37,327 DEBUG SenderThread:3088 [sender.py:send():378] send: final +2024-05-22 18:57:37,327 DEBUG SenderThread:3088 [sender.py:send():378] send: footer +2024-05-22 18:57:37,327 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: defer +2024-05-22 18:57:37,327 INFO SenderThread:3088 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-22 18:57:37,328 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:57:37,328 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:57:37,328 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: server_info +2024-05-22 18:57:37,328 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: get_summary +2024-05-22 18:57:37,328 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-22 18:57:37,328 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-22 18:57:37,329 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:57:37,329 DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:57:37,329 
DEBUG SenderThread:3088 [sender.py:send_request():405] send_request: server_info +2024-05-22 18:57:37,380 INFO MainThread:3088 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-22 18:57:37,380 INFO MainThread:3088 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-22 18:57:37,380 INFO MainThread:3088 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-22 18:57:37,380 DEBUG HandlerThread:3088 [handler.py:handle_request():158] handle_request: shutdown +2024-05-22 18:57:37,380 INFO HandlerThread:3088 [handler.py:finish():882] shutting down handler +2024-05-22 18:57:38,328 INFO WriterThread:3088 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/run-4wdzvb14.wandb +2024-05-22 18:57:38,380 INFO SenderThread:3088 [sender.py:finish():1545] shutting down sender +2024-05-22 18:57:38,380 INFO SenderThread:3088 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:57:38,380 INFO SenderThread:3088 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/logs/debug.log b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..89b912d63caebd4859ad931b68f919d71cec06bd --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-22 18:57:23,116 INFO MainThread:2933 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-22 18:57:23,116 INFO MainThread:2933 [wandb_setup.py:_flush():76] Configure stats pid to 2933 +2024-05-22 18:57:23,116 INFO MainThread:2933 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-22 18:57:23,116 INFO MainThread:2933 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings 
+2024-05-22 18:57:23,116 INFO MainThread:2933 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-22 18:57:23,116 INFO MainThread:2933 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-22 18:57:23,116 WARNING MainThread:2933 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-22 18:57:23,116 INFO MainThread:2933 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-22 18:57:23,116 INFO MainThread:2933 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-22 18:57:23,116 INFO MainThread:2933 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/logs/debug.log +2024-05-22 18:57:23,117 INFO MainThread:2933 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/logs/debug-internal.log +2024-05-22 18:57:23,117 INFO MainThread:2933 [wandb_init.py:init():560] calling init triggers +2024-05-22 18:57:23,117 INFO MainThread:2933 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-22 18:57:23,117 INFO MainThread:2933 [wandb_init.py:init():610] starting backend +2024-05-22 18:57:23,117 INFO MainThread:2933 [wandb_init.py:init():614] setting up manager +2024-05-22 18:57:23,119 INFO MainThread:2933 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-22 18:57:23,121 INFO MainThread:2933 [wandb_init.py:init():622] backend started and connected +2024-05-22 18:57:23,125 INFO MainThread:2933 [wandb_init.py:init():711] updated telemetry +2024-05-22 18:57:23,134 INFO MainThread:2933 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-22 18:57:23,438 INFO 
MainThread:2933 [wandb_run.py:_on_init():2396] communicating current version +2024-05-22 18:57:23,548 INFO MainThread:2933 [wandb_run.py:_on_init():2405] got version response +2024-05-22 18:57:23,548 INFO MainThread:2933 [wandb_init.py:init():795] starting run threads in backend +2024-05-22 18:57:23,834 INFO MainThread:2933 [wandb_run.py:_console_start():2374] atexit reg +2024-05-22 18:57:23,834 INFO MainThread:2933 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-22 18:57:23,834 INFO MainThread:2933 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-22 18:57:23,834 INFO MainThread:2933 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-22 18:57:23,837 INFO MainThread:2933 [wandb_init.py:init():838] run started, returning control to user process +2024-05-22 18:57:38,381 WARNING MsgRouterThr:2933 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/run-4wdzvb14.wandb b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/run-4wdzvb14.wandb new file mode 100644 index 0000000000000000000000000000000000000000..d1d43dc5a8038e66f04b6c839d1f0f2b4a46f8d7 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240522_185723-4wdzvb14/run-4wdzvb14.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4b6d93fd6b76590f58ce678bb022440f46296cdb --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/config.yaml @@ -0,0 +1,32 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716445232 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 
+ 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/output.log b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/output.log @@ -0,0 +1 @@ + diff --git a/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8150356038c46ec25f623f6e945d6dcb66a2e717 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 
+lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..068472f2f83e016b7ee5fffb6164c04eb915979c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T06:20:32.834063", + "startedAt": "2024-05-23T06:20:32.348649", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.1451500000003, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + 
}, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.5990219116211 + } + }, + "memory": { + "total": 1007.4379615783691 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..c83754b1023f2b61e5647f06897b7238c8df7b2b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/logs/debug-internal.log @@ -0,0 +1,46 @@ +2024-05-23 06:20:32,369 INFO StreamThr :825 [internal.py:wandb_internal():85] W&B internal server running at pid: 825, started at: 2024-05-23 
06:20:32.367397 +2024-05-23 06:20:32,374 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: status +2024-05-23 06:20:32,374 INFO WriterThread:825 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/run-7hbwvlug.wandb +2024-05-23 06:20:32,377 DEBUG SenderThread:825 [sender.py:send():378] send: header +2024-05-23 06:20:32,381 DEBUG SenderThread:825 [sender.py:send():378] send: run +2024-05-23 06:20:32,636 INFO SenderThread:825 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files +2024-05-23 06:20:32,636 INFO SenderThread:825 [sender.py:_start_run_threads():1123] run started: 7hbwvlug with start time 1716445232.368487 +2024-05-23 06:20:32,640 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 06:20:32,640 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: check_version +2024-05-23 06:20:32,759 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 06:20:32,761 DEBUG HandlerThread:825 [system_info.py:__init__():26] System info init +2024-05-23 06:20:32,761 DEBUG HandlerThread:825 [system_info.py:__init__():41] System info init done +2024-05-23 06:20:32,761 INFO HandlerThread:825 [system_monitor.py:start():194] Starting system monitor +2024-05-23 06:20:32,762 INFO SystemMonitor:825 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 06:20:32,762 INFO HandlerThread:825 [system_monitor.py:probe():214] Collecting system info +2024-05-23 06:20:32,769 INFO SystemMonitor:825 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 06:20:32,769 INFO SystemMonitor:825 [interfaces.py:start():188] Started disk monitoring +2024-05-23 06:20:32,773 INFO SystemMonitor:825 [interfaces.py:start():188] Started memory monitoring +2024-05-23 
06:20:32,777 INFO SystemMonitor:825 [interfaces.py:start():188] Started network monitoring +2024-05-23 06:20:32,834 DEBUG HandlerThread:825 [system_info.py:probe():150] Probing system +2024-05-23 06:20:32,837 DEBUG HandlerThread:825 [system_info.py:_probe_git():135] Probing git +2024-05-23 06:20:32,847 ERROR HandlerThread:825 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 06:20:32,847 DEBUG HandlerThread:825 [system_info.py:_probe_git():143] Probing git done +2024-05-23 06:20:32,847 DEBUG HandlerThread:825 [system_info.py:probe():198] Probing system done +2024-05-23 06:20:32,847 DEBUG HandlerThread:825 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T06:20:32.834063', 'startedAt': '2024-05-23T06:20:32.348649', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.1451500000003, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 
3400.0}, {'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 
'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 
'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 
'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 
877.6341285705566, 'used': 211.5990219116211}}, 'memory': {'total': 1007.4379615783691}} +2024-05-23 06:20:32,848 INFO HandlerThread:825 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 06:20:32,848 INFO HandlerThread:825 [system_monitor.py:probe():227] Publishing system info +2024-05-23 06:20:32,851 INFO HandlerThread:825 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 06:20:32,857 DEBUG SenderThread:825 [sender.py:send():378] send: files +2024-05-23 06:20:32,857 INFO SenderThread:825 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 06:20:33,036 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 06:20:33,036 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: python_packages +2024-05-23 06:20:33,038 DEBUG SenderThread:825 [sender.py:send():378] send: telemetry +2024-05-23 06:20:33,038 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 06:20:33,039 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: stop_status +2024-05-23 06:20:33,406 INFO wandb-upload_0:825 [upload_job.py:push():130] Uploaded file /tmp/tmpc2g1_28fwandb/rxmz3u61-wandb-metadata.json +2024-05-23 06:20:33,639 INFO Thread-12 :825 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/wandb-metadata.json +2024-05-23 06:20:33,639 INFO Thread-12 :825 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/output.log +2024-05-23 06:20:33,639 INFO Thread-12 :825 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/requirements.txt +2024-05-23 06:20:35,639 INFO Thread-12 :825 
[dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/files/output.log +2024-05-23 06:20:38,202 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: status_report diff --git a/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..7a6b62849d75389da38dabb12a317c17f609ddf7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/logs/debug.log @@ -0,0 +1,28 @@ +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_setup.py:_flush():76] Configure stats pid to 669 +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 06:20:32,362 WARNING MainThread:669 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_init.py:_log_setup():520] Logging user logs to 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/logs/debug.log +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/logs/debug-internal.log +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_init.py:init():560] calling init triggers +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_init.py:init():610] starting backend +2024-05-23 06:20:32,362 INFO MainThread:669 [wandb_init.py:init():614] setting up manager +2024-05-23 06:20:32,364 INFO MainThread:669 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 06:20:32,368 INFO MainThread:669 [wandb_init.py:init():622] backend started and connected +2024-05-23 06:20:32,372 INFO MainThread:669 [wandb_init.py:init():711] updated telemetry +2024-05-23 06:20:32,380 INFO MainThread:669 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 06:20:32,640 INFO MainThread:669 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 06:20:32,753 INFO MainThread:669 [wandb_run.py:_on_init():2405] got version response +2024-05-23 06:20:32,753 INFO MainThread:669 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 06:20:33,037 INFO MainThread:669 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 06:20:33,037 INFO MainThread:669 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 06:20:33,037 INFO MainThread:669 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 06:20:33,037 INFO MainThread:669 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-23 06:20:33,041 INFO MainThread:669 [wandb_init.py:init():838] run started, returning control to user process diff --git a/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/run-7hbwvlug.wandb b/lm-evaluation-harness/wandb/run-20240523_062032-7hbwvlug/run-7hbwvlug.wandb new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/bin/deepspeed.pt b/venv/bin/deepspeed.pt new file mode 100644 index 0000000000000000000000000000000000000000..7f7e5eeef243ecc8481a6a0c775a37809a6c6c31 --- /dev/null +++ b/venv/bin/deepspeed.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8448ec51b5b0716759ca3c96145ee2382401ad339f19b39afbe43b2fc8281344 +size 131 diff --git a/venv/bin/python3 b/venv/bin/python3 new file mode 100644 index 0000000000000000000000000000000000000000..64e8728adfafdb3e95b983fb0960f9a09cd3bac9 --- /dev/null +++ b/venv/bin/python3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45692c3da2492563eabf0a8f5dc18d20dc9c34ffe3a18202563e00bae684be91 +size 5904904 diff --git a/venv/bin/python3.10 b/venv/bin/python3.10 new file mode 100644 index 0000000000000000000000000000000000000000..64e8728adfafdb3e95b983fb0960f9a09cd3bac9 --- /dev/null +++ b/venv/bin/python3.10 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45692c3da2492563eabf0a8f5dc18d20dc9c34ffe3a18202563e00bae684be91 +size 5904904 diff --git a/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/METADATA b/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/METADATA new 
file mode 100644 index 0000000000000000000000000000000000000000..3fc723bb7bb61d73562595210ddb57c130cefb53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/METADATA @@ -0,0 +1,378 @@ +Metadata-Version: 2.1 +Name: accelerate +Version: 0.29.3 +Summary: Accelerate +Home-page: https://github.com/huggingface/accelerate +Author: The HuggingFace team +Author-email: zach.mueller@huggingface.co +License: Apache +Keywords: deep learning +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: numpy (>=1.17) +Requires-Dist: packaging (>=20.0) +Requires-Dist: psutil +Requires-Dist: pyyaml +Requires-Dist: torch (>=1.10.0) +Requires-Dist: huggingface-hub +Requires-Dist: safetensors (>=0.3.1) +Provides-Extra: dev +Requires-Dist: black (~=23.1) ; extra == 'dev' +Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'dev' +Requires-Dist: ruff (~=0.2.1) ; extra == 'dev' +Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'dev' +Requires-Dist: pytest-xdist ; extra == 'dev' +Requires-Dist: pytest-subtests ; extra == 'dev' +Requires-Dist: parameterized ; extra == 'dev' +Requires-Dist: datasets ; extra == 'dev' +Requires-Dist: evaluate ; extra == 'dev' +Requires-Dist: torchpippy (>=0.2.0) ; extra == 'dev' +Requires-Dist: transformers ; extra == 'dev' +Requires-Dist: scipy ; extra == 'dev' +Requires-Dist: scikit-learn ; extra == 'dev' +Requires-Dist: deepspeed ; extra == 'dev' +Requires-Dist: tqdm ; extra == 'dev' +Requires-Dist: 
bitsandbytes ; extra == 'dev' +Requires-Dist: timm ; extra == 'dev' +Requires-Dist: rich ; extra == 'dev' +Provides-Extra: docs +Provides-Extra: quality +Requires-Dist: black (~=23.1) ; extra == 'quality' +Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'quality' +Requires-Dist: ruff (~=0.2.1) ; extra == 'quality' +Provides-Extra: rich +Requires-Dist: rich ; extra == 'rich' +Provides-Extra: sagemaker +Requires-Dist: sagemaker ; extra == 'sagemaker' +Provides-Extra: test_dev +Requires-Dist: datasets ; extra == 'test_dev' +Requires-Dist: evaluate ; extra == 'test_dev' +Requires-Dist: torchpippy (>=0.2.0) ; extra == 'test_dev' +Requires-Dist: transformers ; extra == 'test_dev' +Requires-Dist: scipy ; extra == 'test_dev' +Requires-Dist: scikit-learn ; extra == 'test_dev' +Requires-Dist: deepspeed ; extra == 'test_dev' +Requires-Dist: tqdm ; extra == 'test_dev' +Requires-Dist: bitsandbytes ; extra == 'test_dev' +Requires-Dist: timm ; extra == 'test_dev' +Provides-Extra: test_prod +Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'test_prod' +Requires-Dist: pytest-xdist ; extra == 'test_prod' +Requires-Dist: pytest-subtests ; extra == 'test_prod' +Requires-Dist: parameterized ; extra == 'test_prod' +Provides-Extra: test_trackers +Requires-Dist: wandb ; extra == 'test_trackers' +Requires-Dist: comet-ml ; extra == 'test_trackers' +Requires-Dist: tensorboard ; extra == 'test_trackers' +Requires-Dist: dvclive ; extra == 'test_trackers' +Provides-Extra: testing +Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'testing' +Requires-Dist: pytest-xdist ; extra == 'testing' +Requires-Dist: pytest-subtests ; extra == 'testing' +Requires-Dist: parameterized ; extra == 'testing' +Requires-Dist: datasets ; extra == 'testing' +Requires-Dist: evaluate ; extra == 'testing' +Requires-Dist: torchpippy (>=0.2.0) ; extra == 'testing' +Requires-Dist: transformers ; extra == 'testing' +Requires-Dist: scipy ; extra == 'testing' +Requires-Dist: scikit-learn ; extra == 'testing' 
+Requires-Dist: deepspeed ; extra == 'testing' +Requires-Dist: tqdm ; extra == 'testing' +Requires-Dist: bitsandbytes ; extra == 'testing' +Requires-Dist: timm ; extra == 'testing' + + + +

+
+ +
+

+ +

+ + + License + + + Documentation + + + GitHub release + + + Contributor Covenant + +

+ +

+

Run your *raw* PyTorch training script on any kind of device +

+ +

+ +

+ +## Easy to integrate + +🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boilerplate code needed to use multi-GPUs/TPU/fp16. + +🤗 Accelerate abstracts exactly and only the boilerplate code related to multi-GPUs/TPU/fp16 and leaves the rest of your code unchanged. + +Here is an example: + +```diff + import torch + import torch.nn.functional as F + from datasets import load_dataset ++ from accelerate import Accelerator + ++ accelerator = Accelerator() +- device = 'cpu' ++ device = accelerator.device + + model = torch.nn.Transformer().to(device) + optimizer = torch.optim.Adam(model.parameters()) + + dataset = load_dataset('my_dataset') + data = torch.utils.data.DataLoader(dataset, shuffle=True) + ++ model, optimizer, data = accelerator.prepare(model, optimizer, data) + + model.train() + for epoch in range(10): + for source, targets in data: + source = source.to(device) + targets = targets.to(device) + + optimizer.zero_grad() + + output = model(source) + loss = F.cross_entropy(output, targets) + +- loss.backward() ++ accelerator.backward(loss) + + optimizer.step() +``` + +As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp8, fp16, bf16). + +In particular, the same code can then be run without modification on your local machine for debugging or your training environment. 
+ +🤗 Accelerate even handles the device placement for you (which requires a few more changes to your code, but is safer in general), so you can even simplify your training loop further: + +```diff + import torch + import torch.nn.functional as F + from datasets import load_dataset ++ from accelerate import Accelerator + +- device = 'cpu' ++ accelerator = Accelerator() + +- model = torch.nn.Transformer().to(device) ++ model = torch.nn.Transformer() + optimizer = torch.optim.Adam(model.parameters()) + + dataset = load_dataset('my_dataset') + data = torch.utils.data.DataLoader(dataset, shuffle=True) + ++ model, optimizer, data = accelerator.prepare(model, optimizer, data) + + model.train() + for epoch in range(10): + for source, targets in data: +- source = source.to(device) +- targets = targets.to(device) + + optimizer.zero_grad() + + output = model(source) + loss = F.cross_entropy(output, targets) + +- loss.backward() ++ accelerator.backward(loss) + + optimizer.step() +``` + +Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have a look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples). + +## Launching script + +🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.run` or to write a specific launcher for TPU training! +On your machine(s) just run: + +```bash +accelerate config +``` + +and answer the questions asked. 
This will generate a config file that will be used automatically to properly set the default options when doing + +```bash +accelerate launch my_script.py --args_to_my_script +``` + +For instance, here is how you would run the GLUE example on the MRPC task (from the root of the repo): + +```bash +accelerate launch examples/nlp_example.py +``` + +This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torchrun my_script.py` at your convenience. + +You can also directly pass in the arguments you would to `torchrun` as arguments to `accelerate launch` if you wish to not run` accelerate config`. + +For example, here is how to launch on two GPUs: + +```bash +accelerate launch --multi_gpu --num_processes 2 examples/nlp_example.py +``` + +To learn more, check the CLI documentation available [here](https://huggingface.co/docs/accelerate/package_reference/cli). + +## Launching multi-CPU run using MPI + +🤗 Here is another way to launch multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well. +Once you have MPI setup on your cluster, just run: +```bash +accelerate config +``` +Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun. +Then, use `accelerate launch` with your script like: +```bash +accelerate launch examples/nlp_example.py +``` +Alternatively, you can use mpirun directly, without using the CLI like: +```bash +mpirun -np 2 python examples/nlp_example.py +``` + +## Launching training using DeepSpeed + +🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your Python script, we provide you the `DeepSpeedPlugin`. 
+ +```python +from accelerate import Accelerator, DeepSpeedPlugin + +# deepspeed needs to know your gradient accumulation steps beforehand, so don't forget to pass it +# Remember you still need to do gradient accumulation by yourself, just like you would have done without deepspeed +deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2) +accelerator = Accelerator(mixed_precision='fp16', deepspeed_plugin=deepspeed_plugin) + +# How to save your 🤗 Transformer? +accelerator.wait_for_everyone() +unwrapped_model = accelerator.unwrap_model(model) +unwrapped_model.save_pretrained(save_dir, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model)) +``` + +Note: DeepSpeed support is experimental for now. In case you get into some problem, please open an issue. + +## Launching your training from a notebook + +🤗 Accelerate also provides a `notebook_launcher` function you can use in a notebook to launch a distributed training. This is especially useful for Colab or Kaggle notebooks with a TPU backend. Just define your training loop in a `training_function` then in your last cell, add: + +```python +from accelerate import notebook_launcher + +notebook_launcher(training_function) +``` + +An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb). [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) + +## Why should I use 🤗 Accelerate? + +You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library. In fact, the whole API of 🤗 Accelerate is in one class, the `Accelerator` object. 
+ +## Why shouldn't I use 🤗 Accelerate? + +You shouldn't use 🤗 Accelerate if you don't want to write a training loop yourself. There are plenty of high-level libraries above PyTorch that will offer you that, 🤗 Accelerate is not one of them. + +## Frameworks using 🤗 Accelerate + +If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around its capabilities, some frameworks and libraries that are built on top of 🤗 Accelerate are listed below: + +* [Amphion](https://github.com/open-mmlab/Amphion) is a toolkit for Audio, Music, and Speech Generation. Its purpose is to support reproducible research and help junior researchers and engineers get started in the field of audio, music, and speech generation research and development. +* [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76). +* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model training, and inference logic. +* [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms. 
+* [Finetuner](https://github.com/jina-ai/finetuner) is a service that enables models to create higher-quality embeddings for semantic search, visual similarity search, cross-modal text<->image search, recommendation systems, clustering, duplication detection, anomaly detection, or other uses. +* [InvokeAI](https://github.com/invoke-ai/InvokeAI) is a creative engine for Stable Diffusion models, offering industry-leading WebUI, terminal usage support, and serves as the foundation for many commercial products. +* [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library. +* [Open Assistant](https://projects.laion.ai/Open-Assistant/) is a chat-based assistant that understands tasks, can interact with their party systems, and retrieve information dynamically to do so. +* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centered around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves! +* [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) is an open-source browser-based easy-to-use interface based on the Gradio library for Stable Diffusion. +* [torchkeras](https://github.com/lyhue1991/torchkeras) is a simple tool for training pytorch model just in a keras style, a dynamic and beautiful plot is provided in notebook to monitor your loss or metric. 
+* [transformers](https://github.com/huggingface/transformers) as a tool for helping train state-of-the-art machine learning models in PyTorch, Tensorflow, and JAX. (Accelerate is the backend for the PyTorch side). + + +## Installation + +This repository is tested on Python 3.8+ and PyTorch 1.10.0+ + +You should install 🤗 Accelerate in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). + +First, create a virtual environment with the version of Python you're going to use and activate it. + +Then, you will need to install PyTorch: refer to the [official installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform. Then 🤗 Accelerate can be installed using pip as follows: + +```bash +pip install accelerate +``` + +## Supported integrations + +- CPU only +- multi-CPU on one node (machine) +- multi-CPU on several nodes (machines) +- single GPU +- multi-GPU on one node (machine) +- multi-GPU on several nodes (machines) +- TPU +- FP16/BFloat16 mixed precision +- FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine) +- DeepSpeed support (Experimental) +- PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental) +- Megatron-LM support (Experimental) + +## Citing 🤗 Accelerate + +If you use 🤗 Accelerate in your publication, please cite it by using the following BibTeX entry. 
+ +```bibtex +@Misc{accelerate, + title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.}, + author = {Sylvain Gugger and Lysandre Debut and Thomas Wolf and Philipp Schmid and Zachary Mueller and Sourab Mangrulkar and Marc Sun and Benjamin Bossan}, + howpublished = {\url{https://github.com/huggingface/accelerate}}, + year = {2022} +} +``` diff --git a/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..db491c83cf468945b19a098b171fcbfa7d7db1d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +accelerate = accelerate.commands.accelerate_cli:main +accelerate-config = accelerate.commands.config:main +accelerate-estimate-memory = accelerate.commands.estimate:main +accelerate-launch = accelerate.commands.launch:main diff --git a/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..a9368375be0e0e13fdad0eea4b92541bd9e1f594 --- /dev/null +++ b/venv/lib/python3.10/site-packages/accelerate-0.29.3.dist-info/top_level.txt @@ -0,0 +1 @@ +accelerate diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz new file mode 100644 index 0000000000000000000000000000000000000000..878decdcad534f6d2cdd14a487c207f8c6133261 --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a56c3fc6e0db3a4102aaed4a19fd4e154eecd956f30b6bf9179897844ed3c01e +size 790 
diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz new file mode 100644 index 0000000000000000000000000000000000000000..cec2871b09ae347e07c81eb55e7979300748ccd1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02cf30d8b196c303662b2dd035d2a58caeb762ae3a82345ffd1274961e7f5aa0 +size 752 diff --git a/venv/lib/python3.10/site-packages/requests/__init__.py b/venv/lib/python3.10/site-packages/requests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..300a16c5741d9ccb751185407694fe49e8da6bc5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/requests/__init__.py @@ -0,0 +1,180 @@ +# __ +# /__) _ _ _ _ _/ _ +# / ( (- (/ (/ (- _) / _) +# / + +""" +Requests HTTP Library +~~~~~~~~~~~~~~~~~~~~~ + +Requests is an HTTP library, written in Python, for human beings. +Basic GET usage: + + >>> import requests + >>> r = requests.get('https://www.python.org') + >>> r.status_code + 200 + >>> b'Python is a programming language' in r.content + True + +... or POST: + + >>> payload = dict(key1='value1', key2='value2') + >>> r = requests.post('https://httpbin.org/post', data=payload) + >>> print(r.text) + { + ... + "form": { + "key1": "value1", + "key2": "value2" + }, + ... + } + +The other HTTP methods are supported - see `requests.api`. Full documentation +is at . + +:copyright: (c) 2017 by Kenneth Reitz. +:license: Apache 2.0, see LICENSE for more details. 
+""" + +import warnings + +import urllib3 + +from .exceptions import RequestsDependencyWarning + +try: + from charset_normalizer import __version__ as charset_normalizer_version +except ImportError: + charset_normalizer_version = None + +try: + from chardet import __version__ as chardet_version +except ImportError: + chardet_version = None + + +def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): + urllib3_version = urllib3_version.split(".") + assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. + + # Sometimes, urllib3 only reports its version as 16.1. + if len(urllib3_version) == 2: + urllib3_version.append("0") + + # Check urllib3 for compatibility. + major, minor, patch = urllib3_version # noqa: F811 + major, minor, patch = int(major), int(minor), int(patch) + # urllib3 >= 1.21.1 + assert major >= 1 + if major == 1: + assert minor >= 21 + + # Check charset_normalizer for compatibility. + if chardet_version: + major, minor, patch = chardet_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # chardet_version >= 3.0.2, < 6.0.0 + assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0) + elif charset_normalizer_version: + major, minor, patch = charset_normalizer_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # charset_normalizer >= 2.0.0 < 4.0.0 + assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0) + else: + raise Exception("You need either charset_normalizer or chardet installed") + + +def _check_cryptography(cryptography_version): + # cryptography < 1.3.4 + try: + cryptography_version = list(map(int, cryptography_version.split("."))) + except ValueError: + return + + if cryptography_version < [1, 3, 4]: + warning = "Old version of cryptography ({}) may cause slowdown.".format( + cryptography_version + ) + warnings.warn(warning, RequestsDependencyWarning) + + +# Check imported dependencies for compatibility. 
+try: + check_compatibility( + urllib3.__version__, chardet_version, charset_normalizer_version + ) +except (AssertionError, ValueError): + warnings.warn( + "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " + "version!".format( + urllib3.__version__, chardet_version, charset_normalizer_version + ), + RequestsDependencyWarning, + ) + +# Attempt to enable urllib3's fallback for SNI support +# if the standard library doesn't support SNI or the +# 'ssl' library isn't available. +try: + try: + import ssl + except ImportError: + ssl = None + + if not getattr(ssl, "HAS_SNI", False): + from urllib3.contrib import pyopenssl + + pyopenssl.inject_into_urllib3() + + # Check cryptography version + from cryptography import __version__ as cryptography_version + + _check_cryptography(cryptography_version) +except ImportError: + pass + +# urllib3's DependencyWarnings should be silenced. +from urllib3.exceptions import DependencyWarning + +warnings.simplefilter("ignore", DependencyWarning) + +# Set default logging handler to avoid "No handler found" warnings. +import logging +from logging import NullHandler + +from . import packages, utils +from .__version__ import ( + __author__, + __author_email__, + __build__, + __cake__, + __copyright__, + __description__, + __license__, + __title__, + __url__, + __version__, +) +from .api import delete, get, head, options, patch, post, put, request +from .exceptions import ( + ConnectionError, + ConnectTimeout, + FileModeWarning, + HTTPError, + JSONDecodeError, + ReadTimeout, + RequestException, + Timeout, + TooManyRedirects, + URLRequired, +) +from .models import PreparedRequest, Request, Response +from .sessions import Session, session +from .status_codes import codes + +logging.getLogger(__name__).addHandler(NullHandler()) + +# FileModeWarnings go off per the default. 
+warnings.simplefilter("default", FileModeWarning, append=True) diff --git a/venv/lib/python3.10/site-packages/requests/_internal_utils.py b/venv/lib/python3.10/site-packages/requests/_internal_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f2cf635e2937ee9b123a1498c5c5f723a6e20084 --- /dev/null +++ b/venv/lib/python3.10/site-packages/requests/_internal_utils.py @@ -0,0 +1,50 @@ +""" +requests._internal_utils +~~~~~~~~~~~~~~ + +Provides utility functions that are consumed internally by Requests +which depend on extremely few external helpers (such as compat) +""" +import re + +from .compat import builtin_str + +_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$") +_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$") +_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$") +_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$") + +_HEADER_VALIDATORS_STR = (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR) +_HEADER_VALIDATORS_BYTE = (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE) +HEADER_VALIDATORS = { + bytes: _HEADER_VALIDATORS_BYTE, + str: _HEADER_VALIDATORS_STR, +} + + +def to_native_string(string, encoding="ascii"): + """Given a string object, regardless of type, returns a representation of + that string in the native string type, encoding and decoding where + necessary. This assumes ASCII unless told otherwise. + """ + if isinstance(string, builtin_str): + out = string + else: + out = string.decode(encoding) + + return out + + +def unicode_is_ascii(u_string): + """Determine if unicode string only contains ASCII characters. + + :param str u_string: unicode string to check. Must be unicode + and not Python 2 `str`. 
+ :rtype: bool + """ + assert isinstance(u_string, str) + try: + u_string.encode("ascii") + return True + except UnicodeEncodeError: + return False diff --git a/venv/lib/python3.10/site-packages/requests/auth.py b/venv/lib/python3.10/site-packages/requests/auth.py new file mode 100644 index 0000000000000000000000000000000000000000..9733686ddb36b826ead4f4666d42311397fa6fec --- /dev/null +++ b/venv/lib/python3.10/site-packages/requests/auth.py @@ -0,0 +1,315 @@ +""" +requests.auth +~~~~~~~~~~~~~ + +This module contains the authentication handlers for Requests. +""" + +import hashlib +import os +import re +import threading +import time +import warnings +from base64 import b64encode + +from ._internal_utils import to_native_string +from .compat import basestring, str, urlparse +from .cookies import extract_cookies_to_jar +from .utils import parse_dict_header + +CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" +CONTENT_TYPE_MULTI_PART = "multipart/form-data" + + +def _basic_auth_str(username, password): + """Returns a Basic Auth string.""" + + # "I want us to put a big-ol' comment on top of it that + # says that this behaviour is dumb but we need to preserve + # it because people are relying on it." + # - Lukasa + # + # These are here solely to maintain backwards compatibility + # for things like ints. This will be removed in 3.0.0. + if not isinstance(username, basestring): + warnings.warn( + "Non-string usernames will no longer be supported in Requests " + "3.0.0. Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(username), + category=DeprecationWarning, + ) + username = str(username) + + if not isinstance(password, basestring): + warnings.warn( + "Non-string passwords will no longer be supported in Requests " + "3.0.0. 
Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(type(password)), + category=DeprecationWarning, + ) + password = str(password) + # -- End Removal -- + + if isinstance(username, str): + username = username.encode("latin1") + + if isinstance(password, str): + password = password.encode("latin1") + + authstr = "Basic " + to_native_string( + b64encode(b":".join((username, password))).strip() + ) + + return authstr + + +class AuthBase: + """Base class that all auth implementations derive from""" + + def __call__(self, r): + raise NotImplementedError("Auth hooks must be callable.") + + +class HTTPBasicAuth(AuthBase): + """Attaches HTTP Basic Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + + def __eq__(self, other): + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) + + def __ne__(self, other): + return not self == other + + def __call__(self, r): + r.headers["Authorization"] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPProxyAuth(HTTPBasicAuth): + """Attaches HTTP Proxy Authentication to a given Request object.""" + + def __call__(self, r): + r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPDigestAuth(AuthBase): + """Attaches HTTP Digest Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + # Keep state in per-thread local storage + self._thread_local = threading.local() + + def init_per_thread_state(self): + # Ensure state is initialized just once per-thread + if not hasattr(self._thread_local, "init"): + self._thread_local.init = True + self._thread_local.last_nonce = "" + self._thread_local.nonce_count = 0 + self._thread_local.chal 
= {} + self._thread_local.pos = None + self._thread_local.num_401_calls = None + + def build_digest_header(self, method, url): + """ + :rtype: str + """ + + realm = self._thread_local.chal["realm"] + nonce = self._thread_local.chal["nonce"] + qop = self._thread_local.chal.get("qop") + algorithm = self._thread_local.chal.get("algorithm") + opaque = self._thread_local.chal.get("opaque") + hash_utf8 = None + + if algorithm is None: + _algorithm = "MD5" + else: + _algorithm = algorithm.upper() + # lambdas assume digest modules are imported at the top level + if _algorithm == "MD5" or _algorithm == "MD5-SESS": + + def md5_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.md5(x).hexdigest() + + hash_utf8 = md5_utf8 + elif _algorithm == "SHA": + + def sha_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha1(x).hexdigest() + + hash_utf8 = sha_utf8 + elif _algorithm == "SHA-256": + + def sha256_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha256(x).hexdigest() + + hash_utf8 = sha256_utf8 + elif _algorithm == "SHA-512": + + def sha512_utf8(x): + if isinstance(x, str): + x = x.encode("utf-8") + return hashlib.sha512(x).hexdigest() + + hash_utf8 = sha512_utf8 + + KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731 + + if hash_utf8 is None: + return None + + # XXX not implemented yet + entdig = None + p_parsed = urlparse(url) + #: path is request-uri defined in RFC 2616 which should not be empty + path = p_parsed.path or "/" + if p_parsed.query: + path += f"?{p_parsed.query}" + + A1 = f"{self.username}:{realm}:{self.password}" + A2 = f"{method}:{path}" + + HA1 = hash_utf8(A1) + HA2 = hash_utf8(A2) + + if nonce == self._thread_local.last_nonce: + self._thread_local.nonce_count += 1 + else: + self._thread_local.nonce_count = 1 + ncvalue = f"{self._thread_local.nonce_count:08x}" + s = str(self._thread_local.nonce_count).encode("utf-8") + s += nonce.encode("utf-8") + s += time.ctime().encode("utf-8") 
+ s += os.urandom(8) + + cnonce = hashlib.sha1(s).hexdigest()[:16] + if _algorithm == "MD5-SESS": + HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}") + + if not qop: + respdig = KD(HA1, f"{nonce}:{HA2}") + elif qop == "auth" or "auth" in qop.split(","): + noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}" + respdig = KD(HA1, noncebit) + else: + # XXX handle auth-int. + return None + + self._thread_local.last_nonce = nonce + + # XXX should the partial digests be encoded too? + base = ( + f'username="{self.username}", realm="{realm}", nonce="{nonce}", ' + f'uri="{path}", response="{respdig}"' + ) + if opaque: + base += f', opaque="{opaque}"' + if algorithm: + base += f', algorithm="{algorithm}"' + if entdig: + base += f', digest="{entdig}"' + if qop: + base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"' + + return f"Digest {base}" + + def handle_redirect(self, r, **kwargs): + """Reset num_401_calls counter on redirects.""" + if r.is_redirect: + self._thread_local.num_401_calls = 1 + + def handle_401(self, r, **kwargs): + """ + Takes the given response and tries digest-auth, if needed. + + :rtype: requests.Response + """ + + # If response is not 4xx, do not auth + # See https://github.com/psf/requests/issues/3772 + if not 400 <= r.status_code < 500: + self._thread_local.num_401_calls = 1 + return r + + if self._thread_local.pos is not None: + # Rewind the file position indicator of the body to where + # it was to resend the request. + r.request.body.seek(self._thread_local.pos) + s_auth = r.headers.get("www-authenticate", "") + + if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2: + + self._thread_local.num_401_calls += 1 + pat = re.compile(r"digest ", flags=re.IGNORECASE) + self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1)) + + # Consume content and release the original connection + # to allow our new request to reuse the same one. 
+ r.content + r.close() + prep = r.request.copy() + extract_cookies_to_jar(prep._cookies, r.request, r.raw) + prep.prepare_cookies(prep._cookies) + + prep.headers["Authorization"] = self.build_digest_header( + prep.method, prep.url + ) + _r = r.connection.send(prep, **kwargs) + _r.history.append(r) + _r.request = prep + + return _r + + self._thread_local.num_401_calls = 1 + return r + + def __call__(self, r): + # Initialize per-thread state, if needed + self.init_per_thread_state() + # If we have a saved nonce, skip the 401 + if self._thread_local.last_nonce: + r.headers["Authorization"] = self.build_digest_header(r.method, r.url) + try: + self._thread_local.pos = r.body.tell() + except AttributeError: + # In the case of HTTPDigestAuth being reused and the body of + # the previous request was a file-like object, pos has the + # file position of the previous body. Ensure it's set to + # None. + self._thread_local.pos = None + r.register_hook("response", self.handle_401) + r.register_hook("response", self.handle_redirect) + self._thread_local.num_401_calls = 1 + + return r + + def __eq__(self, other): + return all( + [ + self.username == getattr(other, "username", None), + self.password == getattr(other, "password", None), + ] + ) + + def __ne__(self, other): + return not self == other diff --git a/venv/lib/python3.10/site-packages/requests/certs.py b/venv/lib/python3.10/site-packages/requests/certs.py new file mode 100644 index 0000000000000000000000000000000000000000..be422c3e91e43bacf60ff3302688df0b28742333 --- /dev/null +++ b/venv/lib/python3.10/site-packages/requests/certs.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python + +""" +requests.certs +~~~~~~~~~~~~~~ + +This module returns the preferred default CA certificate bundle. There is +only one — the one from the certifi package. + +If you are packaging Requests, e.g., for a Linux distribution or a managed +environment, you can change the definition of where() to return a separately +packaged CA bundle. 
+""" +from certifi import where + +if __name__ == "__main__": + print(where()) diff --git a/venv/lib/python3.10/site-packages/requests/cookies.py b/venv/lib/python3.10/site-packages/requests/cookies.py new file mode 100644 index 0000000000000000000000000000000000000000..bf54ab237e410603061b8cec8fd195912d3cfb08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/requests/cookies.py @@ -0,0 +1,561 @@ +""" +requests.cookies +~~~~~~~~~~~~~~~~ + +Compatibility code to be able to use `cookielib.CookieJar` with requests. + +requests.utils imports from here, so be careful with imports. +""" + +import calendar +import copy +import time + +from ._internal_utils import to_native_string +from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse + +try: + import threading +except ImportError: + import dummy_threading as threading + + +class MockRequest: + """Wraps a `requests.Request` to mimic a `urllib2.Request`. + + The code in `cookielib.CookieJar` expects this interface in order to correctly + manage cookie policies, i.e., determine whether a cookie can be set, given the + domains of the request and the cookie. + + The original request object is read-only. The client is responsible for collecting + the new headers via `get_new_headers()` and interpreting them appropriately. You + probably want `get_cookie_header`, defined below. 
+ """ + + def __init__(self, request): + self._r = request + self._new_headers = {} + self.type = urlparse(self._r.url).scheme + + def get_type(self): + return self.type + + def get_host(self): + return urlparse(self._r.url).netloc + + def get_origin_req_host(self): + return self.get_host() + + def get_full_url(self): + # Only return the response's URL if the user hadn't set the Host + # header + if not self._r.headers.get("Host"): + return self._r.url + # If they did set it, retrieve it and reconstruct the expected domain + host = to_native_string(self._r.headers["Host"], encoding="utf-8") + parsed = urlparse(self._r.url) + # Reconstruct the URL as we expect it + return urlunparse( + [ + parsed.scheme, + host, + parsed.path, + parsed.params, + parsed.query, + parsed.fragment, + ] + ) + + def is_unverifiable(self): + return True + + def has_header(self, name): + return name in self._r.headers or name in self._new_headers + + def get_header(self, name, default=None): + return self._r.headers.get(name, self._new_headers.get(name, default)) + + def add_header(self, key, val): + """cookielib has no legitimate use for this method; add it back if you find one.""" + raise NotImplementedError( + "Cookie headers should be added with add_unredirected_header()" + ) + + def add_unredirected_header(self, name, value): + self._new_headers[name] = value + + def get_new_headers(self): + return self._new_headers + + @property + def unverifiable(self): + return self.is_unverifiable() + + @property + def origin_req_host(self): + return self.get_origin_req_host() + + @property + def host(self): + return self.get_host() + + +class MockResponse: + """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. + + ...what? Basically, expose the parsed HTTP headers from the server response + the way `cookielib` expects to see them. + """ + + def __init__(self, headers): + """Make a MockResponse for `cookielib` to read. 
+ + :param headers: a httplib.HTTPMessage or analogous carrying the headers + """ + self._headers = headers + + def info(self): + return self._headers + + def getheaders(self, name): + self._headers.getheaders(name) + + +def extract_cookies_to_jar(jar, request, response): + """Extract the cookies from the response into a CookieJar. + + :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar) + :param request: our own requests.Request object + :param response: urllib3.HTTPResponse object + """ + if not (hasattr(response, "_original_response") and response._original_response): + return + # the _original_response field is the wrapped httplib.HTTPResponse object, + req = MockRequest(request) + # pull out the HTTPMessage with the headers and put it in the mock: + res = MockResponse(response._original_response.msg) + jar.extract_cookies(res, req) + + +def get_cookie_header(jar, request): + """ + Produce an appropriate Cookie header string to be sent with `request`, or None. + + :rtype: str + """ + r = MockRequest(request) + jar.add_cookie_header(r) + return r.get_new_headers().get("Cookie") + + +def remove_cookie_by_name(cookiejar, name, domain=None, path=None): + """Unsets a cookie by name, by default over all domains and paths. + + Wraps CookieJar.clear(), is O(n). + """ + clearables = [] + for cookie in cookiejar: + if cookie.name != name: + continue + if domain is not None and domain != cookie.domain: + continue + if path is not None and path != cookie.path: + continue + clearables.append((cookie.domain, cookie.path, cookie.name)) + + for domain, path, name in clearables: + cookiejar.clear(domain, path, name) + + +class CookieConflictError(RuntimeError): + """There are two cookies that meet the criteria specified in the cookie jar. + Use .get and .set and include domain and path args in order to be more specific. 
+ """ + + +class RequestsCookieJar(cookielib.CookieJar, MutableMapping): + """Compatibility class; is a cookielib.CookieJar, but exposes a dict + interface. + + This is the CookieJar we create by default for requests and sessions that + don't specify one, since some clients may expect response.cookies and + session.cookies to support dict operations. + + Requests does not use the dict interface internally; it's just for + compatibility with external client code. All requests code should work + out of the box with externally provided instances of ``CookieJar``, e.g. + ``LWPCookieJar`` and ``FileCookieJar``. + + Unlike a regular CookieJar, this class is pickleable. + + .. warning:: dictionary operations that are normally O(1) may be O(n). + """ + + def get(self, name, default=None, domain=None, path=None): + """Dict-like get() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + + .. warning:: operation is O(n), not O(1). + """ + try: + return self._find_no_duplicates(name, domain, path) + except KeyError: + return default + + def set(self, name, value, **kwargs): + """Dict-like set() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + """ + # support client code that unsets cookies by assignment of a None value: + if value is None: + remove_cookie_by_name( + self, name, domain=kwargs.get("domain"), path=kwargs.get("path") + ) + return + + if isinstance(value, Morsel): + c = morsel_to_cookie(value) + else: + c = create_cookie(name, value, **kwargs) + self.set_cookie(c) + return c + + def iterkeys(self): + """Dict-like iterkeys() that returns an iterator of names of cookies + from the jar. + + .. seealso:: itervalues() and iteritems(). + """ + for cookie in iter(self): + yield cookie.name + + def keys(self): + """Dict-like keys() that returns a list of names of cookies from the + jar. + + .. 
seealso:: values() and items(). + """ + return list(self.iterkeys()) + + def itervalues(self): + """Dict-like itervalues() that returns an iterator of values of cookies + from the jar. + + .. seealso:: iterkeys() and iteritems(). + """ + for cookie in iter(self): + yield cookie.value + + def values(self): + """Dict-like values() that returns a list of values of cookies from the + jar. + + .. seealso:: keys() and items(). + """ + return list(self.itervalues()) + + def iteritems(self): + """Dict-like iteritems() that returns an iterator of name-value tuples + from the jar. + + .. seealso:: iterkeys() and itervalues(). + """ + for cookie in iter(self): + yield cookie.name, cookie.value + + def items(self): + """Dict-like items() that returns a list of name-value tuples from the + jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a + vanilla python dict of key value pairs. + + .. seealso:: keys() and values(). + """ + return list(self.iteritems()) + + def list_domains(self): + """Utility method to list all the domains in the jar.""" + domains = [] + for cookie in iter(self): + if cookie.domain not in domains: + domains.append(cookie.domain) + return domains + + def list_paths(self): + """Utility method to list all the paths in the jar.""" + paths = [] + for cookie in iter(self): + if cookie.path not in paths: + paths.append(cookie.path) + return paths + + def multiple_domains(self): + """Returns True if there are multiple domains in the jar. + Returns False otherwise. + + :rtype: bool + """ + domains = [] + for cookie in iter(self): + if cookie.domain is not None and cookie.domain in domains: + return True + domains.append(cookie.domain) + return False # there is only one domain in jar + + def get_dict(self, domain=None, path=None): + """Takes as an argument an optional domain and path and returns a plain + old Python dict of name-value pairs of cookies that meet the + requirements. 
+ + :rtype: dict + """ + dictionary = {} + for cookie in iter(self): + if (domain is None or cookie.domain == domain) and ( + path is None or cookie.path == path + ): + dictionary[cookie.name] = cookie.value + return dictionary + + def __contains__(self, name): + try: + return super().__contains__(name) + except CookieConflictError: + return True + + def __getitem__(self, name): + """Dict-like __getitem__() for compatibility with client code. Throws + exception if there are more than one cookie with name. In that case, + use the more explicit get() method instead. + + .. warning:: operation is O(n), not O(1). + """ + return self._find_no_duplicates(name) + + def __setitem__(self, name, value): + """Dict-like __setitem__ for compatibility with client code. Throws + exception if there is already a cookie of that name in the jar. In that + case, use the more explicit set() method instead. + """ + self.set(name, value) + + def __delitem__(self, name): + """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s + ``remove_cookie_by_name()``. + """ + remove_cookie_by_name(self, name) + + def set_cookie(self, cookie, *args, **kwargs): + if ( + hasattr(cookie.value, "startswith") + and cookie.value.startswith('"') + and cookie.value.endswith('"') + ): + cookie.value = cookie.value.replace('\\"', "") + return super().set_cookie(cookie, *args, **kwargs) + + def update(self, other): + """Updates this jar with cookies from another CookieJar or dict-like""" + if isinstance(other, cookielib.CookieJar): + for cookie in other: + self.set_cookie(copy.copy(cookie)) + else: + super().update(other) + + def _find(self, name, domain=None, path=None): + """Requests uses this method internally to get cookie values. + + If there are conflicting cookies, _find arbitrarily chooses one. + See _find_no_duplicates if you want an exception thrown if there are + conflicting cookies. 
+ + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :return: cookie.value + """ + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + return cookie.value + + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + + def _find_no_duplicates(self, name, domain=None, path=None): + """Both ``__get_item__`` and ``get`` call this function: it's never + used elsewhere in Requests. + + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :raises KeyError: if cookie is not found + :raises CookieConflictError: if there are multiple cookies + that match name and optionally domain and path + :return: cookie.value + """ + toReturn = None + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + if toReturn is not None: + # if there are multiple cookies that meet passed in criteria + raise CookieConflictError( + f"There are multiple cookies with name, {name!r}" + ) + # we will eventually return this as long as no cookie conflict + toReturn = cookie.value + + if toReturn: + return toReturn + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + + def __getstate__(self): + """Unlike a normal CookieJar, this class is pickleable.""" + state = self.__dict__.copy() + # remove the unpickleable RLock object + state.pop("_cookies_lock") + return state + + def __setstate__(self, state): + """Unlike a normal CookieJar, this class is pickleable.""" + self.__dict__.update(state) + if "_cookies_lock" not in self.__dict__: + self._cookies_lock = threading.RLock() + + def copy(self): + """Return a copy of this RequestsCookieJar.""" + new_cj = 
RequestsCookieJar() + new_cj.set_policy(self.get_policy()) + new_cj.update(self) + return new_cj + + def get_policy(self): + """Return the CookiePolicy instance used.""" + return self._policy + + +def _copy_cookie_jar(jar): + if jar is None: + return None + + if hasattr(jar, "copy"): + # We're dealing with an instance of RequestsCookieJar + return jar.copy() + # We're dealing with a generic CookieJar instance + new_jar = copy.copy(jar) + new_jar.clear() + for cookie in jar: + new_jar.set_cookie(copy.copy(cookie)) + return new_jar + + +def create_cookie(name, value, **kwargs): + """Make a cookie from underspecified parameters. + + By default, the pair of `name` and `value` will be set for the domain '' + and sent on every request (this is sometimes called a "supercookie"). + """ + result = { + "version": 0, + "name": name, + "value": value, + "port": None, + "domain": "", + "path": "/", + "secure": False, + "expires": None, + "discard": True, + "comment": None, + "comment_url": None, + "rest": {"HttpOnly": None}, + "rfc2109": False, + } + + badargs = set(kwargs) - set(result) + if badargs: + raise TypeError( + f"create_cookie() got unexpected keyword arguments: {list(badargs)}" + ) + + result.update(kwargs) + result["port_specified"] = bool(result["port"]) + result["domain_specified"] = bool(result["domain"]) + result["domain_initial_dot"] = result["domain"].startswith(".") + result["path_specified"] = bool(result["path"]) + + return cookielib.Cookie(**result) + + +def morsel_to_cookie(morsel): + """Convert a Morsel object into a Cookie containing the one k/v pair.""" + + expires = None + if morsel["max-age"]: + try: + expires = int(time.time() + int(morsel["max-age"])) + except ValueError: + raise TypeError(f"max-age: {morsel['max-age']} must be integer") + elif morsel["expires"]: + time_template = "%a, %d-%b-%Y %H:%M:%S GMT" + expires = calendar.timegm(time.strptime(morsel["expires"], time_template)) + return create_cookie( + comment=morsel["comment"], + 
comment_url=bool(morsel["comment"]), + discard=False, + domain=morsel["domain"], + expires=expires, + name=morsel.key, + path=morsel["path"], + port=None, + rest={"HttpOnly": morsel["httponly"]}, + rfc2109=False, + secure=bool(morsel["secure"]), + value=morsel.value, + version=morsel["version"] or 0, + ) + + +def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): + """Returns a CookieJar from a key/value dictionary. + + :param cookie_dict: Dict of key/values to insert into CookieJar. + :param cookiejar: (optional) A cookiejar to add the cookies to. + :param overwrite: (optional) If False, will not replace cookies + already in the jar with new ones. + :rtype: CookieJar + """ + if cookiejar is None: + cookiejar = RequestsCookieJar() + + if cookie_dict is not None: + names_from_jar = [cookie.name for cookie in cookiejar] + for name in cookie_dict: + if overwrite or (name not in names_from_jar): + cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) + + return cookiejar + + +def merge_cookies(cookiejar, cookies): + """Add cookies to cookiejar and returns a merged CookieJar. + + :param cookiejar: CookieJar object to add the cookies to. + :param cookies: Dictionary or CookieJar object to be added. 
+ :rtype: CookieJar + """ + if not isinstance(cookiejar, cookielib.CookieJar): + raise ValueError("You can only merge into CookieJar") + + if isinstance(cookies, dict): + cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False) + elif isinstance(cookies, cookielib.CookieJar): + try: + cookiejar.update(cookies) + except AttributeError: + for cookie_in_jar in cookies: + cookiejar.set_cookie(cookie_in_jar) + + return cookiejar diff --git a/venv/lib/python3.10/site-packages/requests/help.py b/venv/lib/python3.10/site-packages/requests/help.py new file mode 100644 index 0000000000000000000000000000000000000000..8fbcd6560a8fe2c8a07e3bd1441a81e0db9cb689 --- /dev/null +++ b/venv/lib/python3.10/site-packages/requests/help.py @@ -0,0 +1,134 @@ +"""Module containing bug report helper(s).""" + +import json +import platform +import ssl +import sys + +import idna +import urllib3 + +from . import __version__ as requests_version + +try: + import charset_normalizer +except ImportError: + charset_normalizer = None + +try: + import chardet +except ImportError: + chardet = None + +try: + from urllib3.contrib import pyopenssl +except ImportError: + pyopenssl = None + OpenSSL = None + cryptography = None +else: + import cryptography + import OpenSSL + + +def _implementation(): + """Return a dict with the Python implementation and version. + + Provide both the name and the version of the Python implementation + currently running. For example, on CPython 3.10.3 it will return + {'name': 'CPython', 'version': '3.10.3'}. + + This function works best on CPython and PyPy: in particular, it probably + doesn't work for Jython or IronPython. Future investigation should be done + to work out the correct shape of the code for those platforms. 
+ """ + implementation = platform.python_implementation() + + if implementation == "CPython": + implementation_version = platform.python_version() + elif implementation == "PyPy": + implementation_version = "{}.{}.{}".format( + sys.pypy_version_info.major, + sys.pypy_version_info.minor, + sys.pypy_version_info.micro, + ) + if sys.pypy_version_info.releaselevel != "final": + implementation_version = "".join( + [implementation_version, sys.pypy_version_info.releaselevel] + ) + elif implementation == "Jython": + implementation_version = platform.python_version() # Complete Guess + elif implementation == "IronPython": + implementation_version = platform.python_version() # Complete Guess + else: + implementation_version = "Unknown" + + return {"name": implementation, "version": implementation_version} + + +def info(): + """Generate information for a bug report.""" + try: + platform_info = { + "system": platform.system(), + "release": platform.release(), + } + except OSError: + platform_info = { + "system": "Unknown", + "release": "Unknown", + } + + implementation_info = _implementation() + urllib3_info = {"version": urllib3.__version__} + charset_normalizer_info = {"version": None} + chardet_info = {"version": None} + if charset_normalizer: + charset_normalizer_info = {"version": charset_normalizer.__version__} + if chardet: + chardet_info = {"version": chardet.__version__} + + pyopenssl_info = { + "version": None, + "openssl_version": "", + } + if OpenSSL: + pyopenssl_info = { + "version": OpenSSL.__version__, + "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}", + } + cryptography_info = { + "version": getattr(cryptography, "__version__", ""), + } + idna_info = { + "version": getattr(idna, "__version__", ""), + } + + system_ssl = ssl.OPENSSL_VERSION_NUMBER + system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""} + + return { + "platform": platform_info, + "implementation": implementation_info, + "system_ssl": system_ssl_info, + 
def dispatch_hook(key, hooks, hook_data, **kwargs):
    """Dispatch a hook dictionary on a given piece of data.

    :param key: the event name to look up in ``hooks`` (e.g. ``"response"``).
    :param hooks: mapping of event name to a single callable or a list of
        callables; may be ``None``.
    :param hook_data: the data threaded through each hook.
    :param kwargs: extra keyword arguments forwarded to every hook.
    :return: ``hook_data``, possibly replaced by hook return values; a hook
        returning ``None`` leaves the current data unchanged.
    """
    hooks = (hooks or {}).get(key)
    if hooks:
        # A single bare callable is shorthand for a one-item list.
        if callable(hooks):
            hooks = [hooks]
        for hook in hooks:
            _hook_data = hook(hook_data, **kwargs)
            if _hook_data is not None:
                hook_data = _hook_data
    return hook_data
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """Determine the appropriate setting for a given request, combining the
    explicit request-level setting with the session-level one. Dictionary
    settings are merged using `dict_class`; anything else is taken from the
    request side verbatim.
    """
    # A missing side simply yields the other one.
    if session_setting is None:
        return request_setting
    if request_setting is None:
        return session_setting

    # Non-dictionary settings (e.g. verify) are not merged: the
    # request-level value wins outright.
    if not (
        isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping)
    ):
        return request_setting

    # Session values first, then request values layered on top. Both sides
    # are known to be Mappings here, so iterating their items is safe.
    merged = dict_class(session_setting.items())
    merged.update(request_setting.items())

    # A request value of None deletes the session's entry. Collect the keys
    # first so the dictionary isn't mutated while being iterated.
    for key in [k for k, v in merged.items() if v is None]:
        del merged[key]

    return merged
+ location = location.encode("latin1") + return to_native_string(location, "utf8") + return None + + def should_strip_auth(self, old_url, new_url): + """Decide whether Authorization header should be removed when redirecting""" + old_parsed = urlparse(old_url) + new_parsed = urlparse(new_url) + if old_parsed.hostname != new_parsed.hostname: + return True + # Special case: allow http -> https redirect when using the standard + # ports. This isn't specified by RFC 7235, but is kept to avoid + # breaking backwards compatibility with older versions of requests + # that allowed any redirects on the same host. + if ( + old_parsed.scheme == "http" + and old_parsed.port in (80, None) + and new_parsed.scheme == "https" + and new_parsed.port in (443, None) + ): + return False + + # Handle default port usage corresponding to scheme. + changed_port = old_parsed.port != new_parsed.port + changed_scheme = old_parsed.scheme != new_parsed.scheme + default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) + if ( + not changed_scheme + and old_parsed.port in default_port + and new_parsed.port in default_port + ): + return False + + # Standard case: root URI must match + return changed_port or changed_scheme + + def resolve_redirects( + self, + resp, + req, + stream=False, + timeout=None, + verify=True, + cert=None, + proxies=None, + yield_requests=False, + **adapter_kwargs, + ): + """Receives a Response. Returns a generator of Responses or Requests.""" + + hist = [] # keep track of history + + url = self.get_redirect_target(resp) + previous_fragment = urlparse(req.url).fragment + while url: + prepared_request = req.copy() + + # Update history and keep track of redirects. 
+ # resp.history must ignore the original request in this loop + hist.append(resp) + resp.history = hist[1:] + + try: + resp.content # Consume socket so it can be released + except (ChunkedEncodingError, ContentDecodingError, RuntimeError): + resp.raw.read(decode_content=False) + + if len(resp.history) >= self.max_redirects: + raise TooManyRedirects( + f"Exceeded {self.max_redirects} redirects.", response=resp + ) + + # Release the connection back into the pool. + resp.close() + + # Handle redirection without scheme (see: RFC 1808 Section 4) + if url.startswith("//"): + parsed_rurl = urlparse(resp.url) + url = ":".join([to_native_string(parsed_rurl.scheme), url]) + + # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) + parsed = urlparse(url) + if parsed.fragment == "" and previous_fragment: + parsed = parsed._replace(fragment=previous_fragment) + elif parsed.fragment: + previous_fragment = parsed.fragment + url = parsed.geturl() + + # Facilitate relative 'location' headers, as allowed by RFC 7231. + # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') + # Compliant with RFC3986, we percent encode the url. + if not parsed.netloc: + url = urljoin(resp.url, requote_uri(url)) + else: + url = requote_uri(url) + + prepared_request.url = to_native_string(url) + + self.rebuild_method(prepared_request, resp) + + # https://github.com/psf/requests/issues/1084 + if resp.status_code not in ( + codes.temporary_redirect, + codes.permanent_redirect, + ): + # https://github.com/psf/requests/issues/3490 + purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") + for header in purged_headers: + prepared_request.headers.pop(header, None) + prepared_request.body = None + + headers = prepared_request.headers + headers.pop("Cookie", None) + + # Extract any cookies sent on the response to the cookiejar + # in the new request. 
Because we've mutated our copied prepared + # request, use the old one that we haven't yet touched. + extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) + merge_cookies(prepared_request._cookies, self.cookies) + prepared_request.prepare_cookies(prepared_request._cookies) + + # Rebuild auth and proxy information. + proxies = self.rebuild_proxies(prepared_request, proxies) + self.rebuild_auth(prepared_request, resp) + + # A failed tell() sets `_body_position` to `object()`. This non-None + # value ensures `rewindable` will be True, allowing us to raise an + # UnrewindableBodyError, instead of hanging the connection. + rewindable = prepared_request._body_position is not None and ( + "Content-Length" in headers or "Transfer-Encoding" in headers + ) + + # Attempt to rewind consumed file-like object. + if rewindable: + rewind_body(prepared_request) + + # Override the original request. + req = prepared_request + + if yield_requests: + yield req + else: + + resp = self.send( + req, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + allow_redirects=False, + **adapter_kwargs, + ) + + extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) + + # extract redirect url, if any, for the next loop + url = self.get_redirect_target(resp) + yield resp + + def rebuild_auth(self, prepared_request, response): + """When being redirected we may want to strip authentication from the + request to avoid leaking credentials. This method intelligently removes + and reapplies authentication where possible to avoid credential loss. + """ + headers = prepared_request.headers + url = prepared_request.url + + if "Authorization" in headers and self.should_strip_auth( + response.request.url, url + ): + # If we get redirected to a new host, we should strip out any + # authentication headers. + del headers["Authorization"] + + # .netrc might have more auth for us on our new host. 
+ new_auth = get_netrc_auth(url) if self.trust_env else None + if new_auth is not None: + prepared_request.prepare_auth(new_auth) + + def rebuild_proxies(self, prepared_request, proxies): + """This method re-evaluates the proxy configuration by considering the + environment variables. If we are redirected to a URL covered by + NO_PROXY, we strip the proxy configuration. Otherwise, we set missing + proxy keys for this URL (in case they were stripped by a previous + redirect). + + This method also replaces the Proxy-Authorization header where + necessary. + + :rtype: dict + """ + headers = prepared_request.headers + scheme = urlparse(prepared_request.url).scheme + new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env) + + if "Proxy-Authorization" in headers: + del headers["Proxy-Authorization"] + + try: + username, password = get_auth_from_url(new_proxies[scheme]) + except KeyError: + username, password = None, None + + # urllib3 handles proxy authorization for us in the standard adapter. + # Avoid appending this to TLS tunneled requests where it may be leaked. + if not scheme.startswith('https') and username and password: + headers["Proxy-Authorization"] = _basic_auth_str(username, password) + + return new_proxies + + def rebuild_method(self, prepared_request, response): + """When being redirected we may want to change the method of the request + based on certain specs or browser behavior. + """ + method = prepared_request.method + + # https://tools.ietf.org/html/rfc7231#section-6.4.4 + if response.status_code == codes.see_other and method != "HEAD": + method = "GET" + + # Do what the browsers do, despite standards... + # First, turn 302s into GETs. + if response.status_code == codes.found and method != "HEAD": + method = "GET" + + # Second, if a POST is responded to with a 301, turn it into a GET. + # This bizarre behaviour is explained in Issue 1704. 
+ if response.status_code == codes.moved and method == "POST": + method = "GET" + + prepared_request.method = method + + +class Session(SessionRedirectMixin): + """A Requests session. + + Provides cookie persistence, connection-pooling, and configuration. + + Basic Usage:: + + >>> import requests + >>> s = requests.Session() + >>> s.get('https://httpbin.org/get') + + + Or as a context manager:: + + >>> with requests.Session() as s: + ... s.get('https://httpbin.org/get') + + """ + + __attrs__ = [ + "headers", + "cookies", + "auth", + "proxies", + "hooks", + "params", + "verify", + "cert", + "adapters", + "stream", + "trust_env", + "max_redirects", + ] + + def __init__(self): + + #: A case-insensitive dictionary of headers to be sent on each + #: :class:`Request ` sent from this + #: :class:`Session `. + self.headers = default_headers() + + #: Default Authentication tuple or object to attach to + #: :class:`Request `. + self.auth = None + + #: Dictionary mapping protocol or protocol and host to the URL of the proxy + #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to + #: be used on each :class:`Request `. + self.proxies = {} + + #: Event-handling hooks. + self.hooks = default_hooks() + + #: Dictionary of querystring data to attach to each + #: :class:`Request `. The dictionary values may be lists for + #: representing multivalued query parameters. + self.params = {} + + #: Stream response content default. + self.stream = False + + #: SSL Verification default. + #: Defaults to `True`, requiring requests to verify the TLS certificate at the + #: remote end. + #: If verify is set to `False`, requests will accept any TLS certificate + #: presented by the server, and will ignore hostname mismatches and/or + #: expired certificates, which will make your application vulnerable to + #: man-in-the-middle (MitM) attacks. + #: Only set this to `False` for testing. 
+ self.verify = True + + #: SSL client certificate default, if String, path to ssl client + #: cert file (.pem). If Tuple, ('cert', 'key') pair. + self.cert = None + + #: Maximum number of redirects allowed. If the request exceeds this + #: limit, a :class:`TooManyRedirects` exception is raised. + #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is + #: 30. + self.max_redirects = DEFAULT_REDIRECT_LIMIT + + #: Trust environment settings for proxy configuration, default + #: authentication and similar. + self.trust_env = True + + #: A CookieJar containing all currently outstanding cookies set on this + #: session. By default it is a + #: :class:`RequestsCookieJar `, but + #: may be any other ``cookielib.CookieJar`` compatible object. + self.cookies = cookiejar_from_dict({}) + + # Default connection adapters. + self.adapters = OrderedDict() + self.mount("https://", HTTPAdapter()) + self.mount("http://", HTTPAdapter()) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def prepare_request(self, request): + """Constructs a :class:`PreparedRequest ` for + transmission and returns it. The :class:`PreparedRequest` has settings + merged from the :class:`Request ` instance and those of the + :class:`Session`. + + :param request: :class:`Request` instance to prepare with this + session's settings. + :rtype: requests.PreparedRequest + """ + cookies = request.cookies or {} + + # Bootstrap CookieJar. + if not isinstance(cookies, cookielib.CookieJar): + cookies = cookiejar_from_dict(cookies) + + # Merge with session cookies + merged_cookies = merge_cookies( + merge_cookies(RequestsCookieJar(), self.cookies), cookies + ) + + # Set environment's basic authentication if not explicitly set. 
+ auth = request.auth + if self.trust_env and not auth and not self.auth: + auth = get_netrc_auth(request.url) + + p = PreparedRequest() + p.prepare( + method=request.method.upper(), + url=request.url, + files=request.files, + data=request.data, + json=request.json, + headers=merge_setting( + request.headers, self.headers, dict_class=CaseInsensitiveDict + ), + params=merge_setting(request.params, self.params), + auth=merge_setting(auth, self.auth), + cookies=merged_cookies, + hooks=merge_hooks(request.hooks, self.hooks), + ) + return p + + def request( + self, + method, + url, + params=None, + data=None, + headers=None, + cookies=None, + files=None, + auth=None, + timeout=None, + allow_redirects=True, + proxies=None, + hooks=None, + stream=None, + verify=None, + cert=None, + json=None, + ): + """Constructs a :class:`Request `, prepares it and sends it. + Returns :class:`Response ` object. + + :param method: method for the new :class:`Request` object. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary or bytes to be sent in the query + string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json to send in the body of the + :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the + :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the + :class:`Request`. + :param files: (optional) Dictionary of ``'filename': file-like-objects`` + for multipart encoding upload. + :param auth: (optional) Auth tuple or callable to enable + Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) Set to True by default. 
+ :type allow_redirects: bool + :param proxies: (optional) Dictionary mapping protocol or protocol and + hostname to the URL of the proxy. + :param stream: (optional) whether to immediately download the response + content. Defaults to ``False``. + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. When set to + ``False``, requests will accept any TLS certificate presented by + the server, and will ignore hostname mismatches and/or expired + certificates, which will make your application vulnerable to + man-in-the-middle (MitM) attacks. Setting verify to ``False`` + may be useful during local development or testing. + :param cert: (optional) if String, path to ssl client cert file (.pem). + If Tuple, ('cert', 'key') pair. + :rtype: requests.Response + """ + # Create the Request. + req = Request( + method=method.upper(), + url=url, + headers=headers, + files=files, + data=data or {}, + json=json, + params=params or {}, + auth=auth, + cookies=cookies, + hooks=hooks, + ) + prep = self.prepare_request(req) + + proxies = proxies or {} + + settings = self.merge_environment_settings( + prep.url, proxies, stream, verify, cert + ) + + # Send the request. + send_kwargs = { + "timeout": timeout, + "allow_redirects": allow_redirects, + } + send_kwargs.update(settings) + resp = self.send(prep, **send_kwargs) + + return resp + + def get(self, url, **kwargs): + r"""Sends a GET request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", True) + return self.request("GET", url, **kwargs) + + def options(self, url, **kwargs): + r"""Sends a OPTIONS request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. 
+ :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", True) + return self.request("OPTIONS", url, **kwargs) + + def head(self, url, **kwargs): + r"""Sends a HEAD request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", False) + return self.request("HEAD", url, **kwargs) + + def post(self, url, data=None, json=None, **kwargs): + r"""Sends a POST request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("POST", url, data=data, json=json, **kwargs) + + def put(self, url, data=None, **kwargs): + r"""Sends a PUT request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("PUT", url, data=data, **kwargs) + + def patch(self, url, data=None, **kwargs): + r"""Sends a PATCH request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. 
+ :rtype: requests.Response + """ + + return self.request("PATCH", url, data=data, **kwargs) + + def delete(self, url, **kwargs): + r"""Sends a DELETE request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("DELETE", url, **kwargs) + + def send(self, request, **kwargs): + """Send a given PreparedRequest. + + :rtype: requests.Response + """ + # Set defaults that the hooks can utilize to ensure they always have + # the correct parameters to reproduce the previous request. + kwargs.setdefault("stream", self.stream) + kwargs.setdefault("verify", self.verify) + kwargs.setdefault("cert", self.cert) + if "proxies" not in kwargs: + kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) + + # It's possible that users might accidentally send a Request object. + # Guard against that specific failure case. + if isinstance(request, Request): + raise ValueError("You can only send PreparedRequests.") + + # Set up variables needed for resolve_redirects and dispatching of hooks + allow_redirects = kwargs.pop("allow_redirects", True) + stream = kwargs.get("stream") + hooks = request.hooks + + # Get the appropriate adapter to use + adapter = self.get_adapter(url=request.url) + + # Start time (approximately) of the request + start = preferred_clock() + + # Send the request + r = adapter.send(request, **kwargs) + + # Total elapsed time of the request (approximately) + elapsed = preferred_clock() - start + r.elapsed = timedelta(seconds=elapsed) + + # Response manipulation hooks + r = dispatch_hook("response", hooks, r, **kwargs) + + # Persist cookies + if r.history: + + # If the hooks create history then we want those cookies too + for resp in r.history: + extract_cookies_to_jar(self.cookies, resp.request, resp.raw) + + extract_cookies_to_jar(self.cookies, request, r.raw) + + # Resolve redirects if 
allowed. + if allow_redirects: + # Redirect resolving generator. + gen = self.resolve_redirects(r, request, **kwargs) + history = [resp for resp in gen] + else: + history = [] + + # Shuffle things around if there's history. + if history: + # Insert the first (original) request at the start + history.insert(0, r) + # Get the last request made + r = history.pop() + r.history = history + + # If redirects aren't being followed, store the response on the Request for Response.next(). + if not allow_redirects: + try: + r._next = next( + self.resolve_redirects(r, request, yield_requests=True, **kwargs) + ) + except StopIteration: + pass + + if not stream: + r.content + + return r + + def merge_environment_settings(self, url, proxies, stream, verify, cert): + """ + Check the environment and merge it with some settings. + + :rtype: dict + """ + # Gather clues from the surrounding environment. + if self.trust_env: + # Set environment's proxies. + no_proxy = proxies.get("no_proxy") if proxies is not None else None + env_proxies = get_environ_proxies(url, no_proxy=no_proxy) + for (k, v) in env_proxies.items(): + proxies.setdefault(k, v) + + # Look for requests environment configuration + # and be compatible with cURL. + if verify is True or verify is None: + verify = ( + os.environ.get("REQUESTS_CA_BUNDLE") + or os.environ.get("CURL_CA_BUNDLE") + or verify + ) + + # Merge all the kwargs. + proxies = merge_setting(proxies, self.proxies) + stream = merge_setting(stream, self.stream) + verify = merge_setting(verify, self.verify) + cert = merge_setting(cert, self.cert) + + return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} + + def get_adapter(self, url): + """ + Returns the appropriate connection adapter for the given URL. 
+ + :rtype: requests.adapters.BaseAdapter + """ + for (prefix, adapter) in self.adapters.items(): + + if url.lower().startswith(prefix.lower()): + return adapter + + # Nothing matches :-/ + raise InvalidSchema(f"No connection adapters were found for {url!r}") + + def close(self): + """Closes all adapters and as such the session""" + for v in self.adapters.values(): + v.close() + + def mount(self, prefix, adapter): + """Registers a connection adapter to a prefix. + + Adapters are sorted in descending order by prefix length. + """ + self.adapters[prefix] = adapter + keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] + + for key in keys_to_move: + self.adapters[key] = self.adapters.pop(key) + + def __getstate__(self): + state = {attr: getattr(self, attr, None) for attr in self.__attrs__} + return state + + def __setstate__(self, state): + for attr, value in state.items(): + setattr(self, attr, value) + + +def session(): + """ + Returns a :class:`Session` for context-management. + + .. deprecated:: 1.0.0 + + This method has been deprecated since version 1.0.0 and is only kept for + backwards compatibility. New code should use :class:`~requests.sessions.Session` + to create a session. This may be removed at a future date. + + :rtype: Session + """ + return Session() diff --git a/venv/lib/python3.10/site-packages/requests/status_codes.py b/venv/lib/python3.10/site-packages/requests/status_codes.py new file mode 100644 index 0000000000000000000000000000000000000000..4bd072be9769748a852740d037d5c63021472c9d --- /dev/null +++ b/venv/lib/python3.10/site-packages/requests/status_codes.py @@ -0,0 +1,128 @@ +r""" +The ``codes`` object defines a mapping from common names for HTTP statuses +to their numerical codes, accessible either as attributes or as dictionary +items. 
+ +Example:: + + >>> import requests + >>> requests.codes['temporary_redirect'] + 307 + >>> requests.codes.teapot + 418 + >>> requests.codes['\o/'] + 200 + +Some codes have multiple names, and both upper- and lower-case versions of +the names are allowed. For example, ``codes.ok``, ``codes.OK``, and +``codes.okay`` all correspond to the HTTP status code 200. +""" + +from .structures import LookupDict + +_codes = { + # Informational. + 100: ("continue",), + 101: ("switching_protocols",), + 102: ("processing",), + 103: ("checkpoint",), + 122: ("uri_too_long", "request_uri_too_long"), + 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), + 201: ("created",), + 202: ("accepted",), + 203: ("non_authoritative_info", "non_authoritative_information"), + 204: ("no_content",), + 205: ("reset_content", "reset"), + 206: ("partial_content", "partial"), + 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), + 208: ("already_reported",), + 226: ("im_used",), + # Redirection. + 300: ("multiple_choices",), + 301: ("moved_permanently", "moved", "\\o-"), + 302: ("found",), + 303: ("see_other", "other"), + 304: ("not_modified",), + 305: ("use_proxy",), + 306: ("switch_proxy",), + 307: ("temporary_redirect", "temporary_moved", "temporary"), + 308: ( + "permanent_redirect", + "resume_incomplete", + "resume", + ), # "resume" and "resume_incomplete" to be removed in 3.0 + # Client Error. 
+ 400: ("bad_request", "bad"), + 401: ("unauthorized",), + 402: ("payment_required", "payment"), + 403: ("forbidden",), + 404: ("not_found", "-o-"), + 405: ("method_not_allowed", "not_allowed"), + 406: ("not_acceptable",), + 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), + 408: ("request_timeout", "timeout"), + 409: ("conflict",), + 410: ("gone",), + 411: ("length_required",), + 412: ("precondition_failed", "precondition"), + 413: ("request_entity_too_large",), + 414: ("request_uri_too_large",), + 415: ("unsupported_media_type", "unsupported_media", "media_type"), + 416: ( + "requested_range_not_satisfiable", + "requested_range", + "range_not_satisfiable", + ), + 417: ("expectation_failed",), + 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), + 421: ("misdirected_request",), + 422: ("unprocessable_entity", "unprocessable"), + 423: ("locked",), + 424: ("failed_dependency", "dependency"), + 425: ("unordered_collection", "unordered"), + 426: ("upgrade_required", "upgrade"), + 428: ("precondition_required", "precondition"), + 429: ("too_many_requests", "too_many"), + 431: ("header_fields_too_large", "fields_too_large"), + 444: ("no_response", "none"), + 449: ("retry_with", "retry"), + 450: ("blocked_by_windows_parental_controls", "parental_controls"), + 451: ("unavailable_for_legal_reasons", "legal_reasons"), + 499: ("client_closed_request",), + # Server Error. 
def _init():
    """Populate ``codes`` with attribute aliases and extend the module docstring."""
    # Register every alias, plus an upper-case variant for word-like names
    # (names starting with '\' or '/' are ASCII-art, not identifiers).
    for code, titles in _codes.items():
        for title in titles:
            setattr(codes, title, code)
            if not title.startswith(("\\", "/")):
                setattr(codes, title.upper(), code)

    def _describe(code):
        aliases = ", ".join(f"``{n}``" for n in _codes[code])
        return "* %d: %s" % (code, aliases)

    # Append the generated code table to the module docstring (absent when
    # running under python -OO, where docstrings are stripped).
    global __doc__
    if __doc__ is not None:
        __doc__ = __doc__ + "\n" + "\n".join(_describe(code) for code in sorted(_codes))
if sys.platform == "win32":
    # Windows-only helpers: decide proxy bypass from the registry rather
    # than doing DNS lookups.

    def proxy_bypass_registry(host):
        """Check the Windows registry's ProxyOverride list for *host*.

        :param host: hostname to test.
        :rtype: bool
        """
        try:
            import winreg
        except ImportError:
            return False

        try:
            internetSettings = winreg.OpenKey(
                winreg.HKEY_CURRENT_USER,
                r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
            )
            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
            proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
            # ProxyOverride is almost always a string
            proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
        except (OSError, ValueError):
            return False
        if not proxyEnable or not proxyOverride:
            return False

        # The override list is ';'-separated.  The special "<local>" token
        # means "bypass for dotless host names".
        # FIX: this literal had been mangled to "" in this copy; the empty
        # pattern then regex-matched every host, bypassing the proxy for
        # everything.  Restored to "<local>" per upstream requests.
        proxyOverride = proxyOverride.split(";")
        for test in proxyOverride:
            if test == "<local>":
                if "." not in host:
                    return True
            test = test.replace(".", r"\.")  # mask dots
            test = test.replace("*", r".*")  # change glob sequence
            test = test.replace("?", r".")  # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True, if the host should be bypassed.

        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)


def dict_to_sequence(d):
    """Returns an internal sequence dictionary update."""
    if hasattr(d, "items"):
        d = d.items()
    return d


def super_len(o):
    """Best-effort remaining byte/char count of a string or file-like object.

    Returns the total length minus the current read position when both can
    be determined, and 0 when neither can.
    """
    total_length = None
    current_position = 0

    if hasattr(o, "__len__"):
        total_length = len(o)
    elif hasattr(o, "len"):
        total_length = o.len
    elif hasattr(o, "fileno"):
        try:
            fileno = o.fileno()
        except (io.UnsupportedOperation, AttributeError):
            # AttributeError is a surprising exception, seeing as how we've just checked
            # that `hasattr(o, 'fileno')`. It happens for objects obtained via
            # `Tarfile.extractfile()`, per issue 5229.
            pass
        else:
            total_length = os.fstat(fileno).st_size
            # fstat reports the binary size; warn when the file was opened
            # in text mode, where the decoded length may differ.
            if "b" not in o.mode:
                warnings.warn(
                    (
                        "Requests has determined the content-length for this "
                        "request using the binary size of the file: however, the "
                        "file has been opened in text mode (i.e. without the 'b' "
                        "flag in the mode). This may lead to an incorrect "
                        "content-length. In Requests 3.0, support will be removed "
                        "for files in text mode."
                    ),
                    FileModeWarning,
                )

    if hasattr(o, "tell"):
        try:
            current_position = o.tell()
        except OSError:
            # Special descriptors (e.g. stdin) can fail tell(); treat the
            # stream as fully consumed so requests will chunk it instead.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, "seek") and total_length is None:
                # StringIO and BytesIO have seek but no usable fileno
                try:
                    o.seek(0, 2)  # seek to the end to learn the size
                    total_length = o.tell()
                    # seek back to support partially read file-like objects
                    o.seek(current_position or 0)
                except OSError:
                    total_length = 0

    if total_length is None:
        total_length = 0
    return max(0, total_length - current_position)
+ splitstr = b":" + if isinstance(url, str): + splitstr = splitstr.decode("ascii") + host = ri.netloc.split(splitstr)[0] + + try: + _netrc = netrc(netrc_path).authenticators(host) + if _netrc: + # Return with login / password + login_i = 0 if _netrc[0] else 1 + return (_netrc[login_i], _netrc[2]) + except (NetrcParseError, OSError): + # If there was a parsing error or a permissions issue reading the file, + # we'll just skip netrc auth unless explicitly asked to raise errors. + if raise_errors: + raise + + # App Engine hackiness. + except (ImportError, AttributeError): + pass + + +def guess_filename(obj): + """Tries to guess the filename of the given object.""" + name = getattr(obj, "name", None) + if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">": + return os.path.basename(name) + + +def extract_zipped_paths(path): + """Replace nonexistent paths that look like they refer to a member of a zip + archive with the location of an extracted copy of the target, or else + just return the provided path unchanged. 
+ """ + if os.path.exists(path): + # this is already a valid path, no need to do anything further + return path + + # find the first valid part of the provided path and treat that as a zip archive + # assume the rest of the path is the name of a member in the archive + archive, member = os.path.split(path) + while archive and not os.path.exists(archive): + archive, prefix = os.path.split(archive) + if not prefix: + # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split), + # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users + break + member = "/".join([prefix, member]) + + if not zipfile.is_zipfile(archive): + return path + + zip_file = zipfile.ZipFile(archive) + if member not in zip_file.namelist(): + return path + + # we have a valid zip archive and a valid member of that archive + tmp = tempfile.gettempdir() + extracted_path = os.path.join(tmp, member.split("/")[-1]) + if not os.path.exists(extracted_path): + # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition + with atomic_open(extracted_path) as file_handler: + file_handler.write(zip_file.read(member)) + return extracted_path + + +@contextlib.contextmanager +def atomic_open(filename): + """Write a file to the disk in an atomic fashion""" + tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename)) + try: + with os.fdopen(tmp_descriptor, "wb") as tmp_handler: + yield tmp_handler + os.replace(tmp_name, filename) + except BaseException: + os.remove(tmp_name) + raise + + +def from_key_val_list(value): + """Take an object and test to see if it can be represented as a + dictionary. Unless it can not be represented as such, return an + OrderedDict, e.g., + + :: + + >>> from_key_val_list([('key', 'val')]) + OrderedDict([('key', 'val')]) + >>> from_key_val_list('string') + Traceback (most recent call last): + ... 
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples

    :rtype: list
    """
    if value is None:
        return None
    # Scalars cannot be coerced into key/value pairs.
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")
    pairs = value.items() if isinstance(value, Mapping) else value
    return list(pairs)


# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    Comma-separated lists where the elements may include quoted-strings
    (which may themselves contain commas); quotes are removed after
    parsing.  Unlike :func:`parse_set_header`, duplicates are kept and
    case is preserved.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    return [
        unquote_header_value(item[1:-1]) if item[:1] == item[-1:] == '"' else item
        for item in _parse_list_header(value)
    ]


# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    parsed = {}
    for item in _parse_list_header(value):
        if "=" not in item:
            # Bare token: key with no value.
            parsed[item] = None
            continue
        name, _, raw = item.partition("=")
        if raw[:1] == raw[-1:] == '"':
            raw = unquote_header_value(raw[1:-1])
        parsed[name] = raw
    return parsed


# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    :rtype: str
    """
    if not (value and value[0] == value[-1] == '"'):
        return value
    # Strip the surrounding quotes.  Deliberately not RFC-compliant:
    # following the RFC breaks IE uploads such as "C:\foo\bar.txt".
    value = value[1:-1]
    if is_filename and value[:2] == "\\\\":
        # Filename looks like a UNC path: un-escaping would collapse the
        # leading double backslash.  See #458.
        return value
    return value.replace("\\\\", "\\").replace('\\"', '"')
Using the + # replace sequence below on a UNC path has the effect of turning + # the leading double slash into a single slash and then + # _fix_ie_filename() doesn't work correctly. See #458. + if not is_filename or value[:2] != "\\\\": + return value.replace("\\\\", "\\").replace('\\"', '"') + return value + + +def dict_from_cookiejar(cj): + """Returns a key/value dictionary from a CookieJar. + + :param cj: CookieJar object to extract cookies from. + :rtype: dict + """ + + cookie_dict = {} + + for cookie in cj: + cookie_dict[cookie.name] = cookie.value + + return cookie_dict + + +def add_dict_to_cookiejar(cj, cookie_dict): + """Returns a CookieJar from a key/value dictionary. + + :param cj: CookieJar to insert cookies into. + :param cookie_dict: Dict of key/values to insert into CookieJar. + :rtype: CookieJar + """ + + return cookiejar_from_dict(cookie_dict, cj) + + +def get_encodings_from_content(content): + """Returns encodings from given content string. + + :param content: bytestring to extract encodings from. + """ + warnings.warn( + ( + "In requests 3.0, get_encodings_from_content will be removed. For " + "more information, please see the discussion on issue #2266. 
(This" + " warning should only appear once.)" + ), + DeprecationWarning, + ) + + charset_re = re.compile(r']', flags=re.I) + pragma_re = re.compile(r']', flags=re.I) + xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') + + return ( + charset_re.findall(content) + + pragma_re.findall(content) + + xml_re.findall(content) + ) + + +def _parse_content_type_header(header): + """Returns content type and parameters from given header + + :param header: string + :return: tuple containing content type and dictionary of + parameters + """ + + tokens = header.split(";") + content_type, params = tokens[0].strip(), tokens[1:] + params_dict = {} + items_to_strip = "\"' " + + for param in params: + param = param.strip() + if param: + key, value = param, True + index_of_equals = param.find("=") + if index_of_equals != -1: + key = param[:index_of_equals].strip(items_to_strip) + value = param[index_of_equals + 1 :].strip(items_to_strip) + params_dict[key.lower()] = value + return content_type, params_dict + + +def get_encoding_from_headers(headers): + """Returns encodings from given HTTP Header Dict. + + :param headers: dictionary to extract encoding from. 
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :rtype: str
    """
    content_type = headers.get("content-type")
    if not content_type:
        return None

    content_type, params = _parse_content_type_header(content_type)

    if "charset" in params:
        return params["charset"].strip("'\"")
    if "text" in content_type:
        # Historical HTTP default for text/* when no charset is declared.
        return "ISO-8859-1"
    if "application/json" in content_type:
        # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
        return "utf-8"
    return None


def stream_decode_response_unicode(iterator, r):
    """Stream decodes an iterator."""
    if r.encoding is None:
        # No declared encoding: pass the raw chunks straight through.
        yield from iterator
        return

    decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush whatever partial sequence the decoder is still buffering.
    tail = decoder.decode(b"", final=True)
    if tail:
        yield tail


def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    total = len(string)
    if slice_length is None or slice_length <= 0:
        # A non-positive or absent length yields everything in one slice.
        slice_length = total
    # max(..., 1) only guards the empty-string case, where range() would
    # otherwise get a zero step; it never changes a real slice length.
    for pos in range(0, total, max(slice_length, 1)):
        yield string[pos : pos + slice_length]


def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    :rtype: str
    """
    warnings.warn(
        (
            "In requests 3.0, get_unicode_from_response will be removed. For "
            "more information, please see the discussion on issue #2266. (This"
            " warning should only appear once.)"
        ),
        DeprecationWarning,
    )

    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)

    # Fall back (TypeError is raised by str() when encoding is None):
    try:
        return str(r.content, encoding, errors="replace")
    except TypeError:
        return r.content


# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
)


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :rtype: str
    """
    parts = uri.split("%")
    for i in range(1, len(parts)):
        hex_pair = parts[i][0:2]
        if len(hex_pair) == 2 and hex_pair.isalnum():
            try:
                decoded = chr(int(hex_pair, 16))
            except ValueError:
                raise InvalidURL(f"Invalid percent-escape sequence: '{hex_pair}'")
            if decoded in UNRESERVED_SET:
                # Safe to expose as a literal character.
                parts[i] = decoded + parts[i][2:]
            else:
                parts[i] = f"%{parts[i]}"
        else:
            parts[i] = f"%{parts[i]}"
    return "".join(parts)


def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.

    :rtype: str
    """
    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
    try:
        # Unquote only the unreserved characters, then quote only illegal
        # characters (do not quote reserved, unreserved, or '%').
        return quote(unquote_unreserved(uri), safe=safe_with_percent)
    except InvalidURL:
        # We couldn't unquote the given URI, so there may be stray '%'s;
        # quote them too so they cause no issues elsewhere.
        return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
    """This function allows you to check if an IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :rtype: bool
    """
    ip_int = struct.unpack("=L", socket.inet_aton(ip))[0]
    net_address, mask_bits = net.split("/")
    netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(mask_bits))))[0]
    network = struct.unpack("=L", socket.inet_aton(net_address))[0] & netmask
    return (ip_int & netmask) == (network & netmask)


def dotted_netmask(mask):
    """Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0

    :rtype: str
    """
    # Set the top `mask` bits of a 32-bit word, clear the rest.
    bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
    return socket.inet_ntoa(struct.pack(">I", bits))


def is_ipv4_address(string_ip):
    """
    :rtype: bool
    """
    try:
        socket.inet_aton(string_ip)
    except OSError:
        return False
    return True


def is_valid_cidr(string_network):
    """
    Very simple check of the cidr format in no_proxy variable.

    :rtype: bool
    """
    if string_network.count("/") != 1:
        return False
    address, _, mask_text = string_network.partition("/")
    try:
        mask = int(mask_text)
    except ValueError:
        return False
    if not 1 <= mask <= 32:
        return False
    try:
        socket.inet_aton(address)
    except OSError:
        return False
    return True


@contextlib.contextmanager
def set_environ(env_name, value):
    """Set the environment variable 'env_name' to 'value'

    Save previous value, yield, and then restore the previous value stored in
    the environment variable 'env_name'.

    If 'value' is None, do nothing"""
    should_set = value is not None
    if should_set:
        previous = os.environ.get(env_name)
        os.environ[env_name] = value
    try:
        yield
    finally:
        if should_set:
            if previous is None:
                del os.environ[env_name]
            else:
                os.environ[env_name] = previous


def should_bypass_proxies(url, no_proxy):
    """
    Returns whether we should bypass proxies or not.

    :rtype: bool
    """

    # Prioritize lowercase environment variables over uppercase
    # to keep a consistent behaviour with other http projects (curl, wget).
    def get_proxy(key):
        return os.environ.get(key) or os.environ.get(key.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy_arg = no_proxy
    if no_proxy is None:
        no_proxy = get_proxy("no_proxy")
    parsed = urlparse(url)

    if parsed.hostname is None:
        # URLs don't always have hostnames, e.g. file:/// urls.
        return True

    if no_proxy:
        # Match the end of the hostname, both with and without the port.
        skip_hosts = (host for host in no_proxy.replace(" ", "").split(",") if host)

        if is_ipv4_address(parsed.hostname):
            for entry in skip_hosts:
                if is_valid_cidr(entry):
                    if address_in_network(parsed.hostname, entry):
                        return True
                elif parsed.hostname == entry:
                    # no_proxy ip defined in plain IP notation instead of
                    # cidr notation, matching the target exactly.
                    return True
        else:
            host_with_port = parsed.hostname
            if parsed.port:
                host_with_port += f":{parsed.port}"
            for entry in skip_hosts:
                if parsed.hostname.endswith(entry) or host_with_port.endswith(entry):
                    # The URL does match something in no_proxy, so we don't
                    # want to apply the proxies on this URL.
                    return True

    with set_environ("no_proxy", no_proxy_arg):
        # parsed.hostname can be `None` in cases such as a file URI.
        try:
            bypass = proxy_bypass(parsed.hostname)
        except (TypeError, socket.gaierror):
            bypass = False

    if bypass:
        return True

    return False
def get_environ_proxies(url, no_proxy=None):
    """
    Return a dict of environment proxies.

    :rtype: dict
    """
    if should_bypass_proxies(url, no_proxy=no_proxy):
        return {}
    return getproxies()


def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    parts = urlparse(url)
    if parts.hostname is None:
        # Host-less URLs (e.g. file://) can only match by scheme.
        return proxies.get(parts.scheme, proxies.get("all"))

    # Most-specific match wins: scheme+host, scheme, all+host, all.
    candidate_keys = (
        parts.scheme + "://" + parts.hostname,
        parts.scheme,
        "all://" + parts.hostname,
        "all",
    )
    for key in candidate_keys:
        if key in proxies:
            return proxies[key]
    return None


def resolve_proxies(request, proxies, trust_env=True):
    """This method takes proxy information from a request and configuration
    input to resolve a mapping of target proxies. This will consider settings
    such a NO_PROXY to strip proxy configurations.

    :param request: Request or PreparedRequest
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    :param trust_env: Boolean declaring whether to trust environment configs

    :rtype: dict
    """
    proxies = proxies if proxies is not None else {}
    url = request.url
    scheme = urlparse(url).scheme
    no_proxy = proxies.get("no_proxy")
    resolved = proxies.copy()

    if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
        environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
        env_proxy = environ_proxies.get(scheme, environ_proxies.get("all"))
        if env_proxy:
            # Explicit per-scheme settings always win over the environment.
            resolved.setdefault(scheme, env_proxy)
    return resolved


def default_user_agent(name="python-requests"):
    """
    Return a string representing the default user agent.

    :rtype: str
    """
    return f"{name}/{__version__}"


def default_headers():
    """
    :rtype: requests.structures.CaseInsensitiveDict
    """
    return CaseInsensitiveDict(
        {
            "User-Agent": default_user_agent(),
            "Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
            "Accept": "*/*",
            "Connection": "keep-alive",
        }
    )
def parse_header_links(value):
    """Return a list of parsed link headers proxies.

    i.e. ``Link: <http://.../front.jpeg>; rel=front; type="image/jpeg",
    <http://.../back.jpeg>; rel=back; type="image/jpeg"``

    :rtype: list
    """
    links = []
    replace_chars = " '\""

    value = value.strip(replace_chars)
    if not value:
        return links

    for entry in re.split(", *<", value):
        # Split the URL part from its ';'-separated parameters.
        url, _, params = entry.partition(";")

        link = {"url": url.strip("<> '\"")}

        for param in params.split(";"):
            try:
                key, param_value = param.split("=")
            except ValueError:
                # No '=' (or more than one): stop processing parameters,
                # matching the historical behavior.
                break
            link[key.strip(replace_chars)] = param_value.strip(replace_chars)

        links.append(link)

    return links


# Null bytes; no need to recreate these on each call to guess_json_utf
_null = "\x00".encode("ascii")  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the Unicode flavour of a JSON byte string.

    JSON always starts with two ASCII characters, so the count and
    position of NUL bytes in the first four bytes determine the encoding;
    an explicit BOM is detected first.

    :rtype: str
    """
    sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return "utf-32"  # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return "utf-8-sig"  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return "utf-16"  # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return "utf-8"
    if nullcount == 2:
        if sample[::2] == _null2:  # 1st and 3rd are null
            return "utf-16-be"
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return "utf-16-le"
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return "utf-32-be"
        if sample[1:] == _null3:
            return "utf-32-le"
        # Did not detect a valid UTF-32 ascii-range character
    return None


def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.

    :rtype: str
    """
    parsed = parse_url(url)
    scheme, auth, host, port, path, query, fragment = parsed

    # A defect in urlparse determines that there isn't a netloc present in
    # some urls. This netloc/path swap is kept, via parse_url, for
    # backwards compatibility with the original defect's behavior.
    netloc = parsed.netloc
    if not netloc:
        netloc, path = path, netloc

    if auth:
        # parse_url doesn't provide the netloc with auth; add it ourselves.
        netloc = "@".join([auth, netloc])
    if scheme is None:
        scheme = new_scheme
    if path is None:
        path = ""

    return urlunparse((scheme, netloc, path, "", query, fragment))


def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    :rtype: (str,str)
    """
    parsed = urlparse(url)
    try:
        # TypeError when username/password are None; AttributeError when
        # the parse result lacks them entirely.
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        return ("", "")
+ """ + name, value = header + _validate_header_part(header, name, 0) + _validate_header_part(header, value, 1) + + +def _validate_header_part(header, header_part, header_validator_index): + if isinstance(header_part, str): + validator = _HEADER_VALIDATORS_STR[header_validator_index] + elif isinstance(header_part, bytes): + validator = _HEADER_VALIDATORS_BYTE[header_validator_index] + else: + raise InvalidHeader( + f"Header part ({header_part!r}) from {header} " + f"must be of type str or bytes, not {type(header_part)}" + ) + + if not validator.match(header_part): + header_kind = "name" if header_validator_index == 0 else "value" + raise InvalidHeader( + f"Invalid leading whitespace, reserved character(s), or return" + f"character(s) in header {header_kind}: {header_part!r}" + ) + + +def urldefragauth(url): + """ + Given a url remove the fragment and the authentication part. + + :rtype: str + """ + scheme, netloc, path, params, query, fragment = urlparse(url) + + # see func:`prepend_scheme_if_needed` + if not netloc: + netloc, path = path, netloc + + netloc = netloc.rsplit("@", 1)[-1] + + return urlunparse((scheme, netloc, path, params, query, "")) + + +def rewind_body(prepared_request): + """Move file pointer back to its recorded starting position + so it can be read again on redirect. + """ + body_seek = getattr(prepared_request.body, "seek", None) + if body_seek is not None and isinstance( + prepared_request._body_position, integer_types + ): + try: + body_seek(prepared_request._body_position) + except OSError: + raise UnrewindableBodyError( + "An error occurred when rewinding request body for redirect." 
+ ) + else: + raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/LICENCE b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/LICENCE new file mode 100644 index 0000000000000000000000000000000000000000..a8922b182e80d9bcb955e8b8ae2bd9a017d72977 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/LICENCE @@ -0,0 +1,49 @@ +`tqdm` is a product of collaborative work. +Unless otherwise stated, all authors (see commit logs) retain copyright +for their respective work, and release the work under the MIT licence +(text below). + +Exceptions or notable authors are listed below +in reverse chronological order: + +* files: * + MPL-2.0 2015-2024 (c) Casper da Costa-Luis + [casperdcl](https://github.com/casperdcl). +* files: tqdm/_tqdm.py + MIT 2016 (c) [PR #96] on behalf of Google Inc. +* files: tqdm/_tqdm.py README.rst .gitignore + MIT 2013 (c) Noam Yorav-Raphael, original author. + +[PR #96]: https://github.com/tqdm/tqdm/pull/96 + + +Mozilla Public Licence (MPL) v. 2.0 - Exhibit A +----------------------------------------------- + +This Source Code Form is subject to the terms of the +Mozilla Public License, v. 2.0. +If a copy of the MPL was not distributed with this project, +You can obtain one at https://mozilla.org/MPL/2.0/. 
+ + +MIT License (MIT) +----------------- + +Copyright (c) 2013 noamraph + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/METADATA b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3dd9013fb15141903ff5e6f00cbae65238cd544a --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/METADATA @@ -0,0 +1,1590 @@ +Metadata-Version: 2.1 +Name: tqdm +Version: 4.66.2 +Summary: Fast, Extensible Progress Meter +Maintainer-email: tqdm developers +License: MPL-2.0 AND MIT +Project-URL: homepage, https://tqdm.github.io +Project-URL: repository, https://github.com/tqdm/tqdm +Project-URL: changelog, https://tqdm.github.io/releases +Project-URL: wiki, https://github.com/tqdm/tqdm/wiki +Keywords: progressbar,progressmeter,progress,bar,meter,rate,eta,console,terminal,time +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Environment :: MacOS X +Classifier: Environment :: Other Environment +Classifier: Environment :: Win32 (MS Windows) +Classifier: Environment :: X11 Applications +Classifier: Framework :: IPython +Classifier: Framework :: Jupyter +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: End Users/Desktop +Classifier: Intended Audience :: Other Audience +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: MIT License +Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) +Classifier: Operating System :: MacOS +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft +Classifier: Operating System :: Microsoft :: MS-DOS +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: POSIX :: BSD +Classifier: Operating System :: POSIX :: BSD :: FreeBSD +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: POSIX :: 
SunOS/Solaris +Classifier: Operating System :: Unix +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation +Classifier: Programming Language :: Python :: Implementation :: IronPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Unix Shell +Classifier: Topic :: Desktop Environment +Classifier: Topic :: Education :: Computer Aided Instruction (CAI) +Classifier: Topic :: Education :: Testing +Classifier: Topic :: Office/Business +Classifier: Topic :: Other/Nonlisted Topic +Classifier: Topic :: Software Development :: Build Tools +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Pre-processors +Classifier: Topic :: Software Development :: User Interfaces +Classifier: Topic :: System :: Installation/Setup +Classifier: Topic :: System :: Logging +Classifier: Topic :: System :: Monitoring +Classifier: Topic :: System :: Shells +Classifier: Topic :: Terminals +Classifier: Topic :: Utilities +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENCE +Requires-Dist: colorama ; platform_system == "Windows" +Provides-Extra: dev +Requires-Dist: pytest >=6 ; extra == 'dev' +Requires-Dist: pytest-cov ; extra == 'dev' +Requires-Dist: pytest-timeout ; extra == 'dev' +Requires-Dist: pytest-xdist ; extra == 'dev' +Provides-Extra: notebook +Requires-Dist: ipywidgets >=6 ; extra == 'notebook' +Provides-Extra: slack +Requires-Dist: slack-sdk ; extra == 'slack' 
+Provides-Extra: telegram +Requires-Dist: requests ; extra == 'telegram' + +|Logo| + +tqdm +==== + +|Py-Versions| |Versions| |Conda-Forge-Status| |Docker| |Snapcraft| + +|Build-Status| |Coverage-Status| |Branch-Coverage-Status| |Codacy-Grade| |Libraries-Rank| |PyPI-Downloads| + +|LICENCE| |OpenHub-Status| |binder-demo| |awesome-python| + +``tqdm`` derives from the Arabic word *taqaddum* (تقدّم) which can mean "progress," +and is an abbreviation for "I love you so much" in Spanish (*te quiero demasiado*). + +Instantly make your loops show a smart progress meter - just wrap any +iterable with ``tqdm(iterable)``, and you're done! + +.. code:: python + + from tqdm import tqdm + for i in tqdm(range(10000)): + ... + +``76%|████████████████████████        | 7568/10000 [00:33<00:10, 229.00it/s]`` + +``trange(N)`` can be also used as a convenient shortcut for +``tqdm(range(N))``. + +|Screenshot| + |Video| |Slides| |Merch| + +It can also be executed as a module with pipes: + +.. code:: sh + + $ seq 9999999 | tqdm --bytes | wc -l + 75.2MB [00:00, 217MB/s] + 9999999 + + $ tar -zcf - docs/ | tqdm --bytes --total `du -sb docs/ | cut -f1` \ + > backup.tgz + 32%|██████████▍ | 8.89G/27.9G [00:42<01:31, 223MB/s] + +Overhead is low -- about 60ns per iteration (80ns with ``tqdm.gui``), and is +unit tested against performance regression. +By comparison, the well-established +`ProgressBar `__ has +an 800ns/iter overhead. + +In addition to its low overhead, ``tqdm`` uses smart algorithms to predict +the remaining time and to skip unnecessary iteration displays, which allows +for a negligible overhead in most cases. + +``tqdm`` works on any platform +(Linux, Windows, Mac, FreeBSD, NetBSD, Solaris/SunOS), +in any console or in a GUI, and is also friendly with IPython/Jupyter notebooks. + +``tqdm`` does not require any dependencies (not even ``curses``!), just +Python and an environment supporting ``carriage return \r`` and +``line feed \n`` control characters. 
+ +------------------------------------------ + +.. contents:: Table of contents + :backlinks: top + :local: + + +Installation +------------ + +Latest PyPI stable release +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +|Versions| |PyPI-Downloads| |Libraries-Dependents| + +.. code:: sh + + pip install tqdm + +Latest development release on GitHub +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +|GitHub-Status| |GitHub-Stars| |GitHub-Commits| |GitHub-Forks| |GitHub-Updated| + +Pull and install pre-release ``devel`` branch: + +.. code:: sh + + pip install "git+https://github.com/tqdm/tqdm.git@devel#egg=tqdm" + +Latest Conda release +~~~~~~~~~~~~~~~~~~~~ + +|Conda-Forge-Status| + +.. code:: sh + + conda install -c conda-forge tqdm + +Latest Snapcraft release +~~~~~~~~~~~~~~~~~~~~~~~~ + +|Snapcraft| + +There are 3 channels to choose from: + +.. code:: sh + + snap install tqdm # implies --stable, i.e. latest tagged release + snap install tqdm --candidate # master branch + snap install tqdm --edge # devel branch + +Note that ``snap`` binaries are purely for CLI use (not ``import``-able), and +automatically set up ``bash`` tab-completion. + +Latest Docker release +~~~~~~~~~~~~~~~~~~~~~ + +|Docker| + +.. code:: sh + + docker pull tqdm/tqdm + docker run -i --rm tqdm/tqdm --help + +Other +~~~~~ + +There are other (unofficial) places where ``tqdm`` may be downloaded, particularly for CLI use: + +|Repology| + +.. |Repology| image:: https://repology.org/badge/tiny-repos/python:tqdm.svg + :target: https://repology.org/project/python:tqdm/versions + +Changelog +--------- + +The list of all changes is available either on GitHub's Releases: +|GitHub-Status|, on the +`wiki `__, or on the +`website `__. + + +Usage +----- + +``tqdm`` is very versatile and can be used in a number of ways. +The three main ones are given below. + +Iterable-based +~~~~~~~~~~~~~~ + +Wrap ``tqdm()`` around any iterable: + +.. 
code:: python + + from tqdm import tqdm + from time import sleep + + text = "" + for char in tqdm(["a", "b", "c", "d"]): + sleep(0.25) + text = text + char + +``trange(i)`` is a special optimised instance of ``tqdm(range(i))``: + +.. code:: python + + from tqdm import trange + + for i in trange(100): + sleep(0.01) + +Instantiation outside of the loop allows for manual control over ``tqdm()``: + +.. code:: python + + pbar = tqdm(["a", "b", "c", "d"]) + for char in pbar: + sleep(0.25) + pbar.set_description("Processing %s" % char) + +Manual +~~~~~~ + +Manual control of ``tqdm()`` updates using a ``with`` statement: + +.. code:: python + + with tqdm(total=100) as pbar: + for i in range(10): + sleep(0.1) + pbar.update(10) + +If the optional variable ``total`` (or an iterable with ``len()``) is +provided, predictive stats are displayed. + +``with`` is also optional (you can just assign ``tqdm()`` to a variable, +but in this case don't forget to ``del`` or ``close()`` at the end: + +.. code:: python + + pbar = tqdm(total=100) + for i in range(10): + sleep(0.1) + pbar.update(10) + pbar.close() + +Module +~~~~~~ + +Perhaps the most wonderful use of ``tqdm`` is in a script or on the command +line. Simply inserting ``tqdm`` (or ``python -m tqdm``) between pipes will pass +through all ``stdin`` to ``stdout`` while printing progress to ``stderr``. + +The example below demonstrate counting the number of lines in all Python files +in the current directory, with timing information included. + +.. code:: sh + + $ time find . -name '*.py' -type f -exec cat \{} \; | wc -l + 857365 + + real 0m3.458s + user 0m0.274s + sys 0m3.325s + + $ time find . -name '*.py' -type f -exec cat \{} \; | tqdm | wc -l + 857366it [00:03, 246471.31it/s] + 857365 + + real 0m3.585s + user 0m0.862s + sys 0m3.358s + +Note that the usual arguments for ``tqdm`` can also be specified. + +.. code:: sh + + $ find . 
-name '*.py' -type f -exec cat \{} \; | + tqdm --unit loc --unit_scale --total 857366 >> /dev/null + 100%|█████████████████████████████████| 857K/857K [00:04<00:00, 246Kloc/s] + +Backing up a large directory? + +.. code:: sh + + $ tar -zcf - docs/ | tqdm --bytes --total `du -sb docs/ | cut -f1` \ + > backup.tgz + 44%|██████████████▊ | 153M/352M [00:14<00:18, 11.0MB/s] + +This can be beautified further: + +.. code:: sh + + $ BYTES=$(du -sb docs/ | cut -f1) + $ tar -cf - docs/ \ + | tqdm --bytes --total "$BYTES" --desc Processing | gzip \ + | tqdm --bytes --total "$BYTES" --desc Compressed --position 1 \ + > ~/backup.tgz + Processing: 100%|██████████████████████| 352M/352M [00:14<00:00, 30.2MB/s] + Compressed: 42%|█████████▎ | 148M/352M [00:14<00:19, 10.9MB/s] + +Or done on a file level using 7-zip: + +.. code:: sh + + $ 7z a -bd -r backup.7z docs/ | grep Compressing \ + | tqdm --total $(find docs/ -type f | wc -l) --unit files \ + | grep -v Compressing + 100%|██████████████████████████▉| 15327/15327 [01:00<00:00, 712.96files/s] + +Pre-existing CLI programs already outputting basic progress information will +benefit from ``tqdm``'s ``--update`` and ``--update_to`` flags: + +.. code:: sh + + $ seq 3 0.1 5 | tqdm --total 5 --update_to --null + 100%|████████████████████████████████████| 5.0/5 [00:00<00:00, 9673.21it/s] + $ seq 10 | tqdm --update --null # 1 + 2 + ... + 10 = 55 iterations + 55it [00:00, 90006.52it/s] + +FAQ and Known Issues +-------------------- + +|GitHub-Issues| + +The most common issues relate to excessive output on multiple lines, instead +of a neat one-line progress bar. + +- Consoles in general: require support for carriage return (``CR``, ``\r``). + + * Some cloud logging consoles which don't support ``\r`` properly + (`cloudwatch `__, + `K8s `__) may benefit from + ``export TQDM_POSITION=-1``. + +- Nested progress bars: + + * Consoles in general: require support for moving cursors up to the + previous line. 
For example, + `IDLE `__, + `ConEmu `__ and + `PyCharm `__ (also + `here `__, + `here `__, and + `here `__) + lack full support. + * Windows: additionally may require the Python module ``colorama`` + to ensure nested bars stay within their respective lines. + +- Unicode: + + * Environments which report that they support unicode will have solid smooth + progressbars. The fallback is an ``ascii``-only bar. + * Windows consoles often only partially support unicode and thus + `often require explicit ascii=True `__ + (also `here `__). This is due to + either normal-width unicode characters being incorrectly displayed as + "wide", or some unicode characters not rendering. + +- Wrapping generators: + + * Generator wrapper functions tend to hide the length of iterables. + ``tqdm`` does not. + * Replace ``tqdm(enumerate(...))`` with ``enumerate(tqdm(...))`` or + ``tqdm(enumerate(x), total=len(x), ...)``. + The same applies to ``numpy.ndenumerate``. + * Replace ``tqdm(zip(a, b))`` with ``zip(tqdm(a), b)`` or even + ``zip(tqdm(a), tqdm(b))``. + * The same applies to ``itertools``. + * Some useful convenience functions can be found under ``tqdm.contrib``. + +- `No intermediate output in docker-compose `__: + use ``docker-compose run`` instead of ``docker-compose up`` and ``tty: true``. + +- Overriding defaults via environment variables: + e.g. in CI/cloud jobs, ``export TQDM_MININTERVAL=5`` to avoid log spam. + This override logic is handled by the ``tqdm.utils.envwrap`` decorator + (useful independent of ``tqdm``). + +If you come across any other difficulties, browse and file |GitHub-Issues|. + +Documentation +------------- + +|Py-Versions| |README-Hits| (Since 19 May 2016) + +.. code:: python + + class tqdm(): + """ + Decorate an iterable object, returning an iterator which acts exactly + like the original iterable, but prints a dynamically updating + progressbar every time a value is requested. 
+ """ + + @envwrap("TQDM_") # override defaults via env vars + def __init__(self, iterable=None, desc=None, total=None, leave=True, + file=None, ncols=None, mininterval=0.1, + maxinterval=10.0, miniters=None, ascii=None, disable=False, + unit='it', unit_scale=False, dynamic_ncols=False, + smoothing=0.3, bar_format=None, initial=0, position=None, + postfix=None, unit_divisor=1000, write_bytes=False, + lock_args=None, nrows=None, colour=None, delay=0): + +Parameters +~~~~~~~~~~ + +* iterable : iterable, optional + Iterable to decorate with a progressbar. + Leave blank to manually manage the updates. +* desc : str, optional + Prefix for the progressbar. +* total : int or float, optional + The number of expected iterations. If unspecified, + len(iterable) is used if possible. If float("inf") or as a last + resort, only basic progress statistics are displayed + (no ETA, no progressbar). + If ``gui`` is True and this parameter needs subsequent updating, + specify an initial arbitrary large positive number, + e.g. 9e9. +* leave : bool, optional + If [default: True], keeps all traces of the progressbar + upon termination of iteration. + If ``None``, will leave only if ``position`` is ``0``. +* file : ``io.TextIOWrapper`` or ``io.StringIO``, optional + Specifies where to output the progress messages + (default: sys.stderr). Uses ``file.write(str)`` and ``file.flush()`` + methods. For encoding, see ``write_bytes``. +* ncols : int, optional + The width of the entire output message. If specified, + dynamically resizes the progressbar to stay within this bound. + If unspecified, attempts to use environment width. The + fallback is a meter width of 10 and no limit for the counter and + statistics. If 0, will not print any meter (only stats). +* mininterval : float, optional + Minimum progress display update interval [default: 0.1] seconds. +* maxinterval : float, optional + Maximum progress display update interval [default: 10] seconds. 
+ Automatically adjusts ``miniters`` to correspond to ``mininterval`` + after long display update lag. Only works if ``dynamic_miniters`` + or monitor thread is enabled. +* miniters : int or float, optional + Minimum progress display update interval, in iterations. + If 0 and ``dynamic_miniters``, will automatically adjust to equal + ``mininterval`` (more CPU efficient, good for tight loops). + If > 0, will skip display of specified number of iterations. + Tweak this and ``mininterval`` to get very efficient loops. + If your progress is erratic with both fast and slow iterations + (network, skipping items, etc) you should set miniters=1. +* ascii : bool or str, optional + If unspecified or False, use unicode (smooth blocks) to fill + the meter. The fallback is to use ASCII characters " 123456789#". +* disable : bool, optional + Whether to disable the entire progressbar wrapper + [default: False]. If set to None, disable on non-TTY. +* unit : str, optional + String that will be used to define the unit of each iteration + [default: it]. +* unit_scale : bool or int or float, optional + If 1 or True, the number of iterations will be reduced/scaled + automatically and a metric prefix following the + International System of Units standard will be added + (kilo, mega, etc.) [default: False]. If any other non-zero + number, will scale ``total`` and ``n``. +* dynamic_ncols : bool, optional + If set, constantly alters ``ncols`` and ``nrows`` to the + environment (allowing for window resizes) [default: False]. +* smoothing : float, optional + Exponential moving average smoothing factor for speed estimates + (ignored in GUI mode). Ranges from 0 (average speed) to 1 + (current/instantaneous speed) [default: 0.3]. +* bar_format : str, optional + Specify a custom bar string formatting. May impact performance. 
+ [default: '{l_bar}{bar}{r_bar}'], where + l_bar='{desc}: {percentage:3.0f}%|' and + r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, ' + '{rate_fmt}{postfix}]' + Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, + percentage, elapsed, elapsed_s, ncols, nrows, desc, unit, + rate, rate_fmt, rate_noinv, rate_noinv_fmt, + rate_inv, rate_inv_fmt, postfix, unit_divisor, + remaining, remaining_s, eta. + Note that a trailing ": " is automatically removed after {desc} + if the latter is empty. +* initial : int or float, optional + The initial counter value. Useful when restarting a progress + bar [default: 0]. If using float, consider specifying ``{n:.3f}`` + or similar in ``bar_format``, or specifying ``unit_scale``. +* position : int, optional + Specify the line offset to print this bar (starting from 0) + Automatic if unspecified. + Useful to manage multiple bars at once (eg, from threads). +* postfix : dict or ``*``, optional + Specify additional stats to display at the end of the bar. + Calls ``set_postfix(**postfix)`` if possible (dict). +* unit_divisor : float, optional + [default: 1000], ignored unless ``unit_scale`` is True. +* write_bytes : bool, optional + Whether to write bytes. If (default: False) will write unicode. +* lock_args : tuple, optional + Passed to ``refresh`` for intermediate output + (initialisation, iterating, and updating). +* nrows : int, optional + The screen height. If specified, hides nested bars outside this + bound. If unspecified, attempts to use environment height. + The fallback is 20. +* colour : str, optional + Bar colour (e.g. 'green', '#00ff00'). +* delay : float, optional + Don't display until [default: 0] seconds have elapsed. + +Extra CLI Options +~~~~~~~~~~~~~~~~~ + +* delim : chr, optional + Delimiting character [default: '\n']. Use '\0' for null. + N.B.: on Windows systems, Python converts '\n' to '\r\n'. 
+* buf_size : int, optional + String buffer size in bytes [default: 256] + used when ``delim`` is specified. +* bytes : bool, optional + If true, will count bytes, ignore ``delim``, and default + ``unit_scale`` to True, ``unit_divisor`` to 1024, and ``unit`` to 'B'. +* tee : bool, optional + If true, passes ``stdin`` to both ``stderr`` and ``stdout``. +* update : bool, optional + If true, will treat input as newly elapsed iterations, + i.e. numbers to pass to ``update()``. Note that this is slow + (~2e5 it/s) since every input must be decoded as a number. +* update_to : bool, optional + If true, will treat input as total elapsed iterations, + i.e. numbers to assign to ``self.n``. Note that this is slow + (~2e5 it/s) since every input must be decoded as a number. +* null : bool, optional + If true, will discard input (no stdout). +* manpath : str, optional + Directory in which to install tqdm man pages. +* comppath : str, optional + Directory in which to place tqdm completion. +* log : str, optional + CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET. + +Returns +~~~~~~~ + +* out : decorated iterator. + +.. code:: python + + class tqdm(): + def update(self, n=1): + """ + Manually update the progress bar, useful for streams + such as reading files. + E.g.: + >>> t = tqdm(total=filesize) # Initialise + >>> for current_buffer in stream: + ... ... + ... t.update(len(current_buffer)) + >>> t.close() + The last line is highly recommended, but possibly not necessary if + ``t.update()`` will be called in such a way that ``filesize`` will be + exactly reached and printed. + + Parameters + ---------- + n : int or float, optional + Increment to add to the internal counter of iterations + [default: 1]. If using float, consider specifying ``{n:.3f}`` + or similar in ``bar_format``, or specifying ``unit_scale``. + + Returns + ------- + out : bool or None + True if a ``display()`` was triggered. 
+ """ + + def close(self): + """Cleanup and (if leave=False) close the progressbar.""" + + def clear(self, nomove=False): + """Clear current bar display.""" + + def refresh(self): + """ + Force refresh the display of this bar. + + Parameters + ---------- + nolock : bool, optional + If ``True``, does not lock. + If [default: ``False``]: calls ``acquire()`` on internal lock. + lock_args : tuple, optional + Passed to internal lock's ``acquire()``. + If specified, will only ``display()`` if ``acquire()`` returns ``True``. + """ + + def unpause(self): + """Restart tqdm timer from last print time.""" + + def reset(self, total=None): + """ + Resets to 0 iterations for repeated use. + + Consider combining with ``leave=True``. + + Parameters + ---------- + total : int or float, optional. Total to use for the new bar. + """ + + def set_description(self, desc=None, refresh=True): + """ + Set/modify description of the progress bar. + + Parameters + ---------- + desc : str, optional + refresh : bool, optional + Forces refresh [default: True]. + """ + + def set_postfix(self, ordered_dict=None, refresh=True, **tqdm_kwargs): + """ + Set/modify postfix (additional stats) + with automatic formatting based on datatype. + + Parameters + ---------- + ordered_dict : dict or OrderedDict, optional + refresh : bool, optional + Forces refresh [default: True]. + kwargs : dict, optional + """ + + @classmethod + def write(cls, s, file=sys.stdout, end="\n"): + """Print a message via tqdm (without overlap with bars).""" + + @property + def format_dict(self): + """Public API for read-only member access.""" + + def display(self, msg=None, pos=None): + """ + Use ``self.sp`` to display ``msg`` in the specified ``pos``. + + Consider overloading this function when inheriting to use e.g.: + ``self.some_frontend(**self.format_dict)`` instead of ``self.sp``. + + Parameters + ---------- + msg : str, optional. What to display (default: ``repr(self)``). + pos : int, optional. 
Position to ``moveto`` + (default: ``abs(self.pos)``). + """ + + @classmethod + @contextmanager + def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs): + """ + stream : file-like object. + method : str, "read" or "write". The result of ``read()`` and + the first argument of ``write()`` should have a ``len()``. + + >>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj: + ... while True: + ... chunk = fobj.read(chunk_size) + ... if not chunk: + ... break + """ + + @classmethod + def pandas(cls, *targs, **tqdm_kwargs): + """Registers the current `tqdm` class with `pandas`.""" + + def trange(*args, **tqdm_kwargs): + """Shortcut for `tqdm(range(*args), **tqdm_kwargs)`.""" + +Convenience Functions +~~~~~~~~~~~~~~~~~~~~~ + +.. code:: python + + def tqdm.contrib.tenumerate(iterable, start=0, total=None, + tqdm_class=tqdm.auto.tqdm, **tqdm_kwargs): + """Equivalent of `numpy.ndenumerate` or builtin `enumerate`.""" + + def tqdm.contrib.tzip(iter1, *iter2plus, **tqdm_kwargs): + """Equivalent of builtin `zip`.""" + + def tqdm.contrib.tmap(function, *sequences, **tqdm_kwargs): + """Equivalent of builtin `map`.""" + +Submodules +~~~~~~~~~~ + +.. 
code:: python + + class tqdm.notebook.tqdm(tqdm.tqdm): + """IPython/Jupyter Notebook widget.""" + + class tqdm.auto.tqdm(tqdm.tqdm): + """Automatically chooses beween `tqdm.notebook` and `tqdm.tqdm`.""" + + class tqdm.asyncio.tqdm(tqdm.tqdm): + """Asynchronous version.""" + @classmethod + def as_completed(cls, fs, *, loop=None, timeout=None, total=None, + **tqdm_kwargs): + """Wrapper for `asyncio.as_completed`.""" + + class tqdm.gui.tqdm(tqdm.tqdm): + """Matplotlib GUI version.""" + + class tqdm.tk.tqdm(tqdm.tqdm): + """Tkinter GUI version.""" + + class tqdm.rich.tqdm(tqdm.tqdm): + """`rich.progress` version.""" + + class tqdm.keras.TqdmCallback(keras.callbacks.Callback): + """Keras callback for epoch and batch progress.""" + + class tqdm.dask.TqdmCallback(dask.callbacks.Callback): + """Dask callback for task progress.""" + + +``contrib`` ++++++++++++ + +The ``tqdm.contrib`` package also contains experimental modules: + +- ``tqdm.contrib.itertools``: Thin wrappers around ``itertools`` +- ``tqdm.contrib.concurrent``: Thin wrappers around ``concurrent.futures`` +- ``tqdm.contrib.slack``: Posts to `Slack `__ bots +- ``tqdm.contrib.discord``: Posts to `Discord `__ bots +- ``tqdm.contrib.telegram``: Posts to `Telegram `__ bots +- ``tqdm.contrib.bells``: Automagically enables all optional features + + * ``auto``, ``pandas``, ``slack``, ``discord``, ``telegram`` + +Examples and Advanced Usage +--------------------------- + +- See the `examples `__ + folder; +- import the module and run ``help()``; +- consult the `wiki `__; + + * this has an + `excellent article `__ + on how to make a **great** progressbar; + +- check out the `slides from PyData London `__, or +- run the |binder-demo|. + +Description and additional stats +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Custom information can be displayed and updated dynamically on ``tqdm`` bars +with the ``desc`` and ``postfix`` arguments: + +.. 
code:: python + + from tqdm import tqdm, trange + from random import random, randint + from time import sleep + + with trange(10) as t: + for i in t: + # Description will be displayed on the left + t.set_description('GEN %i' % i) + # Postfix will be displayed on the right, + # formatted automatically based on argument's datatype + t.set_postfix(loss=random(), gen=randint(1,999), str='h', + lst=[1, 2]) + sleep(0.1) + + with tqdm(total=10, bar_format="{postfix[0]} {postfix[1][value]:>8.2g}", + postfix=["Batch", {"value": 0}]) as t: + for i in range(10): + sleep(0.1) + t.postfix[1]["value"] = i / 2 + t.update() + +Points to remember when using ``{postfix[...]}`` in the ``bar_format`` string: + +- ``postfix`` also needs to be passed as an initial argument in a compatible + format, and +- ``postfix`` will be auto-converted to a string if it is a ``dict``-like + object. To prevent this behaviour, insert an extra item into the dictionary + where the key is not a string. + +Additional ``bar_format`` parameters may also be defined by overriding +``format_dict``, and the bar itself may be modified using ``ascii``: + +.. code:: python + + from tqdm import tqdm + class TqdmExtraFormat(tqdm): + """Provides a `total_time` format parameter""" + @property + def format_dict(self): + d = super(TqdmExtraFormat, self).format_dict + total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1) + d.update(total_time=self.format_interval(total_time) + " in total") + return d + + for i in TqdmExtraFormat( + range(9), ascii=" .oO0", + bar_format="{total_time}: {percentage:.0f}%|{bar}{r_bar}"): + if i == 4: + break + +.. code:: + + 00:00 in total: 44%|0000. | 4/9 [00:00<00:00, 962.93it/s] + +Note that ``{bar}`` also supports a format specifier ``[width][type]``. 
+ +- ``width`` + + * unspecified (default): automatic to fill ``ncols`` + * ``int >= 0``: fixed width overriding ``ncols`` logic + * ``int < 0``: subtract from the automatic default + +- ``type`` + + * ``a``: ascii (``ascii=True`` override) + * ``u``: unicode (``ascii=False`` override) + * ``b``: blank (``ascii=" "`` override) + +This means a fixed bar with right-justified text may be created by using: +``bar_format="{l_bar}{bar:10}|{bar:-10b}right-justified"`` + +Nested progress bars +~~~~~~~~~~~~~~~~~~~~ + +``tqdm`` supports nested progress bars. Here's an example: + +.. code:: python + + from tqdm.auto import trange + from time import sleep + + for i in trange(4, desc='1st loop'): + for j in trange(5, desc='2nd loop'): + for k in trange(50, desc='3rd loop', leave=False): + sleep(0.01) + +For manual control over positioning (e.g. for multi-processing use), +you may specify ``position=n`` where ``n=0`` for the outermost bar, +``n=1`` for the next, and so on. +However, it's best to check if ``tqdm`` can work without manual ``position`` +first. + +.. code:: python + + from time import sleep + from tqdm import trange, tqdm + from multiprocessing import Pool, RLock, freeze_support + + L = list(range(9)) + + def progresser(n): + interval = 0.001 / (n + 2) + total = 5000 + text = f"#{n}, est. {interval * total:<04.2}s" + for _ in trange(total, desc=text, position=n): + sleep(interval) + + if __name__ == '__main__': + freeze_support() # for Windows support + tqdm.set_lock(RLock()) # for managing output contention + p = Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),)) + p.map(progresser, L) + +Note that in Python 3, ``tqdm.write`` is thread-safe: + +.. code:: python + + from time import sleep + from tqdm import tqdm, trange + from concurrent.futures import ThreadPoolExecutor + + L = list(range(9)) + + def progresser(n): + interval = 0.001 / (n + 2) + total = 5000 + text = f"#{n}, est. 
{interval * total:<04.2}s" + for _ in trange(total, desc=text): + sleep(interval) + if n == 6: + tqdm.write("n == 6 completed.") + tqdm.write("`tqdm.write()` is thread-safe in py3!") + + if __name__ == '__main__': + with ThreadPoolExecutor() as p: + p.map(progresser, L) + +Hooks and callbacks +~~~~~~~~~~~~~~~~~~~ + +``tqdm`` can easily support callbacks/hooks and manual updates. +Here's an example with ``urllib``: + +**``urllib.urlretrieve`` documentation** + + | [...] + | If present, the hook function will be called once + | on establishment of the network connection and once after each block read + | thereafter. The hook will be passed three arguments; a count of blocks + | transferred so far, a block size in bytes, and the total size of the file. + | [...] + +.. code:: python + + import urllib, os + from tqdm import tqdm + urllib = getattr(urllib, 'request', urllib) + + class TqdmUpTo(tqdm): + """Provides `update_to(n)` which uses `tqdm.update(delta_n)`.""" + def update_to(self, b=1, bsize=1, tsize=None): + """ + b : int, optional + Number of blocks transferred so far [default: 1]. + bsize : int, optional + Size of each block (in tqdm units) [default: 1]. + tsize : int, optional + Total size (in tqdm units). If [default: None] remains unchanged. + """ + if tsize is not None: + self.total = tsize + return self.update(b * bsize - self.n) # also sets self.n = b * bsize + + eg_link = "https://caspersci.uk.to/matryoshka.zip" + with TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, + desc=eg_link.split('/')[-1]) as t: # all optional kwargs + urllib.urlretrieve(eg_link, filename=os.devnull, + reporthook=t.update_to, data=None) + t.total = t.n + +Inspired by `twine#242 `__. +Functional alternative in +`examples/tqdm_wget.py `__. + +It is recommend to use ``miniters=1`` whenever there is potentially +large differences in iteration speed (e.g. downloading a file over +a patchy connection). 
+ +**Wrapping read/write methods** + +To measure throughput through a file-like object's ``read`` or ``write`` +methods, use ``CallbackIOWrapper``: + +.. code:: python + + from tqdm.auto import tqdm + from tqdm.utils import CallbackIOWrapper + + with tqdm(total=file_obj.size, + unit='B', unit_scale=True, unit_divisor=1024) as t: + fobj = CallbackIOWrapper(t.update, file_obj, "read") + while True: + chunk = fobj.read(chunk_size) + if not chunk: + break + t.reset() + # ... continue to use `t` for something else + +Alternatively, use the even simpler ``wrapattr`` convenience function, +which would condense both the ``urllib`` and ``CallbackIOWrapper`` examples +down to: + +.. code:: python + + import urllib, os + from tqdm import tqdm + + eg_link = "https://caspersci.uk.to/matryoshka.zip" + response = getattr(urllib, 'request', urllib).urlopen(eg_link) + with tqdm.wrapattr(open(os.devnull, "wb"), "write", + miniters=1, desc=eg_link.split('/')[-1], + total=getattr(response, 'length', None)) as fout: + for chunk in response: + fout.write(chunk) + +The ``requests`` equivalent is nearly identical: + +.. code:: python + + import requests, os + from tqdm import tqdm + + eg_link = "https://caspersci.uk.to/matryoshka.zip" + response = requests.get(eg_link, stream=True) + with tqdm.wrapattr(open(os.devnull, "wb"), "write", + miniters=1, desc=eg_link.split('/')[-1], + total=int(response.headers.get('content-length', 0))) as fout: + for chunk in response.iter_content(chunk_size=4096): + fout.write(chunk) + +**Custom callback** + +``tqdm`` is known for intelligently skipping unnecessary displays. To make a +custom callback take advantage of this, simply use the return value of +``update()``. This is set to ``True`` if a ``display()`` was triggered. + +.. code:: python + + from tqdm.auto import tqdm as std_tqdm + + def external_callback(*args, **kwargs): + ... 
+ + class TqdmExt(std_tqdm): + def update(self, n=1): + displayed = super(TqdmExt, self).update(n) + if displayed: + external_callback(**self.format_dict) + return displayed + +``asyncio`` +~~~~~~~~~~~ + +Note that ``break`` isn't currently caught by asynchronous iterators. +This means that ``tqdm`` cannot clean up after itself in this case: + +.. code:: python + + from tqdm.asyncio import tqdm + + async for i in tqdm(range(9)): + if i == 2: + break + +Instead, either call ``pbar.close()`` manually or use the context manager syntax: + +.. code:: python + + from tqdm.asyncio import tqdm + + with tqdm(range(9)) as pbar: + async for i in pbar: + if i == 2: + break + +Pandas Integration +~~~~~~~~~~~~~~~~~~ + +Due to popular demand we've added support for ``pandas`` -- here's an example +for ``DataFrame.progress_apply`` and ``DataFrameGroupBy.progress_apply``: + +.. code:: python + + import pandas as pd + import numpy as np + from tqdm import tqdm + + df = pd.DataFrame(np.random.randint(0, 100, (100000, 6))) + + # Register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm` + # (can use `tqdm.gui.tqdm`, `tqdm.notebook.tqdm`, optional kwargs, etc.) + tqdm.pandas(desc="my bar!") + + # Now you can use `progress_apply` instead of `apply` + # and `progress_map` instead of `map` + df.progress_apply(lambda x: x**2) + # can also groupby: + # df.groupby(0).progress_apply(lambda x: x**2) + +In case you're interested in how this works (and how to modify it for your +own callbacks), see the +`examples `__ +folder or import the module and run ``help()``. + +Keras Integration +~~~~~~~~~~~~~~~~~ + +A ``keras`` callback is also available: + +.. code:: python + + from tqdm.keras import TqdmCallback + + ... + + model.fit(..., verbose=0, callbacks=[TqdmCallback()]) + +Dask Integration +~~~~~~~~~~~~~~~~ + +A ``dask`` callback is also available: + +.. code:: python + + from tqdm.dask import TqdmCallback + + with TqdmCallback(desc="compute"): + ... 
+ arr.compute() + + # or use callback globally + cb = TqdmCallback(desc="global") + cb.register() + arr.compute() + +IPython/Jupyter Integration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +IPython/Jupyter is supported via the ``tqdm.notebook`` submodule: + +.. code:: python + + from tqdm.notebook import trange, tqdm + from time import sleep + + for i in trange(3, desc='1st loop'): + for j in tqdm(range(100), desc='2nd loop'): + sleep(0.01) + +In addition to ``tqdm`` features, the submodule provides a native Jupyter +widget (compatible with IPython v1-v4 and Jupyter), fully working nested bars +and colour hints (blue: normal, green: completed, red: error/interrupt, +light blue: no ETA); as demonstrated below. + +|Screenshot-Jupyter1| +|Screenshot-Jupyter2| +|Screenshot-Jupyter3| + +The ``notebook`` version supports percentage or pixels for overall width +(e.g.: ``ncols='100%'`` or ``ncols='480px'``). + +It is also possible to let ``tqdm`` automatically choose between +console or notebook versions by using the ``autonotebook`` submodule: + +.. code:: python + + from tqdm.autonotebook import tqdm + tqdm.pandas() + +Note that this will issue a ``TqdmExperimentalWarning`` if run in a notebook +since it is not meant to be possible to distinguish between ``jupyter notebook`` +and ``jupyter console``. Use ``auto`` instead of ``autonotebook`` to suppress +this warning. + +Note that notebooks will display the bar in the cell where it was created. +This may be a different cell from the one where it is used. +If this is not desired, either + +- delay the creation of the bar to the cell where it must be displayed, or +- create the bar with ``display=False``, and in a later cell call + ``display(bar.container)``: + +.. code:: python + + from tqdm.notebook import tqdm + pbar = tqdm(..., display=False) + +.. code:: python + + # different cell + display(pbar.container) + +The ``keras`` callback has a ``display()`` method which can be used likewise: + +.. 
code:: python + + from tqdm.keras import TqdmCallback + cbk = TqdmCallback(display=False) + +.. code:: python + + # different cell + cbk.display() + model.fit(..., verbose=0, callbacks=[cbk]) + +Another possibility is to have a single bar (near the top of the notebook) +which is constantly re-used (using ``reset()`` rather than ``close()``). +For this reason, the notebook version (unlike the CLI version) does not +automatically call ``close()`` upon ``Exception``. + +.. code:: python + + from tqdm.notebook import tqdm + pbar = tqdm() + +.. code:: python + + # different cell + iterable = range(100) + pbar.reset(total=len(iterable)) # initialise with new `total` + for i in iterable: + pbar.update() + pbar.refresh() # force print final status but don't `close()` + +Custom Integration +~~~~~~~~~~~~~~~~~~ + +To change the default arguments (such as making ``dynamic_ncols=True``), +simply use built-in Python magic: + +.. code:: python + + from functools import partial + from tqdm import tqdm as std_tqdm + tqdm = partial(std_tqdm, dynamic_ncols=True) + +For further customisation, +``tqdm`` may be inherited from to create custom callbacks (as with the +``TqdmUpTo`` example `above <#hooks-and-callbacks>`__) or for custom frontends +(e.g. GUIs such as notebook or plotting packages). In the latter case: + +1. ``def __init__()`` to call ``super().__init__(..., gui=True)`` to disable + terminal ``status_printer`` creation. +2. Redefine: ``close()``, ``clear()``, ``display()``. + +Consider overloading ``display()`` to use e.g. +``self.frontend(**self.format_dict)`` instead of ``self.sp(repr(self))``. + +Some submodule examples of inheritance: + +- `tqdm/notebook.py `__ +- `tqdm/gui.py `__ +- `tqdm/tk.py `__ +- `tqdm/contrib/slack.py `__ +- `tqdm/contrib/discord.py `__ +- `tqdm/contrib/telegram.py `__ + +Dynamic Monitor/Meter +~~~~~~~~~~~~~~~~~~~~~ + +You can use a ``tqdm`` as a meter which is not monotonically increasing. +This could be because ``n`` decreases (e.g. 
a CPU usage monitor) or ``total`` +changes. + +One example would be recursively searching for files. The ``total`` is the +number of objects found so far, while ``n`` is the number of those objects which +are files (rather than folders): + +.. code:: python + + from tqdm import tqdm + import os.path + + def find_files_recursively(path, show_progress=True): + files = [] + # total=1 assumes `path` is a file + t = tqdm(total=1, unit="file", disable=not show_progress) + if not os.path.exists(path): + raise IOError("Cannot find:" + path) + + def append_found_file(f): + files.append(f) + t.update() + + def list_found_dir(path): + """returns os.listdir(path) assuming os.path.isdir(path)""" + listing = os.listdir(path) + # subtract 1 since a "file" we found was actually this directory + t.total += len(listing) - 1 + # fancy way to give info without forcing a refresh + t.set_postfix(dir=path[-10:], refresh=False) + t.update(0) # may trigger a refresh + return listing + + def recursively_search(path): + if os.path.isdir(path): + for f in list_found_dir(path): + recursively_search(os.path.join(path, f)) + else: + append_found_file(path) + + recursively_search(path) + t.set_postfix(dir=path) + t.close() + return files + +Using ``update(0)`` is a handy way to let ``tqdm`` decide when to trigger a +display refresh to avoid console spamming. + +Writing messages +~~~~~~~~~~~~~~~~ + +This is a work in progress (see +`#737 `__). + +Since ``tqdm`` uses a simple printing mechanism to display progress bars, +you should not write any message in the terminal using ``print()`` while +a progressbar is open. + +To write messages in the terminal without any collision with ``tqdm`` bar +display, a ``.write()`` method is provided: + +.. 
code:: python + + from tqdm.auto import tqdm, trange + from time import sleep + + bar = trange(10) + for i in bar: + # Print using tqdm class method .write() + sleep(0.1) + if not (i % 3): + tqdm.write("Done task %i" % i) + # Can also use bar.write() + +By default, this will print to standard output ``sys.stdout``, but you can +specify any file-like object using the ``file`` argument. For example, this +can be used to redirect the writing of messages to a log file or class. + +Redirecting writing +~~~~~~~~~~~~~~~~~~~ + +If using a library that can print messages to the console, editing the library +by replacing ``print()`` with ``tqdm.write()`` may not be desirable. +In that case, redirecting ``sys.stdout`` to ``tqdm.write()`` is an option. + +To redirect ``sys.stdout``, create a file-like class that will write +any input string to ``tqdm.write()``, and supply the arguments +``file=sys.stdout, dynamic_ncols=True``. + +A reusable canonical example is given below: + +.. code:: python + + from time import sleep + import contextlib + import sys + from tqdm import tqdm + from tqdm.contrib import DummyTqdmFile + + + @contextlib.contextmanager + def std_out_err_redirect_tqdm(): + orig_out_err = sys.stdout, sys.stderr + try: + sys.stdout, sys.stderr = map(DummyTqdmFile, orig_out_err) + yield orig_out_err[0] + # Relay exceptions + except Exception as exc: + raise exc + # Always restore sys.stdout/err if necessary + finally: + sys.stdout, sys.stderr = orig_out_err + + def some_fun(i): + print("Fee, fi, fo,".split()[i]) + + # Redirect stdout to tqdm.write() (don't forget the `as save_stdout`) + with std_out_err_redirect_tqdm() as orig_stdout: + # tqdm needs the original stdout + # and dynamic_ncols=True to autodetect console width + for i in tqdm(range(3), file=orig_stdout, dynamic_ncols=True): + sleep(.5) + some_fun(i) + + # After the `with`, printing is restored + print("Done!") + +Redirecting ``logging`` +~~~~~~~~~~~~~~~~~~~~~~~ + +Similar to ``sys.stdout``/``sys.stderr`` as
detailed above, console ``logging`` +may also be redirected to ``tqdm.write()``. + +Warning: if also redirecting ``sys.stdout``/``sys.stderr``, make sure to +redirect ``logging`` first if needed. + +Helper methods are available in ``tqdm.contrib.logging``. For example: + +.. code:: python + + import logging + from tqdm import trange + from tqdm.contrib.logging import logging_redirect_tqdm + + LOG = logging.getLogger(__name__) + + if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + with logging_redirect_tqdm(): + for i in trange(9): + if i == 4: + LOG.info("console logging redirected to `tqdm.write()`") + # logging restored + +Monitoring thread, intervals and miniters +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``tqdm`` implements a few tricks to increase efficiency and reduce overhead. + +- Avoid unnecessary frequent bar refreshing: ``mininterval`` defines how long + to wait between each refresh. ``tqdm`` always gets updated in the background, + but it will display only every ``mininterval``. +- Reduce number of calls to check system clock/time. +- ``mininterval`` is more intuitive to configure than ``miniters``. + A clever adjustment system ``dynamic_miniters`` will automatically adjust + ``miniters`` to the amount of iterations that fit into time ``mininterval``. + Essentially, ``tqdm`` will check if it's time to print without actually + checking time. This behaviour can still be bypassed by manually setting + ``miniters``. + +However, consider a case with a combination of fast and slow iterations. +After a few fast iterations, ``dynamic_miniters`` will set ``miniters`` to a +large number. When iteration rate subsequently slows, ``miniters`` will +remain large and thus reduce display update frequency. To address this: + +- ``maxinterval`` defines the maximum time between display refreshes. + A concurrent monitoring thread checks for overdue updates and forces one + where necessary.
+ +The monitoring thread should not have a noticeable overhead, and guarantees +updates at least every 10 seconds by default. +This value can be directly changed by setting the ``monitor_interval`` of +any ``tqdm`` instance (i.e. ``t = tqdm.tqdm(...); t.monitor_interval = 2``). +The monitor thread may be disabled application-wide by setting +``tqdm.tqdm.monitor_interval = 0`` before instantiation of any ``tqdm`` bar. + + +Merch +----- + +You can buy `tqdm branded merch `__ now! + +Contributions +------------- + +|GitHub-Commits| |GitHub-Issues| |GitHub-PRs| |OpenHub-Status| |GitHub-Contributions| |CII Best Practices| + +All source code is hosted on `GitHub `__. +Contributions are welcome. + +See the +`CONTRIBUTING `__ +file for more information. + +Developers who have made significant contributions, ranked by *SLoC* +(surviving lines of code, +`git fame `__ ``-wMC --excl '\.(png|gif|jpg)$'``), +are: + +==================== ======================================================== ==== ================================ +Name ID SLoC Notes +==================== ======================================================== ==== ================================ +Casper da Costa-Luis `casperdcl `__ ~80% primary maintainer |Gift-Casper| +Stephen Larroque `lrq3000 `__ ~9% team member +Martin Zugnoni `martinzugnoni `__ ~3% +Daniel Ecer `de-code `__ ~2% +Richard Sheridan `richardsheridan `__ ~1% +Guangshuo Chen `chengs `__ ~1% +Helio Machado `0x2b3bfa0 `__ ~1% +Kyle Altendorf `altendky `__ <1% +Noam Yorav-Raphael `noamraph `__ <1% original author +Matthew Stevens `mjstevens777 `__ <1% +Hadrien Mary `hadim `__ <1% team member +Mikhail Korobov `kmike `__ <1% team member +==================== ======================================================== ==== ================================ + +Ports to Other Languages +~~~~~~~~~~~~~~~~~~~~~~~~ + +A list is available on +`this wiki page `__. 
+ + +LICENCE +------- + +Open Source (OSI approved): |LICENCE| + +Citation information: |DOI| + +|README-Hits| (Since 19 May 2016) + +.. |Logo| image:: https://tqdm.github.io/img/logo.gif +.. |Screenshot| image:: https://tqdm.github.io/img/tqdm.gif +.. |Video| image:: https://tqdm.github.io/img/video.jpg + :target: https://tqdm.github.io/video +.. |Slides| image:: https://tqdm.github.io/img/slides.jpg + :target: https://tqdm.github.io/PyData2019/slides.html +.. |Merch| image:: https://tqdm.github.io/img/merch.jpg + :target: https://tqdm.github.io/merch +.. |Build-Status| image:: https://img.shields.io/github/actions/workflow/status/tqdm/tqdm/test.yml?branch=master&label=tqdm&logo=GitHub + :target: https://github.com/tqdm/tqdm/actions/workflows/test.yml +.. |Coverage-Status| image:: https://img.shields.io/coveralls/github/tqdm/tqdm/master?logo=coveralls + :target: https://coveralls.io/github/tqdm/tqdm +.. |Branch-Coverage-Status| image:: https://codecov.io/gh/tqdm/tqdm/branch/master/graph/badge.svg + :target: https://codecov.io/gh/tqdm/tqdm +.. |Codacy-Grade| image:: https://app.codacy.com/project/badge/Grade/3f965571598f44549c7818f29cdcf177 + :target: https://www.codacy.com/gh/tqdm/tqdm/dashboard +.. |CII Best Practices| image:: https://bestpractices.coreinfrastructure.org/projects/3264/badge + :target: https://bestpractices.coreinfrastructure.org/projects/3264 +.. |GitHub-Status| image:: https://img.shields.io/github/tag/tqdm/tqdm.svg?maxAge=86400&logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/releases +.. |GitHub-Forks| image:: https://img.shields.io/github/forks/tqdm/tqdm.svg?logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/network +.. |GitHub-Stars| image:: https://img.shields.io/github/stars/tqdm/tqdm.svg?logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/stargazers +.. 
|GitHub-Commits| image:: https://img.shields.io/github/commit-activity/y/tqdm/tqdm.svg?logo=git&logoColor=white + :target: https://github.com/tqdm/tqdm/graphs/commit-activity +.. |GitHub-Issues| image:: https://img.shields.io/github/issues-closed/tqdm/tqdm.svg?logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/issues?q= +.. |GitHub-PRs| image:: https://img.shields.io/github/issues-pr-closed/tqdm/tqdm.svg?logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/pulls +.. |GitHub-Contributions| image:: https://img.shields.io/github/contributors/tqdm/tqdm.svg?logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/graphs/contributors +.. |GitHub-Updated| image:: https://img.shields.io/github/last-commit/tqdm/tqdm/master.svg?logo=github&logoColor=white&label=pushed + :target: https://github.com/tqdm/tqdm/pulse +.. |Gift-Casper| image:: https://img.shields.io/badge/dynamic/json.svg?color=ff69b4&label=gifts%20received&prefix=%C2%A3&query=%24..sum&url=https%3A%2F%2Fcaspersci.uk.to%2Fgifts.json + :target: https://cdcl.ml/sponsor +.. |Versions| image:: https://img.shields.io/pypi/v/tqdm.svg + :target: https://tqdm.github.io/releases +.. |PyPI-Downloads| image:: https://img.shields.io/pypi/dm/tqdm.svg?label=pypi%20downloads&logo=PyPI&logoColor=white + :target: https://pepy.tech/project/tqdm +.. |Py-Versions| image:: https://img.shields.io/pypi/pyversions/tqdm.svg?logo=python&logoColor=white + :target: https://pypi.org/project/tqdm +.. |Conda-Forge-Status| image:: https://img.shields.io/conda/v/conda-forge/tqdm.svg?label=conda-forge&logo=conda-forge + :target: https://anaconda.org/conda-forge/tqdm +.. |Snapcraft| image:: https://img.shields.io/badge/snap-install-82BEA0.svg?logo=snapcraft + :target: https://snapcraft.io/tqdm +.. |Docker| image:: https://img.shields.io/badge/docker-pull-blue.svg?logo=docker&logoColor=white + :target: https://hub.docker.com/r/tqdm/tqdm +.. 
|Libraries-Rank| image:: https://img.shields.io/librariesio/sourcerank/pypi/tqdm.svg?logo=koding&logoColor=white + :target: https://libraries.io/pypi/tqdm +.. |Libraries-Dependents| image:: https://img.shields.io/librariesio/dependent-repos/pypi/tqdm.svg?logo=koding&logoColor=white + :target: https://github.com/tqdm/tqdm/network/dependents +.. |OpenHub-Status| image:: https://www.openhub.net/p/tqdm/widgets/project_thin_badge?format=gif + :target: https://www.openhub.net/p/tqdm?ref=Thin+badge +.. |awesome-python| image:: https://awesome.re/mentioned-badge.svg + :target: https://github.com/vinta/awesome-python +.. |LICENCE| image:: https://img.shields.io/pypi/l/tqdm.svg + :target: https://raw.githubusercontent.com/tqdm/tqdm/master/LICENCE +.. |DOI| image:: https://img.shields.io/badge/DOI-10.5281/zenodo.595120-blue.svg + :target: https://doi.org/10.5281/zenodo.595120 +.. |binder-demo| image:: https://mybinder.org/badge_logo.svg + :target: https://mybinder.org/v2/gh/tqdm/tqdm/master?filepath=DEMO.ipynb +.. |Screenshot-Jupyter1| image:: https://tqdm.github.io/img/jupyter-1.gif +.. |Screenshot-Jupyter2| image:: https://tqdm.github.io/img/jupyter-2.gif +.. |Screenshot-Jupyter3| image:: https://tqdm.github.io/img/jupyter-3.gif +.. 
|README-Hits| image:: https://caspersci.uk.to/cgi-bin/hits.cgi?q=tqdm&style=social&r=https://github.com/tqdm/tqdm&l=https://tqdm.github.io/img/favicon.png&f=https://tqdm.github.io/img/logo.gif + :target: https://caspersci.uk.to/cgi-bin/hits.cgi?q=tqdm&a=plot&r=https://github.com/tqdm/tqdm&l=https://tqdm.github.io/img/favicon.png&f=https://tqdm.github.io/img/logo.gif&style=social diff --git a/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/RECORD b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..16f0856f7a4aa63bd55ba3e60602b5ffcc5c8af6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/RECORD @@ -0,0 +1,74 @@ +../../../bin/tqdm,sha256=l74ZRUN4mHWMCfAbLv4bJWVmtaqDvpSZBUsHZCcA8Ac,236 +tqdm-4.66.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tqdm-4.66.2.dist-info/LICENCE,sha256=3DMlLoKQFeOxUAhvubOkD2rW-zLC9GEM6BL6Z301mGo,1985 +tqdm-4.66.2.dist-info/METADATA,sha256=hqX0to0oQdiQK_4I-kKeZTMZVBuSLqQlR-stAlZWUe4,57587 +tqdm-4.66.2.dist-info/RECORD,, +tqdm-4.66.2.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92 +tqdm-4.66.2.dist-info/entry_points.txt,sha256=ReJCH7Ui3Zyh6M16E4OhsZ1oU7WtMXCfbtoyBhGO29Y,39 +tqdm-4.66.2.dist-info/top_level.txt,sha256=NLiUJNfmc9At15s7JURiwvqMEjUi9G5PMGRrmMYzNSM,5 +tqdm/__init__.py,sha256=9mQNYSSqP99JasubEC1POJLMmhkkBH6cJZxPIR5G2pQ,1572 +tqdm/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30 +tqdm/__pycache__/__init__.cpython-310.pyc,, +tqdm/__pycache__/__main__.cpython-310.pyc,, +tqdm/__pycache__/_dist_ver.cpython-310.pyc,, +tqdm/__pycache__/_main.cpython-310.pyc,, +tqdm/__pycache__/_monitor.cpython-310.pyc,, +tqdm/__pycache__/_tqdm.cpython-310.pyc,, +tqdm/__pycache__/_tqdm_gui.cpython-310.pyc,, +tqdm/__pycache__/_tqdm_notebook.cpython-310.pyc,, +tqdm/__pycache__/_tqdm_pandas.cpython-310.pyc,, +tqdm/__pycache__/_utils.cpython-310.pyc,, 
+tqdm/__pycache__/asyncio.cpython-310.pyc,, +tqdm/__pycache__/auto.cpython-310.pyc,, +tqdm/__pycache__/autonotebook.cpython-310.pyc,, +tqdm/__pycache__/cli.cpython-310.pyc,, +tqdm/__pycache__/dask.cpython-310.pyc,, +tqdm/__pycache__/gui.cpython-310.pyc,, +tqdm/__pycache__/keras.cpython-310.pyc,, +tqdm/__pycache__/notebook.cpython-310.pyc,, +tqdm/__pycache__/rich.cpython-310.pyc,, +tqdm/__pycache__/std.cpython-310.pyc,, +tqdm/__pycache__/tk.cpython-310.pyc,, +tqdm/__pycache__/utils.cpython-310.pyc,, +tqdm/__pycache__/version.cpython-310.pyc,, +tqdm/_dist_ver.py,sha256=WtMQthlgvWIN7EQ42ET8BE1M6YXdXKafzqbKD3PLmGk,23 +tqdm/_main.py,sha256=9ySvgmi_2Sw4CAo5UDW0Q2dxfTryboEWGHohfCJz0sA,283 +tqdm/_monitor.py,sha256=Uku-DPWgzJ7dO5CK08xKJK-E_F6qQ-JB3ksuXczSYR0,3699 +tqdm/_tqdm.py,sha256=LfLCuJ6bpsVo9xilmtBXyEm1vGnUCFrliW85j3J-nD4,283 +tqdm/_tqdm_gui.py,sha256=03Hc8KayxJveieI5-0-2NGiDpLvw9jZekofJUV7CCwk,287 +tqdm/_tqdm_notebook.py,sha256=BuHiLuxu6uEfZFaPJW3RPpPaxaVctEQA3kdSJSDL1hw,307 +tqdm/_tqdm_pandas.py,sha256=c9jptUgigN6axRDhRd4Rif98Tmxeopc1nFNFhIpbFUE,888 +tqdm/_utils.py,sha256=_4E73bfDj4f1s3sM42NLHNrZDOkijZoWq-n6xWLkdZ8,553 +tqdm/asyncio.py,sha256=WcWbVEjc1-GxqnN0BVntDuwYR31JN9SV7ERZhEz8kKo,2775 +tqdm/auto.py,sha256=nDZflj6p2zKkjBCNBourrhS81zYfZy1_dQvbckrdW8o,871 +tqdm/autonotebook.py,sha256=Yb9F5uaiBPhfbDDFpbtoG8I2YUw3uQJ89rUDLbfR6ws,956 +tqdm/cli.py,sha256=RZh5sKVclaNp_7tZ3iCXWJB3-V6KfeQNNw7mZ818gMc,10594 +tqdm/completion.sh,sha256=j79KbSmpIj_E11jfTfBXrGnUTzKXVpQ1vGVQvsyDRl4,946 +tqdm/contrib/__init__.py,sha256=cNNaRURdcPjbQpkxPGe4iaShyQr_Dx8h6NQJubPhq7g,2513 +tqdm/contrib/__pycache__/__init__.cpython-310.pyc,, +tqdm/contrib/__pycache__/bells.cpython-310.pyc,, +tqdm/contrib/__pycache__/concurrent.cpython-310.pyc,, +tqdm/contrib/__pycache__/discord.cpython-310.pyc,, +tqdm/contrib/__pycache__/itertools.cpython-310.pyc,, +tqdm/contrib/__pycache__/logging.cpython-310.pyc,, +tqdm/contrib/__pycache__/slack.cpython-310.pyc,, 
+tqdm/contrib/__pycache__/telegram.cpython-310.pyc,, +tqdm/contrib/__pycache__/utils_worker.cpython-310.pyc,, +tqdm/contrib/bells.py,sha256=Yx1HqGCmHrESCAO700j5wE__JCleNODJxedh1ijPLD0,837 +tqdm/contrib/concurrent.py,sha256=K1yjloKS5WRNFyjLRth0DmU5PAnDbF0A-GD27N-J4a8,3986 +tqdm/contrib/discord.py,sha256=hhbOL1VGTWXQ4z1RUsIybhga7oUMYH5CoAkNWTt7t70,3962 +tqdm/contrib/itertools.py,sha256=WdKKQU5eSzsqHu29SN_oH12huYZo0Jihqoi9-nVhwz4,774 +tqdm/contrib/logging.py,sha256=aUvbBPGm0jetH22p1H5bobkdzewwYpXkdO8C_6nmAJk,3785 +tqdm/contrib/slack.py,sha256=CYIzKBgbk0azM19F4kSA8Ccod9I8hNfuscbcc9kjGYU,4068 +tqdm/contrib/telegram.py,sha256=ICWNBIb-l-quSgjKw4PhRdrH2_bKk7umy6BfEsp9EuU,5100 +tqdm/contrib/utils_worker.py,sha256=HJP5Mz1S1xyzEke2JaqJ2sYLHXADYoo2epT5AzQ38eA,1207 +tqdm/dask.py,sha256=RWR3rz9s36zNceeESTm-F3H1F7dxuBuqS1bhifOuvEY,1337 +tqdm/gui.py,sha256=LhGizt5G1w4f17OjFRBWza8MEmQZjrFgTx9Y9Lc7-gA,5809 +tqdm/keras.py,sha256=op9sBkb6q6c6dw2wJ0SD2ZwpPK7yM1Vbg4l1Qiy3MIo,4373 +tqdm/notebook.py,sha256=A6waQCPa0Ud3OrByzZwj91STnhwoJ1Ui3THrmQBZjqE,11023 +tqdm/rich.py,sha256=8SamiW-LlUKEMQwzubomHCXkX6awaz-7Tq-gnMkJJ5A,5018 +tqdm/std.py,sha256=AuM80Fy023Ex6N7J8TP8XaJehJLo8OnjIfLk9r7j7T4,57478 +tqdm/tk.py,sha256=Hp9QwXTsihURoxt5aXbIe8Cu1qjLBZzkhfl_nAgD418,6727 +tqdm/tqdm.1,sha256=aILyUPk2S4OPe_uWy2P4AMjUf0oQ6PUW0nLYXB-BWwI,7889 +tqdm/utils.py,sha256=abBTZZ38isuYMR38AgZ-nQzC-KvRn8M-mFCuzFpgsuA,11883 +tqdm/version.py,sha256=-1yWjfu3P0eghVsysHH07fbzdiADNRdzRtYPqOaqR2A,333 diff --git a/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/WHEEL b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..98c0d20b7a64f4f998d7913e1d38a05dba20916c --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/entry_points.txt 
b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..540e60f4e073bc53a5f0a521a3639e0d80780af4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +tqdm = tqdm.cli:main diff --git a/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..78620c472c9d799a14ccb02a0233f4669b3bcdcb --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm-4.66.2.dist-info/top_level.txt @@ -0,0 +1 @@ +tqdm diff --git a/venv/lib/python3.10/site-packages/zstandard/__init__.py b/venv/lib/python3.10/site-packages/zstandard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8588729004b2c22b2e2150679d63120e122579a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/zstandard/__init__.py @@ -0,0 +1,210 @@ +# Copyright (c) 2017-present, Gregory Szorc +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. + +"""Python interface to the Zstandard (zstd) compression library.""" + +from __future__ import absolute_import, unicode_literals + +# This module serves 2 roles: +# +# 1) Export the C or CFFI "backend" through a central module. +# 2) Implement additional functionality built on top of C or CFFI backend. + +import builtins +import io +import os +import platform + +from typing import ByteString + +# Some Python implementations don't support C extensions. That's why we have +# a CFFI implementation in the first place. The code here import one of our +# "backends" then re-exports the symbols from this module. For convenience, +# we support falling back to the CFFI backend if the C extension can't be +# imported. 
But for performance reasons, we only do this on unknown Python +# implementation. Notably, for CPython we require the C extension by default. +# Because someone will inevitably want special behavior, the behavior is +# configurable via an environment variable. A potentially better way to handle +# this is to import a special ``__importpolicy__`` module or something +# defining a variable and `setup.py` could write the file with whatever +# policy was specified at build time. Until someone needs it, we go with +# the hacky but simple environment variable approach. +_module_policy = os.environ.get("PYTHON_ZSTANDARD_IMPORT_POLICY", "default") + +if _module_policy == "default": + if platform.python_implementation() in ("CPython",): + from .backend_c import * # type: ignore + + backend = "cext" + elif platform.python_implementation() in ("PyPy",): + from .backend_cffi import * # type: ignore + + backend = "cffi" + else: + try: + from .backend_c import * + + backend = "cext" + except ImportError: + from .backend_cffi import * + + backend = "cffi" +elif _module_policy == "cffi_fallback": + try: + from .backend_c import * + + backend = "cext" + except ImportError: + from .backend_cffi import * + + backend = "cffi" +elif _module_policy == "rust": + from .backend_rust import * # type: ignore + + backend = "rust" +elif _module_policy == "cext": + from .backend_c import * + + backend = "cext" +elif _module_policy == "cffi": + from .backend_cffi import * + + backend = "cffi" +else: + raise ImportError( + "unknown module import policy: %s; use default, cffi_fallback, " + "cext, or cffi" % _module_policy + ) + +# Keep this in sync with python-zstandard.h, rust-ext/src/lib.rs, and debian/changelog. +__version__ = "0.22.0" + +_MODE_CLOSED = 0 +_MODE_READ = 1 +_MODE_WRITE = 2 + + +def open( + filename, + mode="rb", + cctx=None, + dctx=None, + encoding=None, + errors=None, + newline=None, + closefd=None, +): + """Create a file object with zstd (de)compression. 
+ + The object returned from this function will be a + :py:class:`ZstdDecompressionReader` if opened for reading in binary mode, + a :py:class:`ZstdCompressionWriter` if opened for writing in binary mode, + or an ``io.TextIOWrapper`` if opened for reading or writing in text mode. + + :param filename: + ``bytes``, ``str``, or ``os.PathLike`` defining a file to open or a + file object (with a ``read()`` or ``write()`` method). + :param mode: + ``str`` File open mode. Accepts any of the open modes recognized by + ``open()``. + :param cctx: + ``ZstdCompressor`` to use for compression. If not specified and file + is opened for writing, the default ``ZstdCompressor`` will be used. + :param dctx: + ``ZstdDecompressor`` to use for decompression. If not specified and file + is opened for reading, the default ``ZstdDecompressor`` will be used. + :param encoding: + ``str`` that defines text encoding to use when file is opened in text + mode. + :param errors: + ``str`` defining text encoding error handling mode. + :param newline: + ``str`` defining newline to use in text mode. + :param closefd: + ``bool`` whether to close the file when the returned object is closed. + Only used if a file object is passed. If a filename is specified, the + opened file is always closed when the returned object is closed. 
+ """ + normalized_mode = mode.replace("t", "") + + if normalized_mode in ("r", "rb"): + dctx = dctx or ZstdDecompressor() + open_mode = "r" + raw_open_mode = "rb" + elif normalized_mode in ("w", "wb", "a", "ab", "x", "xb"): + cctx = cctx or ZstdCompressor() + open_mode = "w" + raw_open_mode = normalized_mode + if not raw_open_mode.endswith("b"): + raw_open_mode = raw_open_mode + "b" + else: + raise ValueError("Invalid mode: {!r}".format(mode)) + + if hasattr(os, "PathLike"): + types = (str, bytes, os.PathLike) + else: + types = (str, bytes) + + if isinstance(filename, types): # type: ignore + inner_fh = builtins.open(filename, raw_open_mode) + closefd = True + elif hasattr(filename, "read") or hasattr(filename, "write"): + inner_fh = filename + closefd = bool(closefd) + else: + raise TypeError( + "filename must be a str, bytes, file or PathLike object" + ) + + if open_mode == "r": + fh = dctx.stream_reader(inner_fh, closefd=closefd) + elif open_mode == "w": + fh = cctx.stream_writer(inner_fh, closefd=closefd) + else: + raise RuntimeError("logic error in zstandard.open() handling open mode") + + if "b" not in normalized_mode: + return io.TextIOWrapper( + fh, encoding=encoding, errors=errors, newline=newline + ) + else: + return fh + + +def compress(data: ByteString, level: int = 3) -> bytes: + """Compress source data using the zstd compression format. + + This performs one-shot compression using basic/default compression + settings. + + This method is provided for convenience and is equivalent to calling + ``ZstdCompressor(level=level).compress(data)``. + + If you find yourself calling this function in a tight loop, + performance will be greater if you construct a single ``ZstdCompressor`` + and repeatedly call ``compress()`` on it. + """ + cctx = ZstdCompressor(level=level) + + return cctx.compress(data) + + +def decompress(data: ByteString, max_output_size: int = 0) -> bytes: + """Decompress a zstd frame into its original data. 
+ + This performs one-shot decompression using basic/default compression + settings. + + This method is provided for convenience and is equivalent to calling + ``ZstdDecompressor().decompress(data, max_output_size=max_output_size)``. + + If you find yourself calling this function in a tight loop, performance + will be greater if you construct a single ``ZstdDecompressor`` and + repeatedly call ``decompress()`` on it. + """ + dctx = ZstdDecompressor() + + return dctx.decompress(data, max_output_size=max_output_size) diff --git a/venv/lib/python3.10/site-packages/zstandard/__init__.pyi b/venv/lib/python3.10/site-packages/zstandard/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c95a73e89b9a3bbcf740cc5daf63d16a92472130 --- /dev/null +++ b/venv/lib/python3.10/site-packages/zstandard/__init__.pyi @@ -0,0 +1,480 @@ +# Copyright (c) 2016-present, Gregory Szorc +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. 
+ +import os + +from typing import ( + BinaryIO, + ByteString, + Generator, + IO, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) + +FLUSH_BLOCK: int +FLUSH_FRAME: int + +COMPRESSOBJ_FLUSH_FINISH: int +COMPRESSOBJ_FLUSH_BLOCK: int + +CONTENTSIZE_UNKNOWN: int +CONTENTSIZE_ERROR: int + +MAX_COMPRESSION_LEVEL: int + +COMPRESSION_RECOMMENDED_INPUT_SIZE: int +COMPRESSION_RECOMMENDED_OUTPUT_SIZE: int + +DECOMPRESSION_RECOMMENDED_INPUT_SIZE: int +DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE: int + +BLOCKSIZELOG_MAX: int +BLOCKSIZE_MAX: int + +WINDOWLOG_MIN: int +WINDOWLOG_MAX: int + +CHAINLOG_MIN: int +CHAINLOG_MAX: int +HASHLOG_MIN: int +HASHLOG_MAX: int +MINMATCH_MIN: int +MINMATCH_MAX: int +SEARCHLOG_MIN: int +SEARCHLOG_MAX: int +SEARCHLENGTH_MIN: int +SEARCHLENGTH_MAX: int +TARGETLENGTH_MIN: int +TARGETLENGTH_MAX: int +LDM_MINMATCH_MIN: int +LDM_MINMATCH_MAX: int +LDM_BUCKETSIZELOG_MAX: int + +STRATEGY_FAST: int +STRATEGY_DFAST: int +STRATEGY_GREEDY: int +STRATEGY_LAZY: int +STRATEGY_LAZY2: int +STRATEGY_BTLAZY2: int +STRATEGY_BTOPT: int +STRATEGY_BTULTRA: int +STRATEGY_BTULTRA2: int + +DICT_TYPE_AUTO: int +DICT_TYPE_RAWCONTENT: int +DICT_TYPE_FULLDICT: int + +FORMAT_ZSTD1: int +FORMAT_ZSTD1_MAGICLESS: int + +ZSTD_VERSION: Tuple[int, int, int] +FRAME_HEADER: bytes +MAGIC_NUMBER: int + +backend: str +backend_features: Set[str] +__version__: str + +class ZstdError(Exception): ... + +class BufferSegment(object): + offset: int + def __len__(self) -> int: ... + def tobytes(self) -> bytes: ... + +class BufferSegments(object): + def __len__(self) -> int: ... + def __getitem__(self, i: int) -> BufferSegment: ... + +class BufferWithSegments(object): + size: int + def __init__(self, data: ByteString, segments: ByteString): ... + def __len__(self) -> int: ... + def __getitem__(self, i: int) -> BufferSegment: ... + def segments(self): ... + def tobytes(self) -> bytes: ... + +class BufferWithSegmentsCollection(object): + def __init__(self, *args): ... 
+ def __len__(self) -> int: ... + def __getitem__(self, i: int) -> BufferSegment: ... + def size(self) -> int: ... + +class ZstdCompressionParameters(object): + @staticmethod + def from_level( + level: int, source_size: int = ..., dict_size: int = ..., **kwargs + ) -> "ZstdCompressionParameters": ... + def __init__( + self, + format: int = ..., + compression_level: int = ..., + window_log: int = ..., + hash_log: int = ..., + chain_log: int = ..., + search_log: int = ..., + min_match: int = ..., + target_length: int = ..., + strategy: int = ..., + write_content_size: int = ..., + write_checksum: int = ..., + write_dict_id: int = ..., + job_size: int = ..., + overlap_log: int = ..., + force_max_window: int = ..., + enable_ldm: int = ..., + ldm_hash_log: int = ..., + ldm_min_match: int = ..., + ldm_bucket_size_log: int = ..., + ldm_hash_rate_log: int = ..., + threads: int = ..., + ): ... + @property + def format(self) -> int: ... + @property + def compression_level(self) -> int: ... + @property + def window_log(self) -> int: ... + @property + def hash_log(self) -> int: ... + @property + def chain_log(self) -> int: ... + @property + def search_log(self) -> int: ... + @property + def min_match(self) -> int: ... + @property + def target_length(self) -> int: ... + @property + def strategy(self) -> int: ... + @property + def write_content_size(self) -> int: ... + @property + def write_checksum(self) -> int: ... + @property + def write_dict_id(self) -> int: ... + @property + def job_size(self) -> int: ... + @property + def overlap_log(self) -> int: ... + @property + def force_max_window(self) -> int: ... + @property + def enable_ldm(self) -> int: ... + @property + def ldm_hash_log(self) -> int: ... + @property + def ldm_min_match(self) -> int: ... + @property + def ldm_bucket_size_log(self) -> int: ... + @property + def ldm_hash_rate_log(self) -> int: ... + @property + def threads(self) -> int: ... + def estimated_compression_context_size(self) -> int: ... 
+ +class CompressionParameters(ZstdCompressionParameters): ... + +class ZstdCompressionDict(object): + k: int + d: int + def __init__( + self, + data: ByteString, + dict_type: int = ..., + k: int = ..., + d: int = ..., + ): ... + def __len__(self) -> int: ... + def dict_id(self) -> int: ... + def as_bytes(self) -> bytes: ... + def precompute_compress( + self, + level: int = ..., + compression_params: ZstdCompressionParameters = ..., + ): ... + +class ZstdCompressionObj(object): + def compress(self, data: ByteString) -> bytes: ... + def flush(self, flush_mode: int = ...) -> bytes: ... + +class ZstdCompressionChunker(object): + def compress(self, data: ByteString): ... + def flush(self): ... + def finish(self): ... + +class ZstdCompressionReader(BinaryIO): + def __enter__(self) -> "ZstdCompressionReader": ... + def __exit__(self, exc_type, exc_value, exc_tb): ... + def readable(self) -> bool: ... + def writable(self) -> bool: ... + def seekable(self) -> bool: ... + def readline(self, limit: int = ...) -> bytes: ... + def readlines(self, hint: int = ...) -> List[bytes]: ... + def write(self, data: ByteString): ... + def writelines(self, data: Iterable[bytes]): ... + def isatty(self) -> bool: ... + def flush(self): ... + def close(self): ... + @property + def closed(self) -> bool: ... + def tell(self) -> int: ... + def readall(self) -> bytes: ... + def __iter__(self): ... + def __next__(self): ... + def next(self): ... + def read(self, size: int = ...) -> bytes: ... + def read1(self, size: int = ...) -> bytes: ... + def readinto(self, b) -> int: ... + def readinto1(self, b) -> int: ... + +class ZstdCompressionWriter(BinaryIO): + def __enter__(self) -> "ZstdCompressionWriter": ... + def __exit__(self, exc_type, exc_value, exc_tb): ... + def memory_size(self) -> int: ... + def fileno(self) -> int: ... + def close(self): ... + @property + def closed(self) -> bool: ... + def isatty(self) -> bool: ... + def readable(self) -> bool: ... + def readline(self, size: int = ...) 
-> bytes: ... + def readlines(self, hint: int = ...) -> List[bytes]: ... + def seek(self, offset: int, whence: int = ...): ... + def seekable(self) -> bool: ... + def truncate(self, size: int = ...): ... + def writable(self) -> bool: ... + def writelines(self, lines: Iterable[bytes]): ... + def read(self, size: int = ...) -> bytes: ... + def readall(self) -> bytes: ... + def readinto(self, b): ... + def write(self, data: ByteString) -> int: ... + def flush(self, flush_mode: int = ...) -> int: ... + def tell(self) -> int: ... + +class ZstdCompressor(object): + def __init__( + self, + level: int = ..., + dict_data: Optional[ZstdCompressionDict] = ..., + compression_params: Optional[ZstdCompressionParameters] = ..., + write_checksum: Optional[bool] = ..., + write_content_size: Optional[bool] = ..., + write_dict_id: Optional[bool] = ..., + threads: int = ..., + ): ... + def memory_size(self) -> int: ... + def compress(self, data: ByteString) -> bytes: ... + def compressobj(self, size: int = ...) -> ZstdCompressionObj: ... + def chunker( + self, size: int = ..., chunk_size: int = ... + ) -> ZstdCompressionChunker: ... + def copy_stream( + self, + ifh: IO[bytes], + ofh: IO[bytes], + size: int = ..., + read_size: int = ..., + write_size: int = ..., + ) -> Tuple[int, int]: ... + def stream_reader( + self, + source: Union[IO[bytes], ByteString], + size: int = ..., + read_size: int = ..., + *, + closefd: bool = ..., + ) -> ZstdCompressionReader: ... + def stream_writer( + self, + writer: IO[bytes], + size: int = ..., + write_size: int = ..., + write_return_read: bool = ..., + *, + closefd: bool = ..., + ) -> ZstdCompressionWriter: ... + def read_to_iter( + self, + reader: Union[IO[bytes], ByteString], + size: int = ..., + read_size: int = ..., + write_size: int = ..., + ) -> Generator[bytes, None, None]: ... + def frame_progression(self) -> Tuple[int, int, int]: ... 
+ def multi_compress_to_buffer( + self, + data: Union[ + BufferWithSegments, + BufferWithSegmentsCollection, + List[ByteString], + ], + threads: int = ..., + ) -> BufferWithSegmentsCollection: ... + +class ZstdDecompressionObj(object): + def decompress(self, data: ByteString) -> bytes: ... + def flush(self, length: int = ...) -> bytes: ... + @property + def unused_data(self) -> bytes: ... + @property + def unconsumed_tail(self) -> bytes: ... + @property + def eof(self) -> bool: ... + +class ZstdDecompressionReader(BinaryIO): + def __enter__(self) -> "ZstdDecompressionReader": ... + def __exit__(self, exc_type, exc_value, exc_tb): ... + def readable(self) -> bool: ... + def writable(self) -> bool: ... + def seekable(self) -> bool: ... + def readline(self, size: int = ...): ... + def readlines(self, hint: int = ...): ... + def write(self, data: ByteString): ... + def writelines(self, lines: Iterable[bytes]): ... + def isatty(self) -> bool: ... + def flush(self): ... + def close(self): ... + @property + def closed(self) -> bool: ... + def tell(self) -> int: ... + def readall(self) -> bytes: ... + def __iter__(self): ... + def __next__(self): ... + def next(self): ... + def read(self, size: int = ...) -> bytes: ... + def readinto(self, b) -> int: ... + def read1(self, size: int = ...) -> bytes: ... + def readinto1(self, b) -> int: ... + def seek(self, pos: int, whence: int = ...) -> int: ... + +class ZstdDecompressionWriter(BinaryIO): + def __enter__(self) -> "ZstdDecompressionWriter": ... + def __exit__(self, exc_type, exc_value, exc_tb): ... + def memory_size(self) -> int: ... + def close(self): ... + @property + def closed(self) -> bool: ... + def fileno(self) -> int: ... + def flush(self): ... + def isatty(self) -> bool: ... + def readable(self) -> bool: ... + def readline(self, size: int = ...): ... + def readlines(self, hint: int = ...): ... + def seek(self, offset: int, whence: int = ...): ... + def seekable(self) -> bool: ... + def tell(self): ... 
+ def truncate(self, size: int = ...): ... + def writable(self) -> bool: ... + def writelines(self, lines: Iterable[bytes]): ... + def read(self, size: int = ...): ... + def readall(self): ... + def readinto(self, b): ... + def write(self, data: ByteString) -> int: ... + +class ZstdDecompressor(object): + def __init__( + self, + dict_data: Optional[ZstdCompressionDict] = ..., + max_window_size: int = ..., + format: int = ..., + ): ... + def memory_size(self) -> int: ... + def decompress( + self, + data: ByteString, + max_output_size: int = ..., + read_across_frames: bool = ..., + allow_extra_data: bool = ..., + ) -> bytes: ... + def stream_reader( + self, + source: Union[IO[bytes], ByteString], + read_size: int = ..., + read_across_frames: bool = ..., + *, + closefd=False, + ) -> ZstdDecompressionReader: ... + def decompressobj( + self, write_size: int = ..., read_across_frames: bool = False + ) -> ZstdDecompressionObj: ... + def read_to_iter( + self, + reader: Union[IO[bytes], ByteString], + read_size: int = ..., + write_size: int = ..., + skip_bytes: int = ..., + ) -> Generator[bytes, None, None]: ... + def stream_writer( + self, + writer: IO[bytes], + write_size: int = ..., + write_return_read: bool = ..., + *, + closefd: bool = ..., + ) -> ZstdDecompressionWriter: ... + def copy_stream( + self, + ifh: IO[bytes], + ofh: IO[bytes], + read_size: int = ..., + write_size: int = ..., + ) -> Tuple[int, int]: ... + def decompress_content_dict_chain( + self, frames: list[ByteString] + ) -> bytes: ... + def multi_decompress_to_buffer( + self, + frames: Union[ + BufferWithSegments, + BufferWithSegmentsCollection, + List[ByteString], + ], + decompressed_sizes: ByteString = ..., + threads: int = ..., + ) -> BufferWithSegmentsCollection: ... + +class FrameParameters(object): + content_size: int + window_size: int + dict_id: int + has_checksum: bool + +def estimate_decompression_context_size() -> int: ... +def frame_content_size(data: ByteString) -> int: ... 
+def frame_header_size(data: ByteString) -> int: ... +def get_frame_parameters(data: ByteString) -> FrameParameters: ... +def train_dictionary( + dict_size: int, + samples: list[ByteString], + k: int = ..., + d: int = ..., + f: int = ..., + split_point: float = ..., + accel: int = ..., + notifications: int = ..., + dict_id: int = ..., + level: int = ..., + steps: int = ..., + threads: int = ..., +) -> ZstdCompressionDict: ... +def open( + filename: Union[bytes, str, os.PathLike, BinaryIO], + mode: str = ..., + cctx: Optional[ZstdCompressor] = ..., + dctx: Optional[ZstdDecompressor] = ..., + encoding: Optional[str] = ..., + errors: Optional[str] = ..., + newline: Optional[str] = ..., + closefd: bool = ..., +): ... +def compress(data: ByteString, level: int = ...) -> bytes: ... +def decompress(data: ByteString, max_output_size: int = ...) -> bytes: ... diff --git a/venv/lib/python3.10/site-packages/zstandard/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/zstandard/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..625fdfdc59c338b216c71de19f3466b2790cbec3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/zstandard/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/zstandard/__pycache__/backend_cffi.cpython-310.pyc b/venv/lib/python3.10/site-packages/zstandard/__pycache__/backend_cffi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb5061b65736d29536550d5eb62d092766d3fb5e Binary files /dev/null and b/venv/lib/python3.10/site-packages/zstandard/__pycache__/backend_cffi.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/zstandard/backend_cffi.py b/venv/lib/python3.10/site-packages/zstandard/backend_cffi.py new file mode 100644 index 0000000000000000000000000000000000000000..7137542f189cf842006b5da461583f0486a94493 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/zstandard/backend_cffi.py @@ -0,0 +1,4477 @@ +# Copyright (c) 2016-present, Gregory Szorc +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. + +"""Python interface to the Zstandard (zstd) compression library.""" + +from __future__ import absolute_import, unicode_literals + +# This should match what the C extension exports. +__all__ = [ + "BufferSegment", + "BufferSegments", + "BufferWithSegments", + "BufferWithSegmentsCollection", + "ZstdCompressionChunker", + "ZstdCompressionDict", + "ZstdCompressionObj", + "ZstdCompressionParameters", + "ZstdCompressionReader", + "ZstdCompressionWriter", + "ZstdCompressor", + "ZstdDecompressionObj", + "ZstdDecompressionReader", + "ZstdDecompressionWriter", + "ZstdDecompressor", + "ZstdError", + "FrameParameters", + "backend_features", + "estimate_decompression_context_size", + "frame_content_size", + "frame_header_size", + "get_frame_parameters", + "train_dictionary", + # Constants. 
+ "FLUSH_BLOCK", + "FLUSH_FRAME", + "COMPRESSOBJ_FLUSH_FINISH", + "COMPRESSOBJ_FLUSH_BLOCK", + "ZSTD_VERSION", + "FRAME_HEADER", + "CONTENTSIZE_UNKNOWN", + "CONTENTSIZE_ERROR", + "MAX_COMPRESSION_LEVEL", + "COMPRESSION_RECOMMENDED_INPUT_SIZE", + "COMPRESSION_RECOMMENDED_OUTPUT_SIZE", + "DECOMPRESSION_RECOMMENDED_INPUT_SIZE", + "DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE", + "MAGIC_NUMBER", + "BLOCKSIZELOG_MAX", + "BLOCKSIZE_MAX", + "WINDOWLOG_MIN", + "WINDOWLOG_MAX", + "CHAINLOG_MIN", + "CHAINLOG_MAX", + "HASHLOG_MIN", + "HASHLOG_MAX", + "MINMATCH_MIN", + "MINMATCH_MAX", + "SEARCHLOG_MIN", + "SEARCHLOG_MAX", + "SEARCHLENGTH_MIN", + "SEARCHLENGTH_MAX", + "TARGETLENGTH_MIN", + "TARGETLENGTH_MAX", + "LDM_MINMATCH_MIN", + "LDM_MINMATCH_MAX", + "LDM_BUCKETSIZELOG_MAX", + "STRATEGY_FAST", + "STRATEGY_DFAST", + "STRATEGY_GREEDY", + "STRATEGY_LAZY", + "STRATEGY_LAZY2", + "STRATEGY_BTLAZY2", + "STRATEGY_BTOPT", + "STRATEGY_BTULTRA", + "STRATEGY_BTULTRA2", + "DICT_TYPE_AUTO", + "DICT_TYPE_RAWCONTENT", + "DICT_TYPE_FULLDICT", + "FORMAT_ZSTD1", + "FORMAT_ZSTD1_MAGICLESS", +] + +import io +import os + +from ._cffi import ( # type: ignore + ffi, + lib, +) + + +backend_features = set() # type: ignore + +COMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_CStreamInSize() +COMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_CStreamOutSize() +DECOMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_DStreamInSize() +DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_DStreamOutSize() + +new_nonzero = ffi.new_allocator(should_clear_after_alloc=False) + + +MAX_COMPRESSION_LEVEL = lib.ZSTD_maxCLevel() +MAGIC_NUMBER = lib.ZSTD_MAGICNUMBER +FRAME_HEADER = b"\x28\xb5\x2f\xfd" +CONTENTSIZE_UNKNOWN = lib.ZSTD_CONTENTSIZE_UNKNOWN +CONTENTSIZE_ERROR = lib.ZSTD_CONTENTSIZE_ERROR +ZSTD_VERSION = ( + lib.ZSTD_VERSION_MAJOR, + lib.ZSTD_VERSION_MINOR, + lib.ZSTD_VERSION_RELEASE, +) + +BLOCKSIZELOG_MAX = lib.ZSTD_BLOCKSIZELOG_MAX +BLOCKSIZE_MAX = lib.ZSTD_BLOCKSIZE_MAX +WINDOWLOG_MIN = lib.ZSTD_WINDOWLOG_MIN +WINDOWLOG_MAX = 
lib.ZSTD_WINDOWLOG_MAX +CHAINLOG_MIN = lib.ZSTD_CHAINLOG_MIN +CHAINLOG_MAX = lib.ZSTD_CHAINLOG_MAX +HASHLOG_MIN = lib.ZSTD_HASHLOG_MIN +HASHLOG_MAX = lib.ZSTD_HASHLOG_MAX +MINMATCH_MIN = lib.ZSTD_MINMATCH_MIN +MINMATCH_MAX = lib.ZSTD_MINMATCH_MAX +SEARCHLOG_MIN = lib.ZSTD_SEARCHLOG_MIN +SEARCHLOG_MAX = lib.ZSTD_SEARCHLOG_MAX +SEARCHLENGTH_MIN = lib.ZSTD_MINMATCH_MIN +SEARCHLENGTH_MAX = lib.ZSTD_MINMATCH_MAX +TARGETLENGTH_MIN = lib.ZSTD_TARGETLENGTH_MIN +TARGETLENGTH_MAX = lib.ZSTD_TARGETLENGTH_MAX +LDM_MINMATCH_MIN = lib.ZSTD_LDM_MINMATCH_MIN +LDM_MINMATCH_MAX = lib.ZSTD_LDM_MINMATCH_MAX +LDM_BUCKETSIZELOG_MAX = lib.ZSTD_LDM_BUCKETSIZELOG_MAX + +STRATEGY_FAST = lib.ZSTD_fast +STRATEGY_DFAST = lib.ZSTD_dfast +STRATEGY_GREEDY = lib.ZSTD_greedy +STRATEGY_LAZY = lib.ZSTD_lazy +STRATEGY_LAZY2 = lib.ZSTD_lazy2 +STRATEGY_BTLAZY2 = lib.ZSTD_btlazy2 +STRATEGY_BTOPT = lib.ZSTD_btopt +STRATEGY_BTULTRA = lib.ZSTD_btultra +STRATEGY_BTULTRA2 = lib.ZSTD_btultra2 + +DICT_TYPE_AUTO = lib.ZSTD_dct_auto +DICT_TYPE_RAWCONTENT = lib.ZSTD_dct_rawContent +DICT_TYPE_FULLDICT = lib.ZSTD_dct_fullDict + +FORMAT_ZSTD1 = lib.ZSTD_f_zstd1 +FORMAT_ZSTD1_MAGICLESS = lib.ZSTD_f_zstd1_magicless + +FLUSH_BLOCK = 0 +FLUSH_FRAME = 1 + +COMPRESSOBJ_FLUSH_FINISH = 0 +COMPRESSOBJ_FLUSH_BLOCK = 1 + + +def _cpu_count(): + # os.cpu_count() was introducd in Python 3.4. + try: + return os.cpu_count() or 0 + except AttributeError: + pass + + # Linux. + try: + return os.sysconf("SC_NPROCESSORS_ONLN") + except (AttributeError, ValueError): + pass + + # TODO implement on other platforms. + return 0 + + +class BufferSegment: + """Represents a segment within a ``BufferWithSegments``. + + This type is essentially a reference to N bytes within a + ``BufferWithSegments``. + + The object conforms to the buffer protocol. 
+ """ + + @property + def offset(self): + """The byte offset of this segment within its parent buffer.""" + raise NotImplementedError() + + def __len__(self): + """Obtain the length of the segment, in bytes.""" + raise NotImplementedError() + + def tobytes(self): + """Obtain bytes copy of this segment.""" + raise NotImplementedError() + + +class BufferSegments: + """Represents an array of ``(offset, length)`` integers. + + This type is effectively an index used by :py:class:`BufferWithSegments`. + + The array members are 64-bit unsigned integers using host/native bit order. + + Instances conform to the buffer protocol. + """ + + +class BufferWithSegments: + """A memory buffer containing N discrete items of known lengths. + + This type is essentially a fixed size memory address and an array + of 2-tuples of ``(offset, length)`` 64-bit unsigned native-endian + integers defining the byte offset and length of each segment within + the buffer. + + Instances behave like containers. + + Instances also conform to the buffer protocol. So a reference to the + backing bytes can be obtained via ``memoryview(o)``. A *copy* of the + backing bytes can be obtained via ``.tobytes()``. + + This type exists to facilitate operations against N>1 items without + the overhead of Python object creation and management. Used with + APIs like :py:meth:`ZstdDecompressor.multi_decompress_to_buffer`, it + is possible to decompress many objects in parallel without the GIL + held, leading to even better performance. + """ + + @property + def size(self): + """Total sizein bytes of the backing buffer.""" + raise NotImplementedError() + + def __len__(self): + raise NotImplementedError() + + def __getitem__(self, i): + """Obtains a segment within the buffer. + + The returned object references memory within this buffer. + + :param i: + Integer index of segment to retrieve. 
+ :return: + :py:class:`BufferSegment` + """ + raise NotImplementedError() + + def segments(self): + """Obtain the array of ``(offset, length)`` segments in the buffer. + + :return: + :py:class:`BufferSegments` + """ + raise NotImplementedError() + + def tobytes(self): + """Obtain bytes copy of this instance.""" + raise NotImplementedError() + + +class BufferWithSegmentsCollection: + """A virtual spanning view over multiple BufferWithSegments. + + Instances are constructed from 1 or more :py:class:`BufferWithSegments` + instances. The resulting object behaves like an ordered sequence whose + members are the segments within each ``BufferWithSegments``. + + If the object is composed of 2 ``BufferWithSegments`` instances with the + first having 2 segments and the second have 3 segments, then ``b[0]`` + and ``b[1]`` access segments in the first object and ``b[2]``, ``b[3]``, + and ``b[4]`` access segments from the second. + """ + + def __len__(self): + """The number of segments within all ``BufferWithSegments``.""" + raise NotImplementedError() + + def __getitem__(self, i): + """Obtain the ``BufferSegment`` at an offset.""" + raise NotImplementedError() + + +class ZstdError(Exception): + pass + + +def _zstd_error(zresult): + # Resolves to bytes on Python 2 and 3. We use the string for formatting + # into error messages, which will be literal unicode. So convert it to + # unicode. 
+ return ffi.string(lib.ZSTD_getErrorName(zresult)).decode("utf-8") + + +def _make_cctx_params(params): + res = lib.ZSTD_createCCtxParams() + if res == ffi.NULL: + raise MemoryError() + + res = ffi.gc(res, lib.ZSTD_freeCCtxParams) + + attrs = [ + (lib.ZSTD_c_format, params.format), + (lib.ZSTD_c_compressionLevel, params.compression_level), + (lib.ZSTD_c_windowLog, params.window_log), + (lib.ZSTD_c_hashLog, params.hash_log), + (lib.ZSTD_c_chainLog, params.chain_log), + (lib.ZSTD_c_searchLog, params.search_log), + (lib.ZSTD_c_minMatch, params.min_match), + (lib.ZSTD_c_targetLength, params.target_length), + (lib.ZSTD_c_strategy, params.strategy), + (lib.ZSTD_c_contentSizeFlag, params.write_content_size), + (lib.ZSTD_c_checksumFlag, params.write_checksum), + (lib.ZSTD_c_dictIDFlag, params.write_dict_id), + (lib.ZSTD_c_nbWorkers, params.threads), + (lib.ZSTD_c_jobSize, params.job_size), + (lib.ZSTD_c_overlapLog, params.overlap_log), + (lib.ZSTD_c_forceMaxWindow, params.force_max_window), + (lib.ZSTD_c_enableLongDistanceMatching, params.enable_ldm), + (lib.ZSTD_c_ldmHashLog, params.ldm_hash_log), + (lib.ZSTD_c_ldmMinMatch, params.ldm_min_match), + (lib.ZSTD_c_ldmBucketSizeLog, params.ldm_bucket_size_log), + (lib.ZSTD_c_ldmHashRateLog, params.ldm_hash_rate_log), + ] + + for param, value in attrs: + _set_compression_parameter(res, param, value) + + return res + + +class ZstdCompressionParameters(object): + """Low-level zstd compression parameters. + + This type represents a collection of parameters to control how zstd + compression is performed. + + Instances can be constructed from raw parameters or derived from a + base set of defaults specified from a compression level (recommended) + via :py:meth:`ZstdCompressionParameters.from_level`. + + >>> # Derive compression settings for compression level 7. 
+ >>> params = zstandard.ZstdCompressionParameters.from_level(7) + + >>> # With an input size of 1MB + >>> params = zstandard.ZstdCompressionParameters.from_level(7, source_size=1048576) + + Using ``from_level()``, it is also possible to override individual compression + parameters or to define additional settings that aren't automatically derived. + e.g.: + + >>> params = zstandard.ZstdCompressionParameters.from_level(4, window_log=10) + >>> params = zstandard.ZstdCompressionParameters.from_level(5, threads=4) + + Or you can define low-level compression settings directly: + + >>> params = zstandard.ZstdCompressionParameters(window_log=12, enable_ldm=True) + + Once a ``ZstdCompressionParameters`` instance is obtained, it can be used to + configure a compressor: + + >>> cctx = zstandard.ZstdCompressor(compression_params=params) + + Some of these are very low-level settings. It may help to consult the official + zstandard documentation for their behavior. Look for the ``ZSTD_p_*`` constants + in ``zstd.h`` (https://github.com/facebook/zstd/blob/dev/lib/zstd.h). + """ + + @staticmethod + def from_level(level, source_size=0, dict_size=0, **kwargs): + """Create compression parameters from a compression level. + + :param level: + Integer compression level. + :param source_size: + Integer size in bytes of source to be compressed. + :param dict_size: + Integer size in bytes of compression dictionary to use. 
+ :return: + :py:class:`ZstdCompressionParameters` + """ + params = lib.ZSTD_getCParams(level, source_size, dict_size) + + args = { + "window_log": "windowLog", + "chain_log": "chainLog", + "hash_log": "hashLog", + "search_log": "searchLog", + "min_match": "minMatch", + "target_length": "targetLength", + "strategy": "strategy", + } + + for arg, attr in args.items(): + if arg not in kwargs: + kwargs[arg] = getattr(params, attr) + + return ZstdCompressionParameters(**kwargs) + + def __init__( + self, + format=0, + compression_level=0, + window_log=0, + hash_log=0, + chain_log=0, + search_log=0, + min_match=0, + target_length=0, + strategy=-1, + write_content_size=1, + write_checksum=0, + write_dict_id=0, + job_size=0, + overlap_log=-1, + force_max_window=0, + enable_ldm=0, + ldm_hash_log=0, + ldm_min_match=0, + ldm_bucket_size_log=0, + ldm_hash_rate_log=-1, + threads=0, + ): + params = lib.ZSTD_createCCtxParams() + if params == ffi.NULL: + raise MemoryError() + + params = ffi.gc(params, lib.ZSTD_freeCCtxParams) + + self._params = params + + if threads < 0: + threads = _cpu_count() + + # We need to set ZSTD_c_nbWorkers before ZSTD_c_jobSize and ZSTD_c_overlapLog + # because setting ZSTD_c_nbWorkers resets the other parameters. 
+ _set_compression_parameter(params, lib.ZSTD_c_nbWorkers, threads) + + _set_compression_parameter(params, lib.ZSTD_c_format, format) + _set_compression_parameter( + params, lib.ZSTD_c_compressionLevel, compression_level + ) + _set_compression_parameter(params, lib.ZSTD_c_windowLog, window_log) + _set_compression_parameter(params, lib.ZSTD_c_hashLog, hash_log) + _set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log) + _set_compression_parameter(params, lib.ZSTD_c_searchLog, search_log) + _set_compression_parameter(params, lib.ZSTD_c_minMatch, min_match) + _set_compression_parameter( + params, lib.ZSTD_c_targetLength, target_length + ) + + if strategy == -1: + strategy = 0 + + _set_compression_parameter(params, lib.ZSTD_c_strategy, strategy) + _set_compression_parameter( + params, lib.ZSTD_c_contentSizeFlag, write_content_size + ) + _set_compression_parameter( + params, lib.ZSTD_c_checksumFlag, write_checksum + ) + _set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id) + _set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size) + + if overlap_log == -1: + overlap_log = 0 + + _set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log) + _set_compression_parameter( + params, lib.ZSTD_c_forceMaxWindow, force_max_window + ) + _set_compression_parameter( + params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm + ) + _set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log) + _set_compression_parameter( + params, lib.ZSTD_c_ldmMinMatch, ldm_min_match + ) + _set_compression_parameter( + params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log + ) + + if ldm_hash_rate_log == -1: + ldm_hash_rate_log = 0 + + _set_compression_parameter( + params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log + ) + + @property + def format(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_format) + + @property + def compression_level(self): + return _get_compression_parameter( + self._params, 
lib.ZSTD_c_compressionLevel + ) + + @property + def window_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_windowLog) + + @property + def hash_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_hashLog) + + @property + def chain_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_chainLog) + + @property + def search_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_searchLog) + + @property + def min_match(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_minMatch) + + @property + def target_length(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_targetLength) + + @property + def strategy(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_strategy) + + @property + def write_content_size(self): + return _get_compression_parameter( + self._params, lib.ZSTD_c_contentSizeFlag + ) + + @property + def write_checksum(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_checksumFlag) + + @property + def write_dict_id(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_dictIDFlag) + + @property + def job_size(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_jobSize) + + @property + def overlap_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_overlapLog) + + @property + def force_max_window(self): + return _get_compression_parameter( + self._params, lib.ZSTD_c_forceMaxWindow + ) + + @property + def enable_ldm(self): + return _get_compression_parameter( + self._params, lib.ZSTD_c_enableLongDistanceMatching + ) + + @property + def ldm_hash_log(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashLog) + + @property + def ldm_min_match(self): + return _get_compression_parameter(self._params, lib.ZSTD_c_ldmMinMatch) + + @property + def ldm_bucket_size_log(self): + return _get_compression_parameter( + self._params, lib.ZSTD_c_ldmBucketSizeLog + ) + + 
    @property
    def ldm_hash_rate_log(self):
        return _get_compression_parameter(
            self._params, lib.ZSTD_c_ldmHashRateLog
        )

    @property
    def threads(self):
        return _get_compression_parameter(self._params, lib.ZSTD_c_nbWorkers)

    def estimated_compression_context_size(self):
        """Estimated size in bytes needed to compress with these parameters."""
        return lib.ZSTD_estimateCCtxSize_usingCCtxParams(self._params)


def estimate_decompression_context_size():
    """Estimate the memory size requirements for a decompressor instance.

    :return:
        Integer number of bytes.
    """
    return lib.ZSTD_estimateDCtxSize()


def _set_compression_parameter(params, param, value):
    """Set ``param`` to ``value`` on a ``ZSTD_CCtx_params`` object.

    Wraps ``ZSTD_CCtxParams_setParameter`` and converts a zstd error code
    into a :py:class:`ZstdError`.
    """
    zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
    if lib.ZSTD_isError(zresult):
        raise ZstdError(
            "unable to set compression context parameter: %s"
            % _zstd_error(zresult)
        )


def _get_compression_parameter(params, param):
    """Read ``param`` from a ``ZSTD_CCtx_params`` object as an int.

    Wraps ``ZSTD_CCtxParams_getParameter`` and converts a zstd error code
    into a :py:class:`ZstdError`.
    """
    # Out-parameter for the C call; the int value is copied out below.
    result = ffi.new("int *")

    zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
    if lib.ZSTD_isError(zresult):
        raise ZstdError(
            "unable to get compression context parameter: %s"
            % _zstd_error(zresult)
        )

    return result[0]
class ZstdCompressionWriter(object):
    """Writable compressing stream wrapper.

    ``ZstdCompressionWriter`` is a write-only stream interface for writing
    compressed data to another stream.

    This type conforms to the ``io.RawIOBase`` interface and should be usable
    by any type that operates against a *file-object* (``typing.BinaryIO``
    in Python type hinting speak). Only methods that involve writing will do
    useful things.

    As data is written to this stream (e.g. via ``write()``), that data
    is sent to the compressor. As compressed data becomes available from
    the compressor, it is sent to the underlying stream by calling its
    ``write()`` method.

    Both ``write()`` and ``flush()`` return the number of bytes written to the
    object's ``write()``. In many cases, small inputs do not accumulate enough
    data to cause a write and ``write()`` will return ``0``.

    Calling ``close()`` will mark the stream as closed and subsequent I/O
    operations will raise ``ValueError`` (per the documented behavior of
    ``io.RawIOBase``). ``close()`` will also call ``close()`` on the underlying
    stream if such a method exists and the instance was constructed with
    ``closefd=True``.

    Instances are obtained by calling :py:meth:`ZstdCompressor.stream_writer`.

    Typical usage is as follows:

    >>> cctx = zstandard.ZstdCompressor(level=10)
    >>> compressor = cctx.stream_writer(fh)
    >>> compressor.write(b"chunk 0\\n")
    >>> compressor.write(b"chunk 1\\n")
    >>> compressor.flush()
    >>> # Receiver will be able to decode ``chunk 0\\nchunk 1\\n`` at this point.
    >>> # Receiver is also expecting more data in the zstd *frame*.
    >>>
    >>> compressor.write(b"chunk 2\\n")
    >>> compressor.flush(zstandard.FLUSH_FRAME)
    >>> # Receiver will be able to decode ``chunk 0\\nchunk 1\\nchunk 2``.
    >>> # Receiver is expecting no more data, as the zstd frame is closed.
    >>> # Any future calls to ``write()`` at this point will construct a new
    >>> # zstd frame.

    Instances can be used as context managers. Exiting the context manager is
    the equivalent of calling ``close()``, which is equivalent to calling
    ``flush(zstandard.FLUSH_FRAME)``:

    >>> cctx = zstandard.ZstdCompressor(level=10)
    >>> with cctx.stream_writer(fh) as compressor:
    ...     compressor.write(b'chunk 0')
    ...     compressor.write(b'chunk 1')
    ...     ...

    .. important::

       If ``flush(FLUSH_FRAME)`` is not called, emitted data doesn't
       constitute a full zstd *frame* and consumers of this data may complain
       about malformed input. It is recommended to use instances as a context
       manager to ensure *frames* are properly finished.

    If the size of the data being fed to this streaming compressor is known,
    you can declare it before compression begins:

    >>> cctx = zstandard.ZstdCompressor()
    >>> with cctx.stream_writer(fh, size=data_len) as compressor:
    ...     compressor.write(chunk0)
    ...     compressor.write(chunk1)
    ...     ...

    Declaring the size of the source data allows compression parameters to
    be tuned. And if ``write_content_size`` is used, it also results in the
    content size being written into the frame header of the output data.

    The size of chunks being ``write()`` to the destination can be specified:

    >>> cctx = zstandard.ZstdCompressor()
    >>> with cctx.stream_writer(fh, write_size=32768) as compressor:
    ...     ...

    To see how much memory is being used by the streaming compressor:

    >>> cctx = zstandard.ZstdCompressor()
    >>> with cctx.stream_writer(fh) as compressor:
    ...     ...
    ...     byte_size = compressor.memory_size()

    The total number of bytes written so far are exposed via ``tell()``:

    >>> cctx = zstandard.ZstdCompressor()
    >>> with cctx.stream_writer(fh) as compressor:
    ...     ...
    ...     total_written = compressor.tell()

    ``stream_writer()`` accepts a ``write_return_read`` boolean argument to
    control the return value of ``write()``. When ``False`` (the default),
    ``write()`` returns the number of bytes that were ``write()``'en to the
    underlying object. When ``True``, ``write()`` returns the number of bytes
    read from the input that were subsequently written to the compressor.
    ``True`` is the *proper* behavior for ``write()`` as specified by the
    ``io.RawIOBase`` interface and will become the default value in a future
    release.
    """

    def __init__(
        self,
        compressor,
        writer,
        source_size,
        write_size,
        write_return_read,
        closefd=True,
    ):
        self._compressor = compressor
        self._writer = writer
        self._write_size = write_size
        self._write_return_read = bool(write_return_read)
        self._closefd = bool(closefd)
        self._entered = False
        # _closing suppresses the inner stream's flush() while close() runs.
        self._closing = False
        self._closed = False
        self._bytes_compressed = 0

        # Reusable output buffer; _dst_buffer keeps the backing memory alive.
        self._dst_buffer = ffi.new("char[]", write_size)
        self._out_buffer = ffi.new("ZSTD_outBuffer *")
        self._out_buffer.dst = self._dst_buffer
        self._out_buffer.size = len(self._dst_buffer)
        self._out_buffer.pos = 0

        # Declare the input size up front so frame parameters can be tuned.
        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size)
        if lib.ZSTD_isError(zresult):
            raise ZstdError(
                "error setting source size: %s" % _zstd_error(zresult)
            )

    def __enter__(self):
        if self._closed:
            raise ValueError("stream is closed")

        if self._entered:
            raise ZstdError("cannot __enter__ multiple times")

        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._entered = False
        # close() finishes the zstd frame via flush(FLUSH_FRAME).
        self.close()
        self._compressor = None

        return False

    def __iter__(self):
        raise io.UnsupportedOperation()

    def __next__(self):
        raise io.UnsupportedOperation()

    def memory_size(self):
        """Size in bytes of the underlying ``ZSTD_CCtx``."""
        return lib.ZSTD_sizeof_CCtx(self._compressor._cctx)

    def fileno(self):
        # Delegate to the inner stream when it exposes a file descriptor.
        f = getattr(self._writer, "fileno", None)
        if f:
            return f()
        else:
            raise OSError("fileno not available on underlying writer")

    def close(self):
        if self._closed:
            return

        try:
            self._closing = True
            # Finish the zstd frame before marking the stream closed.
            self.flush(FLUSH_FRAME)
        finally:
            self._closing = False
            self._closed = True

        # Call close() on underlying stream as well.
        f = getattr(self._writer, "close", None)
        if self._closefd and f:
            f()

    @property
    def closed(self):
        return self._closed

    def isatty(self):
        return False

    def readable(self):
        return False

    def readline(self, size=-1):
        raise io.UnsupportedOperation()

    def readlines(self, hint=-1):
        raise io.UnsupportedOperation()

    def seek(self, offset, whence=None):
        raise io.UnsupportedOperation()

    def seekable(self):
        return False

    def truncate(self, size=None):
        raise io.UnsupportedOperation()

    def writable(self):
        return True

    def writelines(self, lines):
        raise NotImplementedError("writelines() is not yet implemented")

    def read(self, size=-1):
        raise io.UnsupportedOperation()

    def readall(self):
        raise io.UnsupportedOperation()

    def readinto(self, b):
        raise io.UnsupportedOperation()

    def write(self, data):
        """Send data to the compressor and possibly to the inner stream."""
        if self._closed:
            raise ValueError("stream is closed")

        total_write = 0

        data_buffer = ffi.from_buffer(data)

        in_buffer = ffi.new("ZSTD_inBuffer *")
        in_buffer.src = data_buffer
        in_buffer.size = len(data_buffer)
        in_buffer.pos = 0

        out_buffer = self._out_buffer
        out_buffer.pos = 0

        # Feed the whole input; drain the output buffer to the inner stream
        # each time the compressor produces data.
        while in_buffer.pos < in_buffer.size:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx,
                out_buffer,
                in_buffer,
                lib.ZSTD_e_continue,
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            if out_buffer.pos:
                self._writer.write(
                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
                )
                total_write += out_buffer.pos
                self._bytes_compressed += out_buffer.pos
                out_buffer.pos = 0

        if self._write_return_read:
            # io.RawIOBase semantics: bytes consumed from the input.
            return in_buffer.pos
        else:
            # Legacy semantics: bytes written to the inner stream.
            return total_write

    def flush(self, flush_mode=FLUSH_BLOCK):
        """Evict data from compressor's internal state and write it to inner stream.

        Calling this method may result in 0 or more ``write()`` calls to the
        inner stream.

        This method will also call ``flush()`` on the inner stream, if such a
        method exists.

        :param flush_mode:
            How to flush the zstd compressor.

            ``zstandard.FLUSH_BLOCK`` will flush data already sent to the
            compressor but not emitted to the inner stream. The stream is still
            writable after calling this. This is the default behavior.

            See documentation for other ``zstandard.FLUSH_*`` constants for more
            flushing options.
        :return:
            Integer number of bytes written to the inner stream.
        """

        if flush_mode == FLUSH_BLOCK:
            flush = lib.ZSTD_e_flush
        elif flush_mode == FLUSH_FRAME:
            flush = lib.ZSTD_e_end
        else:
            raise ValueError("unknown flush_mode: %r" % flush_mode)

        if self._closed:
            raise ValueError("stream is closed")

        total_write = 0

        out_buffer = self._out_buffer
        out_buffer.pos = 0

        # Empty input: we are only draining the compressor's internal state.
        in_buffer = ffi.new("ZSTD_inBuffer *")
        in_buffer.src = ffi.NULL
        in_buffer.size = 0
        in_buffer.pos = 0

        while True:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, out_buffer, in_buffer, flush
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            if out_buffer.pos:
                self._writer.write(
                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
                )
                total_write += out_buffer.pos
                self._bytes_compressed += out_buffer.pos
                out_buffer.pos = 0

            # A zero return from ZSTD_compressStream2 means the flush is done.
            if not zresult:
                break

        # Propagate the flush to the inner stream unless we're inside close(),
        # where the inner stream may be about to be closed anyway.
        f = getattr(self._writer, "flush", None)
        if f and not self._closing:
            f()

        return total_write

    def tell(self):
        # Total compressed bytes emitted to the inner stream so far.
        return self._bytes_compressed
class ZstdCompressionObj(object):
    """A compressor conforming to the API in Python's standard library.

    This type implements an API similar to compression types in Python's
    standard library such as ``zlib.compressobj`` and ``bz2.BZ2Compressor``.
    This enables existing code targeting the standard library API to swap
    in this type to achieve zstd compression.

    .. important::

       The design of this API is not ideal for optimal performance.

       The reason performance is not optimal is because the API is limited to
       returning a single buffer holding compressed data. When compressing
       data, we don't know how much data will be emitted. So in order to
       capture all this data in a single buffer, we need to perform buffer
       reallocations and/or extra memory copies. This can add significant
       overhead depending on the size or nature of the compressed data how
       much your application calls this type.

       If performance is critical, consider an API like
       :py:meth:`ZstdCompressor.stream_reader`,
       :py:meth:`ZstdCompressor.stream_writer`,
       :py:meth:`ZstdCompressor.chunker`, or
       :py:meth:`ZstdCompressor.read_to_iter`, which result in less overhead
       managing buffers.

    Instances are obtained by calling :py:meth:`ZstdCompressor.compressobj`.

    Here is how this API should be used:

    >>> cctx = zstandard.ZstdCompressor()
    >>> cobj = cctx.compressobj()
    >>> data = cobj.compress(b"raw input 0")
    >>> data = cobj.compress(b"raw input 1")
    >>> data = cobj.flush()

    Or to flush blocks:

    >>> cctx = zstandard.ZstdCompressor()
    >>> cobj = cctx.compressobj()
    >>> data = cobj.compress(b"chunk in first block")
    >>> data = cobj.flush(zstandard.COMPRESSOBJ_FLUSH_BLOCK)
    >>> data = cobj.compress(b"chunk in second block")
    >>> data = cobj.flush()

    For best performance results, keep input chunks under 256KB. This avoids
    extra allocations for a large output object.

    It is possible to declare the input size of the data that will be fed
    into the compressor:

    >>> cctx = zstandard.ZstdCompressor()
    >>> cobj = cctx.compressobj(size=6)
    >>> data = cobj.compress(b"foobar")
    >>> data = cobj.flush()
    """

    # NOTE(review): instances are constructed by ZstdCompressor.compressobj(),
    # which is expected to set _compressor, _out and _finished — no __init__
    # is defined here.

    def compress(self, data):
        """Send data to the compressor.

        This method receives bytes to feed to the compressor and returns
        bytes constituting zstd compressed data.

        The zstd compressor accumulates bytes and the returned bytes may be
        substantially smaller or larger than the size of the input data on
        any given call. The returned value may be the empty byte string
        (``b""``).

        :param data:
            Data to write to the compressor.
        :return:
            Compressed data.
        """
        if self._finished:
            raise ZstdError("cannot call compress() after compressor finished")

        data_buffer = ffi.from_buffer(data)
        source = ffi.new("ZSTD_inBuffer *")
        source.src = data_buffer
        source.size = len(data_buffer)
        source.pos = 0

        chunks = []

        # Feed until the compressor has consumed all input, collecting
        # whatever output it produces along the way.
        while source.pos < len(data):
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, self._out, source, lib.ZSTD_e_continue
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            if self._out.pos:
                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
                self._out.pos = 0

        return b"".join(chunks)

    def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
        """Emit data accumulated in the compressor that hasn't been outputted yet.

        The ``flush_mode`` argument controls how to end the stream.

        ``zstandard.COMPRESSOBJ_FLUSH_FINISH`` (the default) ends the
        compression stream and finishes a zstd frame. Once this type of flush
        is performed, ``compress()`` and ``flush()`` can no longer be called.
        This type of flush **must** be called to end the compression context. If
        not called, the emitted data may be incomplete and may not be readable
        by a decompressor.

        ``zstandard.COMPRESSOBJ_FLUSH_BLOCK`` will flush a zstd block. This
        ensures that all data fed to this instance will have been emitted and
        can be decoded by a decompressor. Flushes of this type can be performed
        multiple times. The next call to ``compress()`` will begin a new zstd
        block.

        :param flush_mode:
            How to flush the zstd compressor.
        :return:
            Compressed data.
        """
        if flush_mode not in (
            COMPRESSOBJ_FLUSH_FINISH,
            COMPRESSOBJ_FLUSH_BLOCK,
        ):
            raise ValueError("flush mode not recognized")

        if self._finished:
            raise ZstdError("compressor object already finished")

        if flush_mode == COMPRESSOBJ_FLUSH_BLOCK:
            z_flush_mode = lib.ZSTD_e_flush
        elif flush_mode == COMPRESSOBJ_FLUSH_FINISH:
            z_flush_mode = lib.ZSTD_e_end
            self._finished = True
        else:
            raise ZstdError("unhandled flush mode")

        # compress() always drains the output buffer before returning.
        assert self._out.pos == 0

        in_buffer = ffi.new("ZSTD_inBuffer *")
        in_buffer.src = ffi.NULL
        in_buffer.size = 0
        in_buffer.pos = 0

        chunks = []

        while True:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, self._out, in_buffer, z_flush_mode
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "error ending compression stream: %s" % _zstd_error(zresult)
                )

            if self._out.pos:
                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
                self._out.pos = 0

            # Zero means the flush/end operation has fully completed.
            if not zresult:
                break

        return b"".join(chunks)
class ZstdCompressionChunker(object):
    """Compress data to uniformly sized chunks.

    This type allows you to iteratively feed chunks of data into a compressor
    and produce output chunks of uniform size.

    ``compress()``, ``flush()``, and ``finish()`` all return an iterator of
    ``bytes`` instances holding compressed data. The iterator may be empty.
    Callers MUST iterate through all elements of the returned iterator before
    performing another operation on the object or else the compressor's
    internal state may become confused. This can result in an exception being
    raised or malformed data being emitted.

    All chunks emitted by ``compress()`` will have a length of the configured
    chunk size.

    ``flush()`` and ``finish()`` may return a final chunk smaller than
    the configured chunk size.

    Instances are obtained by calling :py:meth:`ZstdCompressor.chunker`.

    Here is how the API should be used:

    >>> cctx = zstandard.ZstdCompressor()
    >>> chunker = cctx.chunker(chunk_size=32768)
    >>>
    >>> with open(path, 'rb') as fh:
    ...     while True:
    ...         in_chunk = fh.read(32768)
    ...         if not in_chunk:
    ...             break
    ...
    ...         for out_chunk in chunker.compress(in_chunk):
    ...             # Do something with output chunk of size 32768.
    ...
    ...     for out_chunk in chunker.finish():
    ...         # Do something with output chunks that finalize the zstd frame.

    This compressor type is often a better alternative to
    :py:class:`ZstdCompressor.compressobj` because it has better performance
    properties.

    ``compressobj()`` will emit output data as it is available. This results
    in a *stream* of output chunks of varying sizes. The consistency of the
    output chunk size with ``chunker()`` is more appropriate for many usages,
    such as sending compressed data to a socket.

    ``compressobj()`` may also perform extra memory reallocations in order
    to dynamically adjust the sizes of the output chunks. Since ``chunker()``
    output chunks are all the same size (except for flushed or final chunks),
    there is less memory allocation/copying overhead.
    """

    def __init__(self, compressor, chunk_size):
        self._compressor = compressor
        # Fixed-size output buffer; full buffers become the emitted chunks.
        self._out = ffi.new("ZSTD_outBuffer *")
        self._dst_buffer = ffi.new("char[]", chunk_size)
        self._out.dst = self._dst_buffer
        self._out.size = chunk_size
        self._out.pos = 0

        # Persistent input buffer; src != NULL signals un-consumed input from
        # a previous, not-fully-iterated compress() call.
        self._in = ffi.new("ZSTD_inBuffer *")
        self._in.src = ffi.NULL
        self._in.size = 0
        self._in.pos = 0
        self._finished = False

    def compress(self, data):
        """Feed new input data into the compressor.

        :param data:
            Data to feed to compressor.
        :return:
            Iterator of ``bytes`` representing chunks of compressed data.
        """
        if self._finished:
            raise ZstdError("cannot call compress() after compression finished")

        if self._in.src != ffi.NULL:
            raise ZstdError(
                "cannot perform operation before consuming output "
                "from previous operation"
            )

        data_buffer = ffi.from_buffer(data)

        # Empty input produces no work; return an (empty) generator.
        if not len(data_buffer):
            return

        self._in.src = data_buffer
        self._in.size = len(data_buffer)
        self._in.pos = 0

        while self._in.pos < self._in.size:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, self._out, self._in, lib.ZSTD_e_continue
            )

            # Reset the input buffer as soon as it is consumed so a partially
            # iterated generator doesn't leave stale src behind.
            if self._in.pos == self._in.size:
                self._in.src = ffi.NULL
                self._in.size = 0
                self._in.pos = 0

            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            # Only emit completely full buffers; partial data waits for more
            # input or for flush()/finish().
            if self._out.pos == self._out.size:
                yield ffi.buffer(self._out.dst, self._out.pos)[:]
                self._out.pos = 0

    def flush(self):
        """Flushes all data currently in the compressor.

        :return:
            Iterator of ``bytes`` of compressed data.
        """
        if self._finished:
            raise ZstdError("cannot call flush() after compression finished")

        if self._in.src != ffi.NULL:
            raise ZstdError(
                "cannot call flush() before consuming output from "
                "previous operation"
            )

        while True:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            if self._out.pos:
                yield ffi.buffer(self._out.dst, self._out.pos)[:]
                self._out.pos = 0

            # Zero return: flush complete.
            if not zresult:
                return

    def finish(self):
        """Signals the end of input data.

        No new data can be compressed after this method is called.

        This method will flush buffered data and finish the zstd frame.

        :return:
            Iterator of ``bytes`` of compressed data.
        """
        if self._finished:
            raise ZstdError("cannot call finish() after compression finished")

        if self._in.src != ffi.NULL:
            raise ZstdError(
                "cannot call finish() before consuming output from "
                "previous operation"
            )

        while True:
            zresult = lib.ZSTD_compressStream2(
                self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end
            )
            if lib.ZSTD_isError(zresult):
                raise ZstdError(
                    "zstd compress error: %s" % _zstd_error(zresult)
                )

            if self._out.pos:
                yield ffi.buffer(self._out.dst, self._out.pos)[:]
                self._out.pos = 0

            # Zero return: frame is finished; lock out further operations.
            if not zresult:
                self._finished = True
                return
class ZstdCompressionReader(object):
    """Readable compressing stream wrapper.

    ``ZstdCompressionReader`` is a read-only stream interface for obtaining
    compressed data from a source.

    This type conforms to the ``io.RawIOBase`` interface and should be usable
    by any type that operates against a *file-object* (``typing.BinaryIO``
    in Python type hinting speak).

    Instances are neither writable nor seekable (even if the underlying
    source is seekable). ``readline()`` and ``readlines()`` are not implemented
    because they don't make sense for compressed data. ``tell()`` returns the
    number of compressed bytes emitted so far.

    Instances are obtained by calling :py:meth:`ZstdCompressor.stream_reader`.

    In this example, we open a file for reading and then wrap that file
    handle with a stream from which compressed data can be ``read()``.

    >>> with open(path, 'rb') as fh:
    ...     cctx = zstandard.ZstdCompressor()
    ...     reader = cctx.stream_reader(fh)
    ...     while True:
    ...         chunk = reader.read(16384)
    ...         if not chunk:
    ...             break
    ...
    ...         # Do something with compressed chunk.

    Instances can also be used as context managers:

    >>> with open(path, 'rb') as fh:
    ...     cctx = zstandard.ZstdCompressor()
    ...     with cctx.stream_reader(fh) as reader:
    ...         while True:
    ...             chunk = reader.read(16384)
    ...             if not chunk:
    ...                 break
    ...
    ...             # Do something with compressed chunk.

    When the context manager exits or ``close()`` is called, the stream is
    closed, underlying resources are released, and future operations against
    the compression stream will fail.

    ``stream_reader()`` accepts a ``size`` argument specifying how large the
    input stream is. This is used to adjust compression parameters so they are
    tailored to the source size. e.g.

    >>> with open(path, 'rb') as fh:
    ...     cctx = zstandard.ZstdCompressor()
    ...     with cctx.stream_reader(fh, size=os.stat(path).st_size) as reader:
    ...         ...

    If the ``source`` is a stream, you can specify how large ``read()``
    requests to that stream should be via the ``read_size`` argument.
    It defaults to ``zstandard.COMPRESSION_RECOMMENDED_INPUT_SIZE``. e.g.

    >>> with open(path, 'rb') as fh:
    ...     cctx = zstandard.ZstdCompressor()
    ...     # Will perform fh.read(8192) when obtaining data to feed into the
    ...     # compressor.
    ...     with cctx.stream_reader(fh, read_size=8192) as reader:
    ...         ...
    """

    def __init__(self, compressor, source, read_size, closefd=True):
        self._compressor = compressor
        # source is either a stream with a read() method or a buffer-like
        # object usable with ffi.from_buffer().
        self._source = source
        self._read_size = read_size
        self._closefd = closefd
        self._entered = False
        self._closed = False
        self._bytes_compressed = 0
        self._finished_input = False
        self._finished_output = False

        self._in_buffer = ffi.new("ZSTD_inBuffer *")
        # Holds a ref so backing bytes in self._in_buffer stay alive.
        self._source_buffer = None

    def __enter__(self):
        if self._entered:
            raise ValueError("cannot __enter__ multiple times")

        if self._closed:
            raise ValueError("stream is closed")

        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._entered = False
        self._compressor = None
        self.close()
        self._source = None

        return False

    def readable(self):
        return True

    def writable(self):
        return False

    def seekable(self):
        return False

    def readline(self):
        raise io.UnsupportedOperation()

    def readlines(self):
        raise io.UnsupportedOperation()

    def write(self, data):
        raise OSError("stream is not writable")

    def writelines(self, ignored):
        raise OSError("stream is not writable")

    def isatty(self):
        return False

    def flush(self):
        return None

    def close(self):
        if self._closed:
            return

        self._closed = True

        f = getattr(self._source, "close", None)
        if self._closefd and f:
            f()

    @property
    def closed(self):
        return self._closed

    def tell(self):
        # Number of compressed bytes emitted so far.
        return self._bytes_compressed

    def readall(self):
        chunks = []

        while True:
            chunk = self.read(1048576)
            if not chunk:
                break

            chunks.append(chunk)

        return b"".join(chunks)

    def __iter__(self):
        raise io.UnsupportedOperation()

    def __next__(self):
        raise io.UnsupportedOperation()

    next = __next__

    def _read_input(self):
        """Pull the next chunk of input from the source into self._in_buffer."""
        if self._finished_input:
            return

        if hasattr(self._source, "read"):
            data = self._source.read(self._read_size)

            if not data:
                self._finished_input = True
                return

            # Keep a reference so the CFFI buffer's backing memory survives.
            self._source_buffer = ffi.from_buffer(data)
            self._in_buffer.src = self._source_buffer
            self._in_buffer.size = len(self._source_buffer)
            self._in_buffer.pos = 0
        else:
            # Non-stream source: the whole buffer is the input.
            self._source_buffer = ffi.from_buffer(self._source)
            self._in_buffer.src = self._source_buffer
            self._in_buffer.size = len(self._source_buffer)
            self._in_buffer.pos = 0

    def _compress_into_buffer(self, out_buffer):
        """Compress pending input into out_buffer.

        Returns truthy if out_buffer was completely filled (more output may
        be pending inside the compressor).
        """
        if self._in_buffer.pos >= self._in_buffer.size:
            return

        old_pos = out_buffer.pos

        zresult = lib.ZSTD_compressStream2(
            self._compressor._cctx,
            out_buffer,
            self._in_buffer,
            lib.ZSTD_e_continue,
        )

        self._bytes_compressed += out_buffer.pos - old_pos

        if self._in_buffer.pos == self._in_buffer.size:
            self._in_buffer.src = ffi.NULL
            self._in_buffer.pos = 0
            self._in_buffer.size = 0
            self._source_buffer = None

            # A non-stream source is a single buffer: once consumed, input
            # is exhausted.
            if not hasattr(self._source, "read"):
                self._finished_input = True

        if lib.ZSTD_isError(zresult):
            # BUG FIX: was ZstdError("zstd compress error: %s", _zstd_error(...))
            # — the comma passed the detail as a second argument instead of
            # %-formatting it into the message.
            raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))

        return out_buffer.pos and out_buffer.pos == out_buffer.size

    def read(self, size=-1):
        if self._closed:
            raise ValueError("stream is closed")

        if size < -1:
            raise ValueError("cannot read negative amounts less than -1")

        if size == -1:
            return self.readall()

        if self._finished_output or size == 0:
            return b""

        # Need a dedicated ref to dest buffer otherwise it gets collected.
        dst_buffer = ffi.new("char[]", size)
        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dst_buffer
        out_buffer.size = size
        out_buffer.pos = 0

        if self._compress_into_buffer(out_buffer):
            return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

        while not self._finished_input:
            self._read_input()

            if self._compress_into_buffer(out_buffer):
                return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

        # EOF
        old_pos = out_buffer.pos

        zresult = lib.ZSTD_compressStream2(
            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
        )

        self._bytes_compressed += out_buffer.pos - old_pos

        if lib.ZSTD_isError(zresult):
            # BUG FIX: "%s", arg -> "%s" % arg (message was never formatted).
            raise ZstdError(
                "error ending compression stream: %s" % _zstd_error(zresult)
            )

        if zresult == 0:
            self._finished_output = True

        return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

    def read1(self, size=-1):
        if self._closed:
            raise ValueError("stream is closed")

        if size < -1:
            raise ValueError("cannot read negative amounts less than -1")

        if self._finished_output or size == 0:
            return b""

        # -1 returns arbitrary number of bytes.
        if size == -1:
            size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE

        dst_buffer = ffi.new("char[]", size)
        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dst_buffer
        out_buffer.size = size
        out_buffer.pos = 0

        # read1() dictates that we can perform at most 1 call to the
        # underlying stream to get input. However, we can't satisfy this
        # restriction with compression because not all input generates output.
        # It is possible to perform a block flush in order to ensure output.
        # But this may not be desirable behavior. So we allow multiple read()
        # to the underlying stream. But unlike read(), we stop once we have
        # any output.

        self._compress_into_buffer(out_buffer)
        if out_buffer.pos:
            return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

        while not self._finished_input:
            self._read_input()

            # If we've filled the output buffer, return immediately.
            if self._compress_into_buffer(out_buffer):
                return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

            # If we've populated the output buffer and we're not at EOF,
            # also return, as we've satisfied the read1() limits.
            if out_buffer.pos and not self._finished_input:
                return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

            # Else if we're at EOS and we have room left in the buffer,
            # fall through to below and try to add more data to the output.

        # EOF.
        old_pos = out_buffer.pos

        zresult = lib.ZSTD_compressStream2(
            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
        )

        self._bytes_compressed += out_buffer.pos - old_pos

        if lib.ZSTD_isError(zresult):
            raise ZstdError(
                "error ending compression stream: %s" % _zstd_error(zresult)
            )

        if zresult == 0:
            self._finished_output = True

        return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]

    def readinto(self, b):
        if self._closed:
            raise ValueError("stream is closed")

        if self._finished_output:
            return 0

        # TODO use writable=True once we require CFFI >= 1.12.
        dest_buffer = ffi.from_buffer(b)
        ffi.memmove(b, b"", 0)
        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dest_buffer
        out_buffer.size = len(dest_buffer)
        out_buffer.pos = 0

        if self._compress_into_buffer(out_buffer):
            return out_buffer.pos

        while not self._finished_input:
            self._read_input()
            if self._compress_into_buffer(out_buffer):
                return out_buffer.pos

        # EOF.
        old_pos = out_buffer.pos
        zresult = lib.ZSTD_compressStream2(
            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
        )

        self._bytes_compressed += out_buffer.pos - old_pos

        if lib.ZSTD_isError(zresult):
            # BUG FIX: "%s", arg -> "%s" % arg, matching read1()/readinto1().
            raise ZstdError(
                "error ending compression stream: %s" % _zstd_error(zresult)
            )

        if zresult == 0:
            self._finished_output = True

        return out_buffer.pos

    def readinto1(self, b):
        if self._closed:
            raise ValueError("stream is closed")

        if self._finished_output:
            return 0

        # TODO use writable=True once we require CFFI >= 1.12.
        dest_buffer = ffi.from_buffer(b)
        ffi.memmove(b, b"", 0)

        out_buffer = ffi.new("ZSTD_outBuffer *")
        out_buffer.dst = dest_buffer
        out_buffer.size = len(dest_buffer)
        out_buffer.pos = 0

        self._compress_into_buffer(out_buffer)
        if out_buffer.pos:
            return out_buffer.pos

        while not self._finished_input:
            self._read_input()

            if self._compress_into_buffer(out_buffer):
                return out_buffer.pos

            if out_buffer.pos and not self._finished_input:
                return out_buffer.pos

        # EOF.
        old_pos = out_buffer.pos

        zresult = lib.ZSTD_compressStream2(
            self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end
        )

        self._bytes_compressed += out_buffer.pos - old_pos

        if lib.ZSTD_isError(zresult):
            raise ZstdError(
                "error ending compression stream: %s" % _zstd_error(zresult)
            )

        if zresult == 0:
            self._finished_output = True

        return out_buffer.pos
+ old_pos = out_buffer.pos + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end + ) + + self._bytes_compressed += out_buffer.pos - old_pos + + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error ending compression stream: %s", _zstd_error(zresult) + ) + + if zresult == 0: + self._finished_output = True + + return out_buffer.pos + + def readinto1(self, b): + if self._closed: + raise ValueError("stream is closed") + + if self._finished_output: + return 0 + + # TODO use writable=True once we require CFFI >= 1.12. + dest_buffer = ffi.from_buffer(b) + ffi.memmove(b, b"", 0) + + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dest_buffer + out_buffer.size = len(dest_buffer) + out_buffer.pos = 0 + + self._compress_into_buffer(out_buffer) + if out_buffer.pos: + return out_buffer.pos + + while not self._finished_input: + self._read_input() + + if self._compress_into_buffer(out_buffer): + return out_buffer.pos + + if out_buffer.pos and not self._finished_input: + return out_buffer.pos + + # EOF. + old_pos = out_buffer.pos + + zresult = lib.ZSTD_compressStream2( + self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_end + ) + + self._bytes_compressed += out_buffer.pos - old_pos + + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error ending compression stream: %s" % _zstd_error(zresult) + ) + + if zresult == 0: + self._finished_output = True + + return out_buffer.pos + + +class ZstdCompressor(object): + """ + Create an object used to perform Zstandard compression. + + Each instance is essentially a wrapper around a ``ZSTD_CCtx`` from + zstd's C API. + + An instance can compress data various ways. Instances can be used + multiple times. Each compression operation will use the compression + parameters defined at construction time. + + .. 
note: + + When using a compression dictionary and multiple compression + operations are performed, the ``ZstdCompressionParameters`` derived + from an integer compression ``level`` and the first compressed data's + size will be reused for all subsequent operations. This may not be + desirable if source data sizes vary significantly. + + ``compression_params`` is mutually exclusive with ``level``, + ``write_checksum``, ``write_content_size``, ``write_dict_id``, and + ``threads``. + + Assume that each ``ZstdCompressor`` instance can only handle a single + logical compression operation at the same time. i.e. if you call a method + like ``stream_reader()`` to obtain multiple objects derived from the same + ``ZstdCompressor`` instance and attempt to use them simultaneously, errors + will likely occur. + + If you need to perform multiple logical compression operations and you + can't guarantee those operations are temporally non-overlapping, you need + to obtain multiple ``ZstdCompressor`` instances. + + Unless specified otherwise, assume that no two methods of + ``ZstdCompressor`` instances can be called from multiple Python + threads simultaneously. In other words, assume instances are not thread safe + unless stated otherwise. + + :param level: + Integer compression level. Valid values are all negative integers + through 22. Lower values generally yield faster operations with lower + compression ratios. Higher values are generally slower but compress + better. The default is 3, which is what the ``zstd`` CLI uses. Negative + levels effectively engage ``--fast`` mode from the ``zstd`` CLI. + :param dict_data: + A ``ZstdCompressionDict`` to be used to compress with dictionary + data. + :param compression_params: + A ``ZstdCompressionParameters`` instance defining low-level compression + parameters. If defined, this will overwrite the ``level`` argument. 
+ :param write_checksum: + If True, a 4 byte content checksum will be written with the compressed + data, allowing the decompressor to perform content verification. + :param write_content_size: + If True (the default), the decompressed content size will be included + in the header of the compressed data. This data will only be written if + the compressor knows the size of the input data. + :param write_dict_id: + Determines whether the dictionary ID will be written into the compressed + data. Defaults to True. Only adds content to the compressed data if + a dictionary is being used. + :param threads: + Number of threads to use to compress data concurrently. When set, + compression operations are performed on multiple threads. The default + value (0) disables multi-threaded compression. A value of ``-1`` means + to set the number of threads to the number of detected logical CPUs. + """ + + def __init__( + self, + level=3, + dict_data=None, + compression_params=None, + write_checksum=None, + write_content_size=None, + write_dict_id=None, + threads=0, + ): + if level > lib.ZSTD_maxCLevel(): + raise ValueError( + "level must be less than %d" % lib.ZSTD_maxCLevel() + ) + + if threads < 0: + threads = _cpu_count() + + if compression_params and write_checksum is not None: + raise ValueError( + "cannot define compression_params and " "write_checksum" + ) + + if compression_params and write_content_size is not None: + raise ValueError( + "cannot define compression_params and " "write_content_size" + ) + + if compression_params and write_dict_id is not None: + raise ValueError( + "cannot define compression_params and " "write_dict_id" + ) + + if compression_params and threads: + raise ValueError("cannot define compression_params and threads") + + if compression_params: + self._params = _make_cctx_params(compression_params) + else: + if write_dict_id is None: + write_dict_id = True + + params = lib.ZSTD_createCCtxParams() + if params == ffi.NULL: + raise MemoryError() + + 
self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams) + + _set_compression_parameter( + self._params, lib.ZSTD_c_compressionLevel, level + ) + + _set_compression_parameter( + self._params, + lib.ZSTD_c_contentSizeFlag, + write_content_size if write_content_size is not None else 1, + ) + + _set_compression_parameter( + self._params, + lib.ZSTD_c_checksumFlag, + 1 if write_checksum else 0, + ) + + _set_compression_parameter( + self._params, lib.ZSTD_c_dictIDFlag, 1 if write_dict_id else 0 + ) + + if threads: + _set_compression_parameter( + self._params, lib.ZSTD_c_nbWorkers, threads + ) + + cctx = lib.ZSTD_createCCtx() + if cctx == ffi.NULL: + raise MemoryError() + + self._cctx = cctx + self._dict_data = dict_data + + # We defer setting up garbage collection until after calling + # _setup_cctx() to ensure the memory size estimate is more accurate. + try: + self._setup_cctx() + finally: + self._cctx = ffi.gc( + cctx, lib.ZSTD_freeCCtx, size=lib.ZSTD_sizeof_CCtx(cctx) + ) + + def _setup_cctx(self): + zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams( + self._cctx, self._params + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "could not set compression parameters: %s" + % _zstd_error(zresult) + ) + + dict_data = self._dict_data + + if dict_data: + if dict_data._cdict: + zresult = lib.ZSTD_CCtx_refCDict(self._cctx, dict_data._cdict) + else: + zresult = lib.ZSTD_CCtx_loadDictionary_advanced( + self._cctx, + dict_data.as_bytes(), + len(dict_data), + lib.ZSTD_dlm_byRef, + dict_data._dict_type, + ) + + if lib.ZSTD_isError(zresult): + raise ZstdError( + "could not load compression dictionary: %s" + % _zstd_error(zresult) + ) + + def memory_size(self): + """Obtain the memory usage of this compressor, in bytes. + + >>> cctx = zstandard.ZstdCompressor() + >>> memory = cctx.memory_size() + """ + return lib.ZSTD_sizeof_CCtx(self._cctx) + + def compress(self, data): + """ + Compress data in a single operation. 
+ + This is the simplest mechanism to perform compression: simply pass in a + value and get a compressed value back. It is almost the most prone to + abuse. + + The input and output values must fit in memory, so passing in very large + values can result in excessive memory usage. For this reason, one of the + streaming based APIs is preferred for larger values. + + :param data: + Source data to compress + :return: + Compressed data + + >>> cctx = zstandard.ZstdCompressor() + >>> compressed = cctx.compress(b"data to compress") + """ + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + data_buffer = ffi.from_buffer(data) + + dest_size = lib.ZSTD_compressBound(len(data_buffer)) + out = new_nonzero("char[]", dest_size) + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer)) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + out_buffer = ffi.new("ZSTD_outBuffer *") + in_buffer = ffi.new("ZSTD_inBuffer *") + + out_buffer.dst = out + out_buffer.size = dest_size + out_buffer.pos = 0 + + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + zresult = lib.ZSTD_compressStream2( + self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end + ) + + if lib.ZSTD_isError(zresult): + raise ZstdError("cannot compress: %s" % _zstd_error(zresult)) + elif zresult: + raise ZstdError("unexpected partial frame flush") + + return ffi.buffer(out, out_buffer.pos)[:] + + def compressobj(self, size=-1): + """ + Obtain a compressor exposing the Python standard library compression API. + + See :py:class:`ZstdCompressionObj` for the full documentation. + + :param size: + Size in bytes of data that will be compressed. 
+ :return: + :py:class:`ZstdCompressionObj` + """ + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + cobj = ZstdCompressionObj() + cobj._out = ffi.new("ZSTD_outBuffer *") + cobj._dst_buffer = ffi.new( + "char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE + ) + cobj._out.dst = cobj._dst_buffer + cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE + cobj._out.pos = 0 + cobj._compressor = self + cobj._finished = False + + return cobj + + def chunker(self, size=-1, chunk_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE): + """ + Create an object for iterative compressing to same-sized chunks. + + This API is similar to :py:meth:`ZstdCompressor.compressobj` but has + better performance properties. + + :param size: + Size in bytes of data that will be compressed. + :param chunk_size: + Size of compressed chunks. + :return: + :py:class:`ZstdCompressionChunker` + """ + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + return ZstdCompressionChunker(self, chunk_size=chunk_size) + + def copy_stream( + self, + ifh, + ofh, + size=-1, + read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE, + ): + """ + Copy data between 2 streams while compressing it. + + Data will be read from ``ifh``, compressed, and written to ``ofh``. + ``ifh`` must have a ``read(size)`` method. ``ofh`` must have a + ``write(data)`` + method. + + >>> cctx = zstandard.ZstdCompressor() + >>> with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh: + ... 
cctx.copy_stream(ifh, ofh) + + It is also possible to declare the size of the source stream: + + >>> cctx = zstandard.ZstdCompressor() + >>> cctx.copy_stream(ifh, ofh, size=len_of_input) + + You can also specify how large the chunks that are ``read()`` + and ``write()`` from and to the streams: + + >>> cctx = zstandard.ZstdCompressor() + >>> cctx.copy_stream(ifh, ofh, read_size=32768, write_size=16384) + + The stream copier returns a 2-tuple of bytes read and written: + + >>> cctx = zstandard.ZstdCompressor() + >>> read_count, write_count = cctx.copy_stream(ifh, ofh) + + :param ifh: + Source stream to read from + :param ofh: + Destination stream to write to + :param size: + Size in bytes of the source stream. If defined, compression + parameters will be tuned for this size. + :param read_size: + Chunk sizes that source stream should be ``read()`` from. + :param write_size: + Chunk sizes that destination stream should be ``write()`` to. + :return: + 2-tuple of ints of bytes read and written, respectively. 
+ """ + + if not hasattr(ifh, "read"): + raise ValueError("first argument must have a read() method") + if not hasattr(ofh, "write"): + raise ValueError("second argument must have a write() method") + + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + dst_buffer = ffi.new("char[]", write_size) + out_buffer.dst = dst_buffer + out_buffer.size = write_size + out_buffer.pos = 0 + + total_read, total_write = 0, 0 + + while True: + data = ifh.read(read_size) + if not data: + break + + data_buffer = ffi.from_buffer(data) + total_read += len(data_buffer) + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_compressStream2( + self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd compress error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos)) + total_write += out_buffer.pos + out_buffer.pos = 0 + + # We've finished reading. Flush the compressor. 
+ while True: + zresult = lib.ZSTD_compressStream2( + self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error ending compression stream: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos)) + total_write += out_buffer.pos + out_buffer.pos = 0 + + if zresult == 0: + break + + return total_read, total_write + + def stream_reader( + self, + source, + size=-1, + read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE, + closefd=True, + ): + """ + Wrap a readable source with a stream that can read compressed data. + + This will produce an object conforming to the ``io.RawIOBase`` + interface which can be ``read()`` from to retrieve compressed data + from a source. + + The source object can be any object with a ``read(size)`` method + or an object that conforms to the buffer protocol. + + See :py:class:`ZstdCompressionReader` for type documentation and usage + examples. + + :param source: + Object to read source data from + :param size: + Size in bytes of source object. + :param read_size: + How many bytes to request when ``read()``'ing from the source. + :param closefd: + Whether to close the source stream when the returned stream is + closed. + :return: + :py:class:`ZstdCompressionReader` + """ + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + try: + size = len(source) + except Exception: + pass + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + return ZstdCompressionReader(self, source, read_size, closefd=closefd) + + def stream_writer( + self, + writer, + size=-1, + write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE, + write_return_read=True, + closefd=True, + ): + """ + Create a stream that will write compressed data into another stream. 
+ + The argument to ``stream_writer()`` must have a ``write(data)`` method. + As compressed data is available, ``write()`` will be called with the + compressed data as its argument. Many common Python types implement + ``write()``, including open file handles and ``io.BytesIO``. + + See :py:class:`ZstdCompressionWriter` for more documentation, including + usage examples. + + :param writer: + Stream to write compressed data to. + :param size: + Size in bytes of data to be compressed. If set, it will be used + to influence compression parameter tuning and could result in the + size being written into the header of the compressed data. + :param write_size: + How much data to ``write()`` to ``writer`` at a time. + :param write_return_read: + Whether ``write()`` should return the number of bytes that were + consumed from the input. + :param closefd: + Whether to ``close`` the ``writer`` when this stream is closed. + :return: + :py:class:`ZstdCompressionWriter` + """ + if not hasattr(writer, "write"): + raise ValueError("must pass an object with a write() method") + + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + return ZstdCompressionWriter( + self, writer, size, write_size, write_return_read, closefd=closefd + ) + + def read_to_iter( + self, + reader, + size=-1, + read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE, + ): + """ + Read uncompressed data from a reader and return an iterator + + Returns an iterator of compressed data produced from reading from + ``reader``. + + This method provides a mechanism to stream compressed data out of a + source as an iterator of data chunks. + + Uncompressed data will be obtained from ``reader`` by calling the + ``read(size)`` method of it or by reading a slice (if ``reader`` + conforms to the *buffer protocol*). The source data will be streamed + into a compressor. 
As compressed data is available, it will be exposed + to the iterator. + + Data is read from the source in chunks of ``read_size``. Compressed + chunks are at most ``write_size`` bytes. Both values default to the + zstd input and and output defaults, respectively. + + If reading from the source via ``read()``, ``read()`` will be called + until it raises or returns an empty bytes (``b""``). It is perfectly + valid for the source to deliver fewer bytes than were what requested + by ``read(size)``. + + The caller is partially in control of how fast data is fed into the + compressor by how it consumes the returned iterator. The compressor + will not consume from the reader unless the caller consumes from the + iterator. + + >>> cctx = zstandard.ZstdCompressor() + >>> for chunk in cctx.read_to_iter(fh): + ... # Do something with emitted data. + + ``read_to_iter()`` accepts a ``size`` argument declaring the size of + the input stream: + + >>> cctx = zstandard.ZstdCompressor() + >>> for chunk in cctx.read_to_iter(fh, size=some_int): + >>> pass + + You can also control the size that data is ``read()`` from the source + and the ideal size of output chunks: + + >>> cctx = zstandard.ZstdCompressor() + >>> for chunk in cctx.read_to_iter(fh, read_size=16384, write_size=8192): + >>> pass + + ``read_to_iter()`` does not give direct control over the sizes of chunks + fed into the compressor. Instead, chunk sizes will be whatever the object + being read from delivers. These will often be of a uniform size. + + :param reader: + Stream providing data to be compressed. + :param size: + Size in bytes of input data. + :param read_size: + Controls how many bytes are ``read()`` from the source. + :param write_size: + Controls the output size of emitted chunks. + :return: + Iterator of ``bytes``. 
+ """ + + if hasattr(reader, "read"): + have_read = True + elif hasattr(reader, "__getitem__"): + have_read = False + buffer_offset = 0 + size = len(reader) + else: + raise ValueError( + "must pass an object with a read() method or " + "conforms to buffer protocol" + ) + + lib.ZSTD_CCtx_reset(self._cctx, lib.ZSTD_reset_session_only) + + if size < 0: + size = lib.ZSTD_CONTENTSIZE_UNKNOWN + + zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error setting source size: %s" % _zstd_error(zresult) + ) + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + in_buffer.src = ffi.NULL + in_buffer.size = 0 + in_buffer.pos = 0 + + dst_buffer = ffi.new("char[]", write_size) + out_buffer.dst = dst_buffer + out_buffer.size = write_size + out_buffer.pos = 0 + + while True: + # We should never have output data sitting around after a previous + # iteration. + assert out_buffer.pos == 0 + + # Collect input data. + if have_read: + read_result = reader.read(read_size) + else: + remaining = len(reader) - buffer_offset + slice_size = min(remaining, read_size) + read_result = reader[buffer_offset : buffer_offset + slice_size] + buffer_offset += slice_size + + # No new input data. Break out of the read loop. + if not read_result: + break + + # Feed all read data into the compressor and emit output until + # exhausted. + read_buffer = ffi.from_buffer(read_result) + in_buffer.src = read_buffer + in_buffer.size = len(read_buffer) + in_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_compressStream2( + self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd compress error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + out_buffer.pos = 0 + yield data + + assert out_buffer.pos == 0 + + # And repeat the loop to collect more data. 
+ continue + + # If we get here, input is exhausted. End the stream and emit what + # remains. + while True: + assert out_buffer.pos == 0 + zresult = lib.ZSTD_compressStream2( + self._cctx, out_buffer, in_buffer, lib.ZSTD_e_end + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "error ending compression stream: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + out_buffer.pos = 0 + yield data + + if zresult == 0: + break + + def multi_compress_to_buffer(self, data, threads=-1): + """ + Compress multiple pieces of data as a single function call. + + (Experimental. Not yet supported by CFFI backend.) + + This function is optimized to perform multiple compression operations + as as possible with as little overhead as possible. + + Data to be compressed can be passed as a ``BufferWithSegmentsCollection``, + a ``BufferWithSegments``, or a list containing byte like objects. Each + element of the container will be compressed individually using the + configured parameters on the ``ZstdCompressor`` instance. + + The ``threads`` argument controls how many threads to use for + compression. The default is ``0`` which means to use a single thread. + Negative values use the number of logical CPUs in the machine. + + The function returns a ``BufferWithSegmentsCollection``. This type + represents N discrete memory allocations, each holding 1 or more + compressed frames. + + Output data is written to shared memory buffers. This means that unlike + regular Python objects, a reference to *any* object within the collection + keeps the shared buffer and therefore memory backing it alive. This can + have undesirable effects on process memory usage. + + The API and behavior of this function is experimental and will likely + change. Known deficiencies include: + + * If asked to use multiple threads, it will always spawn that many + threads, even if the input is too small to use them. 
It should + automatically lower the thread count when the extra threads would + just add overhead. + * The buffer allocation strategy is fixed. There is room to make it + dynamic, perhaps even to allow one output buffer per input, + facilitating a variation of the API to return a list without the + adverse effects of shared memory buffers. + + :param data: + Source to read discrete pieces of data to compress. + + Can be a ``BufferWithSegmentsCollection``, a ``BufferWithSegments``, + or a ``list[bytes]``. + :return: + BufferWithSegmentsCollection holding compressed data. + """ + raise NotImplementedError() + + def frame_progression(self): + """ + Return information on how much work the compressor has done. + + Returns a 3-tuple of (ingested, consumed, produced). + + >>> cctx = zstandard.ZstdCompressor() + >>> (ingested, consumed, produced) = cctx.frame_progression() + """ + progression = lib.ZSTD_getFrameProgression(self._cctx) + + return progression.ingested, progression.consumed, progression.produced + + +class FrameParameters(object): + """Information about a zstd frame. + + Instances have the following attributes: + + ``content_size`` + Integer size of original, uncompressed content. This will be ``0`` if the + original content size isn't written to the frame (controlled with the + ``write_content_size`` argument to ``ZstdCompressor``) or if the input + content size was ``0``. + + ``window_size`` + Integer size of maximum back-reference distance in compressed data. + + ``dict_id`` + Integer of dictionary ID used for compression. ``0`` if no dictionary + ID was used or if the dictionary ID was ``0``. + + ``has_checksum`` + Bool indicating whether a 4 byte content checksum is stored at the end + of the frame. 
+ """ + + def __init__(self, fparams): + self.content_size = fparams.frameContentSize + self.window_size = fparams.windowSize + self.dict_id = fparams.dictID + self.has_checksum = bool(fparams.checksumFlag) + + +def frame_content_size(data): + """Obtain the decompressed size of a frame. + + The returned value is usually accurate. But strictly speaking it should + not be trusted. + + :return: + ``-1`` if size unknown and a non-negative integer otherwise. + """ + data_buffer = ffi.from_buffer(data) + + size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer)) + + if size == lib.ZSTD_CONTENTSIZE_ERROR: + raise ZstdError("error when determining content size") + elif size == lib.ZSTD_CONTENTSIZE_UNKNOWN: + return -1 + else: + return size + + +def frame_header_size(data): + """Obtain the size of a frame header. + + :return: + Integer size in bytes. + """ + data_buffer = ffi.from_buffer(data) + + zresult = lib.ZSTD_frameHeaderSize(data_buffer, len(data_buffer)) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "could not determine frame header size: %s" % _zstd_error(zresult) + ) + + return zresult + + +def get_frame_parameters(data): + """ + Parse a zstd frame header into frame parameters. + + Depending on which fields are present in the frame and their values, the + length of the frame parameters varies. If insufficient bytes are passed + in to fully parse the frame parameters, ``ZstdError`` is raised. To ensure + frame parameters can be parsed, pass in at least 18 bytes. + + :param data: + Data from which to read frame parameters. 
+ :return: + :py:class:`FrameParameters` + """ + params = ffi.new("ZSTD_frameHeader *") + + data_buffer = ffi.from_buffer(data) + zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer)) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "cannot get frame parameters: %s" % _zstd_error(zresult) + ) + + if zresult: + raise ZstdError( + "not enough data for frame parameters; need %d bytes" % zresult + ) + + return FrameParameters(params[0]) + + +class ZstdCompressionDict(object): + """Represents a computed compression dictionary. + + Instances are obtained by calling :py:func:`train_dictionary` or by + passing bytes obtained from another source into the constructor. + + Instances can be constructed from bytes: + + >>> dict_data = zstandard.ZstdCompressionDict(data) + + It is possible to construct a dictionary from *any* data. If the data + doesn't begin with a magic header, it will be treated as a *prefix* + dictionary. *Prefix* dictionaries allow compression operations to + reference raw data within the dictionary. + + It is possible to force the use of *prefix* dictionaries or to require + a dictionary header: + + >>> dict_data = zstandard.ZstdCompressionDict(data, dict_type=zstandard.DICT_TYPE_RAWCONTENT) + >>> dict_data = zstandard.ZstdCompressionDict(data, dict_type=zstandard.DICT_TYPE_FULLDICT) + + You can see how many bytes are in the dictionary by calling ``len()``: + + >>> dict_data = zstandard.train_dictionary(size, samples) + >>> dict_size = len(dict_data) # will not be larger than ``size`` + + Once you have a dictionary, you can pass it to the objects performing + compression and decompression: + + >>> dict_data = zstandard.train_dictionary(131072, samples) + >>> cctx = zstandard.ZstdCompressor(dict_data=dict_data) + >>> for source_data in input_data: + ... compressed = cctx.compress(source_data) + ... # Do something with compressed data. + ... 
+ >>> dctx = zstandard.ZstdDecompressor(dict_data=dict_data) + >>> for compressed_data in input_data: + ... buffer = io.BytesIO() + ... with dctx.stream_writer(buffer) as decompressor: + ... decompressor.write(compressed_data) + ... # Do something with raw data in ``buffer``. + + Dictionaries have unique integer IDs. You can retrieve this ID via: + + >>> dict_id = zstandard.dictionary_id(dict_data) + + You can obtain the raw data in the dict (useful for persisting and constructing + a ``ZstdCompressionDict`` later) via ``as_bytes()``: + + >>> dict_data = zstandard.train_dictionary(size, samples) + >>> raw_data = dict_data.as_bytes() + + By default, when a ``ZstdCompressionDict`` is *attached* to a + ``ZstdCompressor``, each ``ZstdCompressor`` performs work to prepare the + dictionary for use. This is fine if only 1 compression operation is being + performed or if the ``ZstdCompressor`` is being reused for multiple operations. + But if multiple ``ZstdCompressor`` instances are being used with the dictionary, + this can add overhead. + + It is possible to *precompute* the dictionary so it can readily be consumed + by multiple ``ZstdCompressor`` instances: + + >>> d = zstandard.ZstdCompressionDict(data) + >>> # Precompute for compression level 3. + >>> d.precompute_compress(level=3) + >>> # Precompute with specific compression parameters. + >>> params = zstandard.ZstdCompressionParameters(...) + >>> d.precompute_compress(compression_params=params) + + .. note:: + + When a dictionary is precomputed, the compression parameters used to + precompute the dictionary overwrite some of the compression parameters + specified to ``ZstdCompressor``. + + :param data: + Dictionary data. + :param dict_type: + Type of dictionary. One of the ``DICT_TYPE_*`` constants. 
+ """ + + def __init__(self, data, dict_type=DICT_TYPE_AUTO, k=0, d=0): + assert isinstance(data, bytes) + self._data = data + self.k = k + self.d = d + + if dict_type not in ( + DICT_TYPE_AUTO, + DICT_TYPE_RAWCONTENT, + DICT_TYPE_FULLDICT, + ): + raise ValueError( + "invalid dictionary load mode: %d; must use " + "DICT_TYPE_* constants" + ) + + self._dict_type = dict_type + self._cdict = None + + def __len__(self): + return len(self._data) + + def dict_id(self): + """Obtain the integer ID of the dictionary.""" + return int(lib.ZDICT_getDictID(self._data, len(self._data))) + + def as_bytes(self): + """Obtain the ``bytes`` representation of the dictionary.""" + return self._data + + def precompute_compress(self, level=0, compression_params=None): + """Precompute a dictionary os it can be used by multiple compressors. + + Calling this method on an instance that will be used by multiple + :py:class:`ZstdCompressor` instances will improve performance. + """ + if level and compression_params: + raise ValueError( + "must only specify one of level or " "compression_params" + ) + + if not level and not compression_params: + raise ValueError("must specify one of level or compression_params") + + if level: + cparams = lib.ZSTD_getCParams(level, 0, len(self._data)) + else: + cparams = ffi.new("ZSTD_compressionParameters") + cparams.chainLog = compression_params.chain_log + cparams.hashLog = compression_params.hash_log + cparams.minMatch = compression_params.min_match + cparams.searchLog = compression_params.search_log + cparams.strategy = compression_params.strategy + cparams.targetLength = compression_params.target_length + cparams.windowLog = compression_params.window_log + + cdict = lib.ZSTD_createCDict_advanced( + self._data, + len(self._data), + lib.ZSTD_dlm_byRef, + self._dict_type, + cparams, + lib.ZSTD_defaultCMem, + ) + if cdict == ffi.NULL: + raise ZstdError("unable to precompute dictionary") + + self._cdict = ffi.gc( + cdict, lib.ZSTD_freeCDict, 
size=lib.ZSTD_sizeof_CDict(cdict) + ) + + @property + def _ddict(self): + ddict = lib.ZSTD_createDDict_advanced( + self._data, + len(self._data), + lib.ZSTD_dlm_byRef, + self._dict_type, + lib.ZSTD_defaultCMem, + ) + + if ddict == ffi.NULL: + raise ZstdError("could not create decompression dict") + + ddict = ffi.gc( + ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict) + ) + self.__dict__["_ddict"] = ddict + + return ddict + + +def train_dictionary( + dict_size, + samples, + k=0, + d=0, + f=0, + split_point=0.0, + accel=0, + notifications=0, + dict_id=0, + level=0, + steps=0, + threads=0, +): + """Train a dictionary from sample data using the COVER algorithm. + + A compression dictionary of size ``dict_size`` will be created from the + iterable of ``samples``. The raw dictionary bytes will be returned. + + The dictionary training mechanism is known as *cover*. More details about it + are available in the paper *Effective Construction of Relative Lempel-Ziv + Dictionaries* (authors: Liao, Petri, Moffat, Wirth). + + The cover algorithm takes parameters ``k`` and ``d``. These are the + *segment size* and *dmer size*, respectively. The returned dictionary + instance created by this function has ``k`` and ``d`` attributes + containing the values for these parameters. If a ``ZstdCompressionDict`` + is constructed from raw bytes data (a content-only dictionary), the + ``k`` and ``d`` attributes will be ``0``. + + The segment and dmer size parameters to the cover algorithm can either be + specified manually or ``train_dictionary()`` can try multiple values + and pick the best one, where *best* means the smallest compressed data size. + This later mode is called *optimization* mode. + + Under the hood, this function always calls + ``ZDICT_optimizeTrainFromBuffer_fastCover()``. See the corresponding C library + documentation for more. 
+ + If neither ``steps`` nor ``threads`` is defined, defaults for ``d``, ``steps``, + and ``level`` will be used that are equivalent to what + ``ZDICT_trainFromBuffer()`` would use. + + + :param dict_size: + Target size in bytes of the dictionary to generate. + :param samples: + A list of bytes holding samples the dictionary will be trained from. + :param k: + Segment size : constraint: 0 < k : Reasonable range [16, 2048+] + :param d: + dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] + :param f: + log of size of frequency array : constraint: 0 < f <= 31 : 1 means + default(20) + :param split_point: + Percentage of samples used for training: Only used for optimization. + The first # samples * ``split_point`` samples will be used for training. + The last # samples * (1 - split_point) samples will be used for testing. + 0 means default (0.75), 1.0 when all samples are used for both training + and testing. + :param accel: + Acceleration level: constraint: 0 < accel <= 10. Higher means faster + and less accurate, 0 means default(1). + :param dict_id: + Integer dictionary ID for the produced dictionary. Default is 0, which uses + a random value. + :param steps: + Number of steps through ``k`` values to perform when trying parameter + variations. + :param threads: + Number of threads to use when trying parameter variations. Default is 0, + which means to use a single thread. A negative value can be specified to + use as many threads as there are detected logical CPUs. + :param level: + Integer target compression level when trying parameter variations. + :param notifications: + Controls writing of informational messages to ``stderr``. ``0`` (the + default) means to write nothing. ``1`` writes errors. ``2`` writes + progression info. ``3`` writes more details. And ``4`` writes all info. 
+ """ + + if not isinstance(samples, list): + raise TypeError("samples must be a list") + + if threads < 0: + threads = _cpu_count() + + if not steps and not threads: + d = d or 8 + steps = steps or 4 + level = level or 3 + + total_size = sum(map(len, samples)) + + samples_buffer = new_nonzero("char[]", total_size) + sample_sizes = new_nonzero("size_t[]", len(samples)) + + offset = 0 + for i, sample in enumerate(samples): + if not isinstance(sample, bytes): + raise ValueError("samples must be bytes") + + l = len(sample) + ffi.memmove(samples_buffer + offset, sample, l) + offset += l + sample_sizes[i] = l + + dict_data = new_nonzero("char[]", dict_size) + + dparams = ffi.new("ZDICT_fastCover_params_t *")[0] + dparams.k = k + dparams.d = d + dparams.f = f + dparams.steps = steps + dparams.nbThreads = threads + dparams.splitPoint = split_point + dparams.accel = accel + dparams.zParams.notificationLevel = notifications + dparams.zParams.dictID = dict_id + dparams.zParams.compressionLevel = level + + zresult = lib.ZDICT_optimizeTrainFromBuffer_fastCover( + ffi.addressof(dict_data), + dict_size, + ffi.addressof(samples_buffer), + ffi.addressof(sample_sizes, 0), + len(samples), + ffi.addressof(dparams), + ) + + if lib.ZDICT_isError(zresult): + msg = ffi.string(lib.ZDICT_getErrorName(zresult)).decode("utf-8") + raise ZstdError("cannot train dict: %s" % msg) + + return ZstdCompressionDict( + ffi.buffer(dict_data, zresult)[:], + dict_type=DICT_TYPE_FULLDICT, + k=dparams.k, + d=dparams.d, + ) + + +class ZstdDecompressionObj(object): + """A standard library API compatible decompressor. + + This type implements a compressor that conforms to the API by other + decompressors in Python's standard library. e.g. ``zlib.decompressobj`` + or ``bz2.BZ2Decompressor``. This allows callers to use zstd compression + while conforming to a similar API. + + Compressed data chunks are fed into ``decompress(data)`` and + uncompressed output (or an empty bytes) is returned. 
Output from + subsequent calls needs to be concatenated to reassemble the full + decompressed byte sequence. + + If ``read_across_frames=False``, each instance is single use: once an + input frame is decoded, ``decompress()`` will raise an exception. If + ``read_across_frames=True``, instances can decode multiple frames. + + >>> dctx = zstandard.ZstdDecompressor() + >>> dobj = dctx.decompressobj() + >>> data = dobj.decompress(compressed_chunk_0) + >>> data = dobj.decompress(compressed_chunk_1) + + By default, calls to ``decompress()`` write output data in chunks of size + ``DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE``. These chunks are concatenated + before being returned to the caller. It is possible to define the size of + these temporary chunks by passing ``write_size`` to ``decompressobj()``: + + >>> dctx = zstandard.ZstdDecompressor() + >>> dobj = dctx.decompressobj(write_size=1048576) + + .. note:: + + Because calls to ``decompress()`` may need to perform multiple + memory (re)allocations, this streaming decompression API isn't as + efficient as other APIs. + """ + + def __init__(self, decompressor, write_size, read_across_frames): + self._decompressor = decompressor + self._write_size = write_size + self._finished = False + self._read_across_frames = read_across_frames + self._unused_input = b"" + + def decompress(self, data): + """Send compressed data to the decompressor and obtain decompressed data. + + :param data: + Data to feed into the decompressor. + :return: + Decompressed bytes. 
+ """ + if self._finished: + raise ZstdError("cannot use a decompressobj multiple times") + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + data_buffer = ffi.from_buffer(data) + + if len(data_buffer) == 0: + return b"" + + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + dst_buffer = ffi.new("char[]", self._write_size) + out_buffer.dst = dst_buffer + out_buffer.size = len(dst_buffer) + out_buffer.pos = 0 + + chunks = [] + + while True: + zresult = lib.ZSTD_decompressStream( + self._decompressor._dctx, out_buffer, in_buffer + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd decompressor error: %s" % _zstd_error(zresult) + ) + + # Always record any output from decompressor. + if out_buffer.pos: + chunks.append(ffi.buffer(out_buffer.dst, out_buffer.pos)[:]) + + # 0 is only seen when a frame is fully decoded *and* fully flushed. + # Behavior depends on whether we're in single or multiple frame + # mode. + if zresult == 0 and not self._read_across_frames: + # Mark the instance as done and make any unconsumed input available + # for retrieval. + self._finished = True + self._decompressor = None + self._unused_input = data[in_buffer.pos : in_buffer.size] + break + elif zresult == 0 and self._read_across_frames: + # We're at the end of a fully flushed frame and we can read more. + # Try to read more if there's any more input. + if in_buffer.pos == in_buffer.size: + break + else: + out_buffer.pos = 0 + + # We're not at the end of the frame *or* we're not fully flushed. + + # The decompressor will write out all the bytes it can to the output + # buffer. So if the output buffer is partially filled and the input + # is exhausted, there's nothing more to write. So we've done all we + # can. 
+ elif ( + in_buffer.pos == in_buffer.size + and out_buffer.pos < out_buffer.size + ): + break + else: + out_buffer.pos = 0 + + return b"".join(chunks) + + def flush(self, length=0): + """Effectively a no-op. + + Implemented for compatibility with the standard library APIs. + + Safe to call at any time. + + :return: + Empty bytes. + """ + return b"" + + @property + def unused_data(self): + """Bytes past the end of compressed data. + + If ``decompress()`` is fed additional data beyond the end of a zstd + frame, this value will be non-empty once ``decompress()`` fully decodes + the input frame. + """ + return self._unused_input + + @property + def unconsumed_tail(self): + """Data that has not yet been fed into the decompressor.""" + return b"" + + @property + def eof(self): + """Whether the end of the compressed data stream has been reached.""" + return self._finished + + +class ZstdDecompressionReader(object): + """Read only decompressor that pull uncompressed data from another stream. + + This type provides a read-only stream interface for performing transparent + decompression from another stream or data source. It conforms to the + ``io.RawIOBase`` interface. Only methods relevant to reading are + implemented. + + >>> with open(path, 'rb') as fh: + >>> dctx = zstandard.ZstdDecompressor() + >>> reader = dctx.stream_reader(fh) + >>> while True: + ... chunk = reader.read(16384) + ... if not chunk: + ... break + ... # Do something with decompressed chunk. + + The stream can also be used as a context manager: + + >>> with open(path, 'rb') as fh: + ... dctx = zstandard.ZstdDecompressor() + ... with dctx.stream_reader(fh) as reader: + ... ... + + When used as a context manager, the stream is closed and the underlying + resources are released when the context manager exits. Future operations + against the stream will fail. + + The ``source`` argument to ``stream_reader()`` can be any object with a + ``read(size)`` method or any object implementing the *buffer protocol*. 
+ + If the ``source`` is a stream, you can specify how large ``read()`` requests + to that stream should be via the ``read_size`` argument. It defaults to + ``zstandard.DECOMPRESSION_RECOMMENDED_INPUT_SIZE``.: + + >>> with open(path, 'rb') as fh: + ... dctx = zstandard.ZstdDecompressor() + ... # Will perform fh.read(8192) when obtaining data for the decompressor. + ... with dctx.stream_reader(fh, read_size=8192) as reader: + ... ... + + Instances are *partially* seekable. Absolute and relative positions + (``SEEK_SET`` and ``SEEK_CUR``) forward of the current position are + allowed. Offsets behind the current read position and offsets relative + to the end of stream are not allowed and will raise ``ValueError`` + if attempted. + + ``tell()`` returns the number of decompressed bytes read so far. + + Not all I/O methods are implemented. Notably missing is support for + ``readline()``, ``readlines()``, and linewise iteration support. This is + because streams operate on binary data - not text data. If you want to + convert decompressed output to text, you can chain an ``io.TextIOWrapper`` + to the stream: + + >>> with open(path, 'rb') as fh: + ... dctx = zstandard.ZstdDecompressor() + ... stream_reader = dctx.stream_reader(fh) + ... text_stream = io.TextIOWrapper(stream_reader, encoding='utf-8') + ... for line in text_stream: + ... ... + """ + + def __init__( + self, + decompressor, + source, + read_size, + read_across_frames, + closefd=True, + ): + self._decompressor = decompressor + self._source = source + self._read_size = read_size + self._read_across_frames = bool(read_across_frames) + self._closefd = bool(closefd) + self._entered = False + self._closed = False + self._bytes_decompressed = 0 + self._finished_input = False + self._finished_output = False + self._in_buffer = ffi.new("ZSTD_inBuffer *") + # Holds a ref to self._in_buffer.src. 
+ self._source_buffer = None + + def __enter__(self): + if self._entered: + raise ValueError("cannot __enter__ multiple times") + + if self._closed: + raise ValueError("stream is closed") + + self._entered = True + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self._entered = False + self._decompressor = None + self.close() + self._source = None + + return False + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return False + + def readline(self, size=-1): + raise io.UnsupportedOperation() + + def readlines(self, hint=-1): + raise io.UnsupportedOperation() + + def write(self, data): + raise io.UnsupportedOperation() + + def writelines(self, lines): + raise io.UnsupportedOperation() + + def isatty(self): + return False + + def flush(self): + return None + + def close(self): + if self._closed: + return None + + self._closed = True + + f = getattr(self._source, "close", None) + if self._closefd and f: + f() + + @property + def closed(self): + return self._closed + + def tell(self): + return self._bytes_decompressed + + def readall(self): + chunks = [] + + while True: + chunk = self.read(1048576) + if not chunk: + break + + chunks.append(chunk) + + return b"".join(chunks) + + def __iter__(self): + raise io.UnsupportedOperation() + + def __next__(self): + raise io.UnsupportedOperation() + + next = __next__ + + def _read_input(self): + # We have data left over in the input buffer. Use it. + if self._in_buffer.pos < self._in_buffer.size: + return + + # All input data exhausted. Nothing to do. + if self._finished_input: + return + + # Else populate the input buffer from our source. 
+ if hasattr(self._source, "read"): + data = self._source.read(self._read_size) + + if not data: + self._finished_input = True + return + + self._source_buffer = ffi.from_buffer(data) + self._in_buffer.src = self._source_buffer + self._in_buffer.size = len(self._source_buffer) + self._in_buffer.pos = 0 + else: + self._source_buffer = ffi.from_buffer(self._source) + self._in_buffer.src = self._source_buffer + self._in_buffer.size = len(self._source_buffer) + self._in_buffer.pos = 0 + + def _decompress_into_buffer(self, out_buffer): + """Decompress available input into an output buffer. + + Returns True if data in output buffer should be emitted. + """ + zresult = lib.ZSTD_decompressStream( + self._decompressor._dctx, out_buffer, self._in_buffer + ) + + if self._in_buffer.pos == self._in_buffer.size: + self._in_buffer.src = ffi.NULL + self._in_buffer.pos = 0 + self._in_buffer.size = 0 + self._source_buffer = None + + if not hasattr(self._source, "read"): + self._finished_input = True + + if lib.ZSTD_isError(zresult): + raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult)) + + # Emit data if there is data AND either: + # a) output buffer is full (read amount is satisfied) + # b) we're at end of a frame and not in frame spanning mode + return out_buffer.pos and ( + out_buffer.pos == out_buffer.size + or zresult == 0 + and not self._read_across_frames + ) + + def read(self, size=-1): + if self._closed: + raise ValueError("stream is closed") + + if size < -1: + raise ValueError("cannot read negative amounts less than -1") + + if size == -1: + # This is recursive. But it gets the job done. + return self.readall() + + if self._finished_output or size == 0: + return b"" + + # We /could/ call into readinto() here. But that introduces more + # overhead. 
+ dst_buffer = ffi.new("char[]", size) + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dst_buffer + out_buffer.size = size + out_buffer.pos = 0 + + self._read_input() + if self._decompress_into_buffer(out_buffer): + self._bytes_decompressed += out_buffer.pos + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + while not self._finished_input: + self._read_input() + if self._decompress_into_buffer(out_buffer): + self._bytes_decompressed += out_buffer.pos + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + self._bytes_decompressed += out_buffer.pos + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + def readinto(self, b): + if self._closed: + raise ValueError("stream is closed") + + if self._finished_output: + return 0 + + # TODO use writable=True once we require CFFI >= 1.12. + dest_buffer = ffi.from_buffer(b) + ffi.memmove(b, b"", 0) + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dest_buffer + out_buffer.size = len(dest_buffer) + out_buffer.pos = 0 + + self._read_input() + if self._decompress_into_buffer(out_buffer): + self._bytes_decompressed += out_buffer.pos + return out_buffer.pos + + while not self._finished_input: + self._read_input() + if self._decompress_into_buffer(out_buffer): + self._bytes_decompressed += out_buffer.pos + return out_buffer.pos + + self._bytes_decompressed += out_buffer.pos + return out_buffer.pos + + def read1(self, size=-1): + if self._closed: + raise ValueError("stream is closed") + + if size < -1: + raise ValueError("cannot read negative amounts less than -1") + + if self._finished_output or size == 0: + return b"" + + # -1 returns arbitrary number of bytes. + if size == -1: + size = DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE + + dst_buffer = ffi.new("char[]", size) + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dst_buffer + out_buffer.size = size + out_buffer.pos = 0 + + # read1() dictates that we can perform at most 1 call to underlying + # stream to get input. 
However, we can't satisfy this restriction with + # decompression because not all input generates output. So we allow + # multiple read(). But unlike read(), we stop once we have any output. + while not self._finished_input: + self._read_input() + self._decompress_into_buffer(out_buffer) + + if out_buffer.pos: + break + + self._bytes_decompressed += out_buffer.pos + return ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + + def readinto1(self, b): + if self._closed: + raise ValueError("stream is closed") + + if self._finished_output: + return 0 + + # TODO use writable=True once we require CFFI >= 1.12. + dest_buffer = ffi.from_buffer(b) + ffi.memmove(b, b"", 0) + + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = dest_buffer + out_buffer.size = len(dest_buffer) + out_buffer.pos = 0 + + while not self._finished_input and not self._finished_output: + self._read_input() + self._decompress_into_buffer(out_buffer) + + if out_buffer.pos: + break + + self._bytes_decompressed += out_buffer.pos + return out_buffer.pos + + def seek(self, pos, whence=os.SEEK_SET): + if self._closed: + raise ValueError("stream is closed") + + read_amount = 0 + + if whence == os.SEEK_SET: + if pos < 0: + raise OSError("cannot seek to negative position with SEEK_SET") + + if pos < self._bytes_decompressed: + raise OSError( + "cannot seek zstd decompression stream " "backwards" + ) + + read_amount = pos - self._bytes_decompressed + + elif whence == os.SEEK_CUR: + if pos < 0: + raise OSError( + "cannot seek zstd decompression stream " "backwards" + ) + + read_amount = pos + elif whence == os.SEEK_END: + raise OSError( + "zstd decompression streams cannot be seeked " "with SEEK_END" + ) + + while read_amount: + result = self.read( + min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE) + ) + + if not result: + break + + read_amount -= len(result) + + return self._bytes_decompressed + + +class ZstdDecompressionWriter(object): + """ + Write-only stream wrapper that performs decompression. 
+ + This type provides a writable stream that performs decompression and writes + decompressed data to another stream. + + This type implements the ``io.RawIOBase`` interface. Only methods that + involve writing will do useful things. + + Behavior is similar to :py:meth:`ZstdCompressor.stream_writer`: compressed + data is sent to the decompressor by calling ``write(data)`` and decompressed + output is written to the inner stream by calling its ``write(data)`` + method: + + >>> dctx = zstandard.ZstdDecompressor() + >>> decompressor = dctx.stream_writer(fh) + >>> # Will call fh.write() with uncompressed data. + >>> decompressor.write(compressed_data) + + Instances can be used as context managers. However, context managers add no + extra special behavior other than automatically calling ``close()`` when + they exit. + + Calling ``close()`` will mark the stream as closed and subsequent I/O + operations will raise ``ValueError`` (per the documented behavior of + ``io.RawIOBase``). ``close()`` will also call ``close()`` on the + underlying stream if such a method exists and the instance was created with + ``closefd=True``. + + The size of chunks to ``write()`` to the destination can be specified: + + >>> dctx = zstandard.ZstdDecompressor() + >>> with dctx.stream_writer(fh, write_size=16384) as decompressor: + >>> pass + + You can see how much memory is being used by the decompressor: + + >>> dctx = zstandard.ZstdDecompressor() + >>> with dctx.stream_writer(fh) as decompressor: + >>> byte_size = decompressor.memory_size() + + ``stream_writer()`` accepts a ``write_return_read`` boolean argument to control + the return value of ``write()``. When ``True`` (the default), ``write()`` + returns the number of bytes that were read from the input. When ``False``, + ``write()`` returns the number of bytes that were written to the inner + stream. 
+ """ + + def __init__( + self, + decompressor, + writer, + write_size, + write_return_read, + closefd=True, + ): + decompressor._ensure_dctx() + + self._decompressor = decompressor + self._writer = writer + self._write_size = write_size + self._write_return_read = bool(write_return_read) + self._closefd = bool(closefd) + self._entered = False + self._closing = False + self._closed = False + + def __enter__(self): + if self._closed: + raise ValueError("stream is closed") + + if self._entered: + raise ZstdError("cannot __enter__ multiple times") + + self._entered = True + + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self._entered = False + self.close() + + return False + + def __iter__(self): + raise io.UnsupportedOperation() + + def __next__(self): + raise io.UnsupportedOperation() + + def memory_size(self): + return lib.ZSTD_sizeof_DCtx(self._decompressor._dctx) + + def close(self): + if self._closed: + return + + try: + self._closing = True + self.flush() + finally: + self._closing = False + self._closed = True + + f = getattr(self._writer, "close", None) + if self._closefd and f: + f() + + @property + def closed(self): + return self._closed + + def fileno(self): + f = getattr(self._writer, "fileno", None) + if f: + return f() + else: + raise OSError("fileno not available on underlying writer") + + def flush(self): + if self._closed: + raise ValueError("stream is closed") + + f = getattr(self._writer, "flush", None) + if f and not self._closing: + return f() + + def isatty(self): + return False + + def readable(self): + return False + + def readline(self, size=-1): + raise io.UnsupportedOperation() + + def readlines(self, hint=-1): + raise io.UnsupportedOperation() + + def seek(self, offset, whence=None): + raise io.UnsupportedOperation() + + def seekable(self): + return False + + def tell(self): + raise io.UnsupportedOperation() + + def truncate(self, size=None): + raise io.UnsupportedOperation() + + def writable(self): + return True + + 
def writelines(self, lines): + raise io.UnsupportedOperation() + + def read(self, size=-1): + raise io.UnsupportedOperation() + + def readall(self): + raise io.UnsupportedOperation() + + def readinto(self, b): + raise io.UnsupportedOperation() + + def write(self, data): + if self._closed: + raise ValueError("stream is closed") + + total_write = 0 + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + data_buffer = ffi.from_buffer(data) + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + dst_buffer = ffi.new("char[]", self._write_size) + out_buffer.dst = dst_buffer + out_buffer.size = len(dst_buffer) + out_buffer.pos = 0 + + dctx = self._decompressor._dctx + + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd decompress error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + self._writer.write( + ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + ) + total_write += out_buffer.pos + out_buffer.pos = 0 + + if self._write_return_read: + return in_buffer.pos + else: + return total_write + + +class ZstdDecompressor(object): + """ + Context for performing zstandard decompression. + + Each instance is essentially a wrapper around a ``ZSTD_DCtx`` from zstd's + C API. + + An instance can compress data various ways. Instances can be used multiple + times. + + The interface of this class is very similar to + :py:class:`zstandard.ZstdCompressor` (by design). + + Assume that each ``ZstdDecompressor`` instance can only handle a single + logical compression operation at the same time. i.e. if you call a method + like ``decompressobj()`` to obtain multiple objects derived from the same + ``ZstdDecompressor`` instance and attempt to use them simultaneously, errors + will likely occur. 
+ + If you need to perform multiple logical decompression operations and you + can't guarantee those operations are temporally non-overlapping, you need + to obtain multiple ``ZstdDecompressor`` instances. + + Unless specified otherwise, assume that no two methods of + ``ZstdDecompressor`` instances can be called from multiple Python + threads simultaneously. In other words, assume instances are not thread safe + unless stated otherwise. + + :param dict_data: + Compression dictionary to use. + :param max_window_size: + Sets an upper limit on the window size for decompression operations in + kibibytes. This setting can be used to prevent large memory allocations + for inputs using large compression windows. + :param format: + Set the format of data for the decoder. + + By default this is ``zstandard.FORMAT_ZSTD1``. It can be set to + ``zstandard.FORMAT_ZSTD1_MAGICLESS`` to allow decoding frames without + the 4 byte magic header. Not all decompression APIs support this mode. + """ + + def __init__(self, dict_data=None, max_window_size=0, format=FORMAT_ZSTD1): + self._dict_data = dict_data + self._max_window_size = max_window_size + self._format = format + + dctx = lib.ZSTD_createDCtx() + if dctx == ffi.NULL: + raise MemoryError() + + self._dctx = dctx + + # Defer setting up garbage collection until full state is loaded so + # the memory size is more accurate. + try: + self._ensure_dctx() + finally: + self._dctx = ffi.gc( + dctx, lib.ZSTD_freeDCtx, size=lib.ZSTD_sizeof_DCtx(dctx) + ) + + def memory_size(self): + """Size of decompression context, in bytes. + + >>> dctx = zstandard.ZstdDecompressor() + >>> size = dctx.memory_size() + """ + return lib.ZSTD_sizeof_DCtx(self._dctx) + + def decompress( + self, + data, + max_output_size=0, + read_across_frames=False, + allow_extra_data=True, + ): + """ + Decompress data in a single operation. + + This method will decompress the input data in a single operation and + return the decompressed data. 
+ + The input bytes are expected to contain at least 1 full Zstandard frame + (something compressed with :py:meth:`ZstdCompressor.compress` or + similar). If the input does not contain a full frame, an exception will + be raised. + + ``read_across_frames`` controls whether to read multiple zstandard + frames in the input. When False, decompression stops after reading the + first frame. This feature is not yet implemented but the argument is + provided for forward API compatibility when the default is changed to + True in a future release. For now, if you need to decompress multiple + frames, use an API like :py:meth:`ZstdCompressor.stream_reader` with + ``read_across_frames=True``. + + ``allow_extra_data`` controls how to handle extra input data after a + fully decoded frame. If False, any extra data (which could be a valid + zstd frame) will result in ``ZstdError`` being raised. If True, extra + data is silently ignored. The default will likely change to False in a + future release when ``read_across_frames`` defaults to True. + + If the input contains extra data after a full frame, that extra input + data is silently ignored. This behavior is undesirable in many scenarios + and will likely be changed or controllable in a future release (see + #181). + + If the frame header of the compressed data does not contain the content + size, ``max_output_size`` must be specified or ``ZstdError`` will be + raised. An allocation of size ``max_output_size`` will be performed and an + attempt will be made to perform decompression into that buffer. If the + buffer is too small or cannot be allocated, ``ZstdError`` will be + raised. The buffer will be resized if it is too large. + + Uncompressed data could be much larger than compressed data. As a result, + calling this function could result in a very large memory allocation + being performed to hold the uncompressed data. This could potentially + result in ``MemoryError`` or system memory swapping. 
If you don't need + the full output data in a single contiguous array in memory, consider + using streaming decompression for more resilient memory behavior. + + Usage: + + >>> dctx = zstandard.ZstdDecompressor() + >>> decompressed = dctx.decompress(data) + + If the compressed data doesn't have its content size embedded within it, + decompression can be attempted by specifying the ``max_output_size`` + argument: + + >>> dctx = zstandard.ZstdDecompressor() + >>> uncompressed = dctx.decompress(data, max_output_size=1048576) + + Ideally, ``max_output_size`` will be identical to the decompressed + output size. + + .. important:: + + If the exact size of decompressed data is unknown (not passed in + explicitly and not stored in the zstd frame), for performance + reasons it is encouraged to use a streaming API. + + :param data: + Compressed data to decompress. + :param max_output_size: + Integer max size of response. + + If ``0``, there is no limit and we can attempt to allocate an output + buffer of infinite size. + :return: + ``bytes`` representing decompressed output. 
+ """ + + if read_across_frames: + raise ZstdError( + "ZstdDecompressor.read_across_frames=True is not yet implemented" + ) + + self._ensure_dctx() + + data_buffer = ffi.from_buffer(data) + + output_size = lib.ZSTD_getFrameContentSize( + data_buffer, len(data_buffer) + ) + + if output_size == lib.ZSTD_CONTENTSIZE_ERROR: + raise ZstdError("error determining content size from frame header") + elif output_size == 0: + return b"" + elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN: + if not max_output_size: + raise ZstdError( + "could not determine content size in frame header" + ) + + result_buffer = ffi.new("char[]", max_output_size) + result_size = max_output_size + output_size = 0 + else: + result_buffer = ffi.new("char[]", output_size) + result_size = output_size + + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = result_buffer + out_buffer.size = result_size + out_buffer.pos = 0 + + in_buffer = ffi.new("ZSTD_inBuffer *") + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError("decompression error: %s" % _zstd_error(zresult)) + elif zresult: + raise ZstdError( + "decompression error: did not decompress full frame" + ) + elif output_size and out_buffer.pos != output_size: + raise ZstdError( + "decompression error: decompressed %d bytes; expected %d" + % (zresult, output_size) + ) + elif not allow_extra_data and in_buffer.pos < in_buffer.size: + count = in_buffer.size - in_buffer.pos + + raise ZstdError( + "compressed input contains %d bytes of unused data, which is disallowed" + % count + ) + + return ffi.buffer(result_buffer, out_buffer.pos)[:] + + def stream_reader( + self, + source, + read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE, + read_across_frames=False, + closefd=True, + ): + """ + Read-only stream wrapper that performs decompression. 
+ + This method obtains an object that conforms to the ``io.RawIOBase`` + interface and performs transparent decompression via ``read()`` + operations. Source data is obtained by calling ``read()`` on a + source stream or object implementing the buffer protocol. + + See :py:class:`zstandard.ZstdDecompressionReader` for more documentation + and usage examples. + + :param source: + Source of compressed data to decompress. Can be any object + with a ``read(size)`` method or that conforms to the buffer protocol. + :param read_size: + Integer number of bytes to read from the source and feed into the + compressor at a time. + :param read_across_frames: + Whether to read data across multiple zstd frames. If False, + decompression is stopped at frame boundaries. + :param closefd: + Whether to close the source stream when this instance is closed. + :return: + :py:class:`zstandard.ZstdDecompressionReader`. + """ + self._ensure_dctx() + return ZstdDecompressionReader( + self, source, read_size, read_across_frames, closefd=closefd + ) + + def decompressobj( + self, + write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE, + read_across_frames=False, + ): + """Obtain a standard library compatible incremental decompressor. + + See :py:class:`ZstdDecompressionObj` for more documentation + and usage examples. + + :param write_size: size of internal output buffer to collect decompressed + chunks in. + :param read_across_frames: whether to read across multiple zstd frames. + If False, reading stops after 1 frame and subsequent decompress + attempts will raise an exception. 
+ :return: + :py:class:`zstandard.ZstdDecompressionObj` + """ + if write_size < 1: + raise ValueError("write_size must be positive") + + self._ensure_dctx() + return ZstdDecompressionObj( + self, write_size=write_size, read_across_frames=read_across_frames + ) + + def read_to_iter( + self, + reader, + read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE, + skip_bytes=0, + ): + """Read compressed data to an iterator of uncompressed chunks. + + This method will read data from ``reader``, feed it to a decompressor, + and emit ``bytes`` chunks representing the decompressed result. + + >>> dctx = zstandard.ZstdDecompressor() + >>> for chunk in dctx.read_to_iter(fh): + ... # Do something with original data. + + ``read_to_iter()`` accepts an object with a ``read(size)`` method that + will return compressed bytes or an object conforming to the buffer + protocol. + + ``read_to_iter()`` returns an iterator whose elements are chunks of the + decompressed data. + + The size of requested ``read()`` from the source can be specified: + + >>> dctx = zstandard.ZstdDecompressor() + >>> for chunk in dctx.read_to_iter(fh, read_size=16384): + ... pass + + It is also possible to skip leading bytes in the input data: + + >>> dctx = zstandard.ZstdDecompressor() + >>> for chunk in dctx.read_to_iter(fh, skip_bytes=1): + ... pass + + .. tip:: + + Skipping leading bytes is useful if the source data contains extra + *header* data. Traditionally, you would need to create a slice or + ``memoryview`` of the data you want to decompress. This would create + overhead. It is more efficient to pass the offset into this API. + + Similarly to :py:meth:`ZstdCompressor.read_to_iter`, the consumer of the + iterator controls when data is decompressed. If the iterator isn't consumed, + decompression is put on hold. 
+ + When ``read_to_iter()`` is passed an object conforming to the buffer protocol, + the behavior may seem similar to what occurs when the simple decompression + API is used. However, this API works when the decompressed size is unknown. + Furthermore, if feeding large inputs, the decompressor will work in chunks + instead of performing a single operation. + + :param reader: + Source of compressed data. Can be any object with a + ``read(size)`` method or any object conforming to the buffer + protocol. + :param read_size: + Integer size of data chunks to read from ``reader`` and feed into + the decompressor. + :param write_size: + Integer size of data chunks to emit from iterator. + :param skip_bytes: + Integer number of bytes to skip over before sending data into + the decompressor. + :return: + Iterator of ``bytes`` representing uncompressed data. + """ + + if skip_bytes >= read_size: + raise ValueError("skip_bytes must be smaller than read_size") + + if hasattr(reader, "read"): + have_read = True + elif hasattr(reader, "__getitem__"): + have_read = False + buffer_offset = 0 + size = len(reader) + else: + raise ValueError( + "must pass an object with a read() method or " + "conforms to buffer protocol" + ) + + if skip_bytes: + if have_read: + reader.read(skip_bytes) + else: + if skip_bytes > size: + raise ValueError("skip_bytes larger than first input chunk") + + buffer_offset = skip_bytes + + self._ensure_dctx() + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + dst_buffer = ffi.new("char[]", write_size) + out_buffer.dst = dst_buffer + out_buffer.size = len(dst_buffer) + out_buffer.pos = 0 + + while True: + assert out_buffer.pos == 0 + + if have_read: + read_result = reader.read(read_size) + else: + remaining = size - buffer_offset + slice_size = min(remaining, read_size) + read_result = reader[buffer_offset : buffer_offset + slice_size] + buffer_offset += slice_size + + # No new input. Break out of read loop. 
+ if not read_result: + break + + # Feed all read data into decompressor and emit output until + # exhausted. + read_buffer = ffi.from_buffer(read_result) + in_buffer.src = read_buffer + in_buffer.size = len(read_buffer) + in_buffer.pos = 0 + + while in_buffer.pos < in_buffer.size: + assert out_buffer.pos == 0 + + zresult = lib.ZSTD_decompressStream( + self._dctx, out_buffer, in_buffer + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd decompress error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:] + out_buffer.pos = 0 + yield data + + if zresult == 0: + return + + # Repeat loop to collect more input data. + continue + + # If we get here, input is exhausted. + + def stream_writer( + self, + writer, + write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE, + write_return_read=True, + closefd=True, + ): + """ + Push-based stream wrapper that performs decompression. + + This method constructs a stream wrapper that conforms to the + ``io.RawIOBase`` interface and performs transparent decompression + when writing to a wrapper stream. + + See :py:class:`zstandard.ZstdDecompressionWriter` for more documentation + and usage examples. + + :param writer: + Destination for decompressed output. Can be any object with a + ``write(data)``. + :param write_size: + Integer size of chunks to ``write()`` to ``writer``. + :param write_return_read: + Whether ``write()`` should return the number of bytes of input + consumed. If False, ``write()`` returns the number of bytes sent + to the inner stream. + :param closefd: + Whether to ``close()`` the inner stream when this stream is closed. 
+ :return: + :py:class:`zstandard.ZstdDecompressionWriter` + """ + if not hasattr(writer, "write"): + raise ValueError("must pass an object with a write() method") + + return ZstdDecompressionWriter( + self, + writer, + write_size, + write_return_read, + closefd=closefd, + ) + + def copy_stream( + self, + ifh, + ofh, + read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE, + write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE, + ): + """ + Copy data between streams, decompressing in the process. + + Compressed data will be read from ``ifh``, decompressed, and written + to ``ofh``. + + >>> dctx = zstandard.ZstdDecompressor() + >>> dctx.copy_stream(ifh, ofh) + + e.g. to decompress a file to another file: + + >>> dctx = zstandard.ZstdDecompressor() + >>> with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh: + ... dctx.copy_stream(ifh, ofh) + + The size of chunks being ``read()`` and ``write()`` from and to the + streams can be specified: + + >>> dctx = zstandard.ZstdDecompressor() + >>> dctx.copy_stream(ifh, ofh, read_size=8192, write_size=16384) + + :param ifh: + Source stream to read compressed data from. + + Must have a ``read()`` method. + :param ofh: + Destination stream to write uncompressed data to. + + Must have a ``write()`` method. + :param read_size: + The number of bytes to ``read()`` from the source in a single + operation. + :param write_size: + The number of bytes to ``write()`` to the destination in a single + operation. + :return: + 2-tuple of integers representing the number of bytes read and + written, respectively. 
+ """ + + if not hasattr(ifh, "read"): + raise ValueError("first argument must have a read() method") + if not hasattr(ofh, "write"): + raise ValueError("second argument must have a write() method") + + self._ensure_dctx() + + in_buffer = ffi.new("ZSTD_inBuffer *") + out_buffer = ffi.new("ZSTD_outBuffer *") + + dst_buffer = ffi.new("char[]", write_size) + out_buffer.dst = dst_buffer + out_buffer.size = write_size + out_buffer.pos = 0 + + total_read, total_write = 0, 0 + + # Read all available input. + while True: + data = ifh.read(read_size) + if not data: + break + + data_buffer = ffi.from_buffer(data) + total_read += len(data_buffer) + in_buffer.src = data_buffer + in_buffer.size = len(data_buffer) + in_buffer.pos = 0 + + # Flush all read data to output. + while in_buffer.pos < in_buffer.size: + zresult = lib.ZSTD_decompressStream( + self._dctx, out_buffer, in_buffer + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "zstd decompressor error: %s" % _zstd_error(zresult) + ) + + if out_buffer.pos: + ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos)) + total_write += out_buffer.pos + out_buffer.pos = 0 + + # Continue loop to keep reading. + + return total_read, total_write + + def decompress_content_dict_chain(self, frames): + """ + Decompress a series of frames using the content dictionary chaining technique. + + Such a list of frames is produced by compressing discrete inputs where + each non-initial input is compressed with a *prefix* dictionary consisting + of the content of the previous input. + + For example, say you have the following inputs: + + >>> inputs = [b"input 1", b"input 2", b"input 3"] + + The zstd frame chain consists of: + + 1. ``b"input 1"`` compressed in standalone/discrete mode + 2. ``b"input 2"`` compressed using ``b"input 1"`` as a *prefix* dictionary + 3. ``b"input 3"`` compressed using ``b"input 2"`` as a *prefix* dictionary + + Each zstd frame **must** have the content size written. 
+ + The following Python code can be used to produce a *prefix dictionary chain*: + + >>> def make_chain(inputs): + ... frames = [] + ... + ... # First frame is compressed in standalone/discrete mode. + ... zctx = zstandard.ZstdCompressor() + ... frames.append(zctx.compress(inputs[0])) + ... + ... # Subsequent frames use the previous fulltext as a prefix dictionary + ... for i, raw in enumerate(inputs[1:]): + ... dict_data = zstandard.ZstdCompressionDict( + ... inputs[i], dict_type=zstandard.DICT_TYPE_RAWCONTENT) + ... zctx = zstandard.ZstdCompressor(dict_data=dict_data) + ... frames.append(zctx.compress(raw)) + ... + ... return frames + + ``decompress_content_dict_chain()`` returns the uncompressed data of the last + element in the input chain. + + .. note:: + + It is possible to implement *prefix dictionary chain* decompression + on top of other APIs. However, this function will likely be faster - + especially for long input chains - as it avoids the overhead of + instantiating and passing around intermediate objects between + multiple functions. + + :param frames: + List of ``bytes`` holding compressed zstd frames. + :return: + """ + if not isinstance(frames, list): + raise TypeError("argument must be a list") + + if not frames: + raise ValueError("empty input chain") + + # First chunk should not be using a dictionary. We handle it specially. + chunk = frames[0] + if not isinstance(chunk, bytes): + raise ValueError("chunk 0 must be bytes") + + # All chunks should be zstd frames and should have content size set. 
+ chunk_buffer = ffi.from_buffer(chunk) + params = ffi.new("ZSTD_frameHeader *") + zresult = lib.ZSTD_getFrameHeader( + params, chunk_buffer, len(chunk_buffer) + ) + if lib.ZSTD_isError(zresult): + raise ValueError("chunk 0 is not a valid zstd frame") + elif zresult: + raise ValueError("chunk 0 is too small to contain a zstd frame") + + if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN: + raise ValueError("chunk 0 missing content size in frame") + + self._ensure_dctx(load_dict=False) + + last_buffer = ffi.new("char[]", params.frameContentSize) + + out_buffer = ffi.new("ZSTD_outBuffer *") + out_buffer.dst = last_buffer + out_buffer.size = len(last_buffer) + out_buffer.pos = 0 + + in_buffer = ffi.new("ZSTD_inBuffer *") + in_buffer.src = chunk_buffer + in_buffer.size = len(chunk_buffer) + in_buffer.pos = 0 + + zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "could not decompress chunk 0: %s" % _zstd_error(zresult) + ) + elif zresult: + raise ZstdError("chunk 0 did not decompress full frame") + + # Special case of chain length of 1 + if len(frames) == 1: + return ffi.buffer(last_buffer, len(last_buffer))[:] + + i = 1 + while i < len(frames): + chunk = frames[i] + if not isinstance(chunk, bytes): + raise ValueError("chunk %d must be bytes" % i) + + chunk_buffer = ffi.from_buffer(chunk) + zresult = lib.ZSTD_getFrameHeader( + params, chunk_buffer, len(chunk_buffer) + ) + if lib.ZSTD_isError(zresult): + raise ValueError("chunk %d is not a valid zstd frame" % i) + elif zresult: + raise ValueError( + "chunk %d is too small to contain a zstd frame" % i + ) + + if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN: + raise ValueError("chunk %d missing content size in frame" % i) + + dest_buffer = ffi.new("char[]", params.frameContentSize) + + out_buffer.dst = dest_buffer + out_buffer.size = len(dest_buffer) + out_buffer.pos = 0 + + in_buffer.src = chunk_buffer + in_buffer.size = 
len(chunk_buffer) + in_buffer.pos = 0 + + zresult = lib.ZSTD_decompressStream( + self._dctx, out_buffer, in_buffer + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "could not decompress chunk %d: %s" % _zstd_error(zresult) + ) + elif zresult: + raise ZstdError("chunk %d did not decompress full frame" % i) + + last_buffer = dest_buffer + i += 1 + + return ffi.buffer(last_buffer, len(last_buffer))[:] + + def multi_decompress_to_buffer( + self, frames, decompressed_sizes=None, threads=0 + ): + """ + Decompress multiple zstd frames to output buffers as a single operation. + + (Experimental. Not available in CFFI backend.) + + Compressed frames can be passed to the function as a + ``BufferWithSegments``, a ``BufferWithSegmentsCollection``, or as a + list containing objects that conform to the buffer protocol. For best + performance, pass a ``BufferWithSegmentsCollection`` or a + ``BufferWithSegments``, as minimal input validation will be done for + that type. If calling from Python (as opposed to C), constructing one + of these instances may add overhead cancelling out the performance + overhead of validation for list inputs. + + Returns a ``BufferWithSegmentsCollection`` containing the decompressed + data. All decompressed data is allocated in a single memory buffer. The + ``BufferWithSegments`` instance tracks which objects are at which offsets + and their respective lengths. + + >>> dctx = zstandard.ZstdDecompressor() + >>> results = dctx.multi_decompress_to_buffer([b'...', b'...']) + + The decompressed size of each frame MUST be discoverable. It can either be + embedded within the zstd frame or passed in via the ``decompressed_sizes`` + argument. + + The ``decompressed_sizes`` argument is an object conforming to the buffer + protocol which holds an array of 64-bit unsigned integers in the machine's + native format defining the decompressed sizes of each frame. If this argument + is passed, it avoids having to scan each frame for its decompressed size. 
+ This frame scanning can add noticeable overhead in some scenarios. + + >>> frames = [...] + >>> sizes = struct.pack('=QQQQ', len0, len1, len2, len3) + >>> + >>> dctx = zstandard.ZstdDecompressor() + >>> results = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes) + + .. note:: + + It is possible to pass a ``mmap.mmap()`` instance into this function by + wrapping it with a ``BufferWithSegments`` instance (which will define the + offsets of frames within the memory mapped region). + + This function is logically equivalent to performing + :py:meth:`ZstdCompressor.decompress` on each input frame and returning the + result. + + This function exists to perform decompression on multiple frames as fast + as possible by having as little overhead as possible. Since decompression is + performed as a single operation and since the decompressed output is stored in + a single buffer, extra memory allocations, Python objects, and Python function + calls are avoided. This is ideal for scenarios where callers know up front that + they need to access data for multiple frames, such as when *delta chains* are + being used. + + Currently, the implementation always spawns multiple threads when requested, + even if the amount of work to do is small. In the future, it will be smarter + about avoiding threads and their associated overhead when the amount of + work to do is small. + + :param frames: + Source defining zstd frames to decompress. + :param decompressed_sizes: + Array of integers representing sizes of decompressed zstd frames. + :param threads: + How many threads to use for decompression operations. + + Negative values will use the same number of threads as logical CPUs + on the machine. Values ``0`` or ``1`` use a single thread. 
+ :return: + ``BufferWithSegmentsCollection`` + """ + raise NotImplementedError() + + def _ensure_dctx(self, load_dict=True): + lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only) + + if self._max_window_size: + zresult = lib.ZSTD_DCtx_setMaxWindowSize( + self._dctx, self._max_window_size + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "unable to set max window size: %s" % _zstd_error(zresult) + ) + + zresult = lib.ZSTD_DCtx_setParameter( + self._dctx, lib.ZSTD_d_format, self._format + ) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "unable to set decoding format: %s" % _zstd_error(zresult) + ) + + if self._dict_data and load_dict: + zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict) + if lib.ZSTD_isError(zresult): + raise ZstdError( + "unable to reference prepared dictionary: %s" + % _zstd_error(zresult) + ) diff --git a/venv/lib/python3.10/site-packages/zstandard/py.typed b/venv/lib/python3.10/site-packages/zstandard/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391