diff --git a/ckpts/universal/global_step20/zero/23.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/23.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2551ac7ca85372a54ca62a7ab5068ac960017e30
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/23.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14852ba2ff663473d4b8abadecc2acb8900a8c3f39c5b72d4dd63dd29e9c82ef
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/8.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/8.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..650eefaf3618fe04db37c4138dd505214b863e4a
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/8.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d05c261acb81f9b7773b2194fbfd1eb29698fb841d5e984d5ec646218bf1b14e
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/8.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/8.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..67d57c56eab6eeaef563a2a63d26fb4020519663
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/8.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f30e6c4a37dd730dedab85a42489ba79ea6a66d3536431218494667a67c4ea94
+size 16778411
diff --git a/ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9bd2ffe1a7014da98d04f8bb4175664339147a5b
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0679da1c234a6312fbb3a1a60250d18394de75c4a8e9eabd915e2f57355b62e
+size 33555627
diff --git a/lm-evaluation-harness/tests/testdata/gpt3_test_f307d52964c295e2005c5e782b688c24388e0cecadf29f1e6fc7f394236ea9c0.pkl b/lm-evaluation-harness/tests/testdata/gpt3_test_f307d52964c295e2005c5e782b688c24388e0cecadf29f1e6fc7f394236ea9c0.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..e056fc1afdd78b1d7bec2610bc4e8962ba816bde
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/gpt3_test_f307d52964c295e2005c5e782b688c24388e0cecadf29f1e6fc7f394236ea9c0.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f11de4b3d45d1590ba78935e824ae86ef75bbc370df500f89dde2c397d11c01a
+size 1297
diff --git a/lm-evaluation-harness/tests/testdata/textsynth_test_0a89c2739f9598b4be2674b0a8e43931d7f3f0b696970bcba31f9b52bdf12297.pkl b/lm-evaluation-harness/tests/testdata/textsynth_test_0a89c2739f9598b4be2674b0a8e43931d7f3f0b696970bcba31f9b52bdf12297.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..258d73cd68b190d87670edd3c11210c97e59ab91
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/textsynth_test_0a89c2739f9598b4be2674b0a8e43931d7f3f0b696970bcba31f9b52bdf12297.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd0a3c44334dc2b7c48aa448d0a2c2ffde3c9a28e6c29d4ed175cbb22334bef3
+size 1805
diff --git a/lm-evaluation-harness/tests/testdata/textsynth_test_0c1c14571add7903b89e588c8212572b95bb57b334fc0752c89a7e045a5f63ae.pkl b/lm-evaluation-harness/tests/testdata/textsynth_test_0c1c14571add7903b89e588c8212572b95bb57b334fc0752c89a7e045a5f63ae.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..df0fab88f67ddcaeb75bb3436da9d944aef821f3
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/textsynth_test_0c1c14571add7903b89e588c8212572b95bb57b334fc0752c89a7e045a5f63ae.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0767f0abf685cd948057def299fa0b97dc9ebdad4e356dd708a7d4bde45ba71
+size 1853
diff --git a/lm-evaluation-harness/tests/testdata/textsynth_test_6d6c62dd70caaa208712bf766deaf419cfac89538d4ab7745621e339394c0c23.pkl b/lm-evaluation-harness/tests/testdata/textsynth_test_6d6c62dd70caaa208712bf766deaf419cfac89538d4ab7745621e339394c0c23.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..1c627edfd96299ad364c96a4eae2ac15f4acea88
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/textsynth_test_6d6c62dd70caaa208712bf766deaf419cfac89538d4ab7745621e339394c0c23.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36023aa22487e0d2de08cb3ecabd0cdbd6c887c63c7006b3544b7809bfcb58bc
+size 1806
diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ae2f9a7b76899e422084b04cad2cd1447bed350
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/config.yaml
@@ -0,0 +1,43 @@
+wandb_version: 1
+
+_wandb:
+  desc: null
+  value:
+    python_version: 3.10.12
+    cli_version: 0.17.0
+    framework: huggingface
+    huggingface_version: 4.40.2
+    is_jupyter_run: false
+    is_kaggle_kernel: false
+    start_time: 1715682602
+    t:
+      1:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      2:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      3:
+      - 23
+      4: 3.10.12
+      5: 0.17.0
+      6: 4.40.2
+      8:
+      - 5
+      13: linux-x86_64
diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/output.log b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/output.log
new file mode 100644
index 0000000000000000000000000000000000000000..8e24a5915bb50f3a86053a39a87200bb5e1ede27
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/output.log
@@ -0,0 +1,28 @@
+
+2024-05-14:10:30:02,599 INFO [__main__.py:251] Verbosity set to INFO
+2024-05-14:10:30:07,079 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi']
+2024-05-14:10:30:07,081 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+2024-05-14:10:30:07,081 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'}
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+  return func(*args, **kwargs)
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+  warnings.warn(
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+  warnings.warn(
+You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565
+2024-05-14:10:30:13,626 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean
+2024-05-14:10:30:13,626 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
+[2024-05-14 10:30:13,248] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA
+You can avoid this message in future by passing the argument `trust_remote_code=True`.
+Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+  warnings.warn(
+2024-05-14:10:30:15,078 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+2024-05-14:10:30:15,078 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+2024-05-14:10:30:15,097 INFO [task.py:395] Building contexts for indiccopa-hi on rank 2...
+100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 110221.03it/s]
+Passed argument batch_size = auto:1. Detecting largest batch size
+2024-05-14:10:30:16,729 INFO [evaluator.py:379] Running loglikelihood requests
+/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+  warnings.warn(
+Determined largest batch size: 64
\ No newline at end of file
diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/requirements.txt
@@ -0,0 +1,163 @@
+DataProperty==1.0.1
+GitPython==3.1.43
+Jinja2==3.1.3
+Markdown==3.6
+MarkupSafe==2.1.5
+Pillow-SIMD==7.0.0.post3
+PyYAML==6.0
+Werkzeug==3.0.2
+absl-py==2.1.0
+accelerate==0.30.1
+aiohttp==3.9.4
+aiosignal==1.3.1
+antlr4-python3-runtime==4.9.3
+anyio==4.3.0
+async-timeout==4.0.3
+attrs==23.2.0
+av==9.2.0
+cachetools==5.3.3
+certifi==2024.2.2
+cffi==1.15.1
+cfgv==3.4.0
+chardet==5.2.0
+charset-normalizer==3.3.2
+click==8.1.7
+cmake==3.29.2
+colorama==0.4.6
+datasets==2.19.1
+deepspeed==0.12.4+hpu.synapse.v1.15.1
+dill==0.3.8
+distlib==0.3.8
+distro==1.9.0
+docker-pycreds==0.4.0
+einops==0.8.0
+evaluate==0.4.2
+exceptiongroup==1.2.0
+expecttest==0.2.1
+filelock==3.13.4
+frozenlist==1.4.1
+fsspec==2024.3.1
+gitdb==4.0.11
+google-auth-oauthlib==0.4.6
+google-auth==2.29.0
+grpcio==1.62.1
+h11==0.14.0
+habana-media-loader==1.15.1.15
+habana-pyhlml==1.15.1.15
+habana-torch-dataloader==1.15.1.15
+habana-torch-plugin==1.15.1.15
+habana_gpu_migration==1.15.1.15
+habana_quantization_toolkit==1.15.1.15
+hjson==3.1.0
+httpcore==1.0.5
+httpx==0.27.0
+huggingface-hub==0.23.0
+identify==2.5.35
+idna==3.7
+importlib_resources==6.4.0
+iniconfig==2.0.0
+joblib==1.4.2
+jsonlines==4.0.0
+lightning-habana==1.4.0
+lightning-utilities==0.11.2
+lightning==2.2.0.post0
+lm_eval==0.3.0
+lm_eval==0.4.2
+lm_eval==0.4.2
+lm_eval==0.4.2
+mbstrdecoder==1.1.3
+more-itertools==10.2.0
+mpi4py==3.1.4
+mpmath==1.3.0
+multidict==6.0.5
+multiprocess==0.70.16
+networkx==3.3
+ninja==1.11.1.1
+nltk==3.8.1
+nodeenv==1.8.0
+numexpr==2.10.0
+numpy==1.23.5
+oauthlib==3.2.2
+omegaconf==2.3.0
+openai==1.29.0
+packaging==24.0
+pandas==2.0.1
+pathspec==0.12.1
+pathvalidate==3.2.0
+peft==0.10.0
+perfetto==0.7.0
+pip==22.0.2
+pip==23.3.1
+platformdirs==4.2.0
+pluggy==1.4.0
+portalocker==2.8.2
+pre-commit==3.3.3
+protobuf==3.20.3
+psutil==5.9.8
+py-cpuinfo==9.0.0
+pyarrow-hotfix==0.6
+pyarrow==16.0.0
+pyasn1==0.6.0
+pyasn1_modules==0.4.0
+pybind11==2.10.4
+pycountry==23.12.11
+pycparser==2.22
+pydantic==1.10.13
+pynvml==8.0.4
+pytablewriter==1.2.0
+pytest==8.1.1
+python-dateutil==2.9.0.post0
+pytorch-lightning==2.2.2
+pytz==2024.1
+regex==2023.5.5
+requests-oauthlib==2.0.0
+requests==2.31.0
+rouge_score==0.1.2
+rsa==4.9
+sacrebleu==1.5.0
+safetensors==0.4.3
+scikit-learn==1.4.2
+scipy==1.13.0
+sentencepiece==0.2.0
+sentry-sdk==2.1.1
+setproctitle==1.3.3
+setuptools==59.6.0
+setuptools==69.5.1
+six==1.16.0
+smmap==5.0.1
+sniffio==1.3.1
+sqlitedict==2.1.0
+symengine==0.11.0
+sympy==1.12
+tabledata==1.3.3
+tcolorpy==0.1.6
+tdqm==0.0.1
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.1
+tensorboard==2.11.2
+threadpoolctl==3.5.0
+tokenizers==0.19.1
+tomli==2.0.1
+torch==2.2.0a0+git8964477
+torch_tb_profiler==0.4.0
+torchaudio==2.2.0+08901ad
+torchdata==0.7.1+5e6f7b7
+torchmetrics==1.3.2
+torchtext==0.17.0+400da5c
+torchvision==0.17.0+b2383d4
+tqdm-multiprocess==0.0.11
+tqdm==4.66.2
+transformers==4.40.2
+typepy==1.3.2
+typing_extensions==4.11.0
+tzdata==2024.1
+urllib3==1.26.18
+virtualenv==20.25.1
+wandb==0.17.0
+wheel==0.37.1
+wheel==0.43.0
+word2number==1.1
+xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..7c7c336bc6ef7bac22399f8a8a9cef758de4b15e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T10:30:02.466481", + "startedAt": "2024-05-14T10:30:02.042367", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3392.375447368421, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.003, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.003, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 76.92179107666016 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..c3ad6a3530e72181b3b2e93339791388b04999cd --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 21}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/run-s6mpkihm.wandb b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/run-s6mpkihm.wandb new file mode 100644 index 0000000000000000000000000000000000000000..85fe7a1a6a00a5f46cceeeffe6fa0be5283fcf5c Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103002-s6mpkihm/run-s6mpkihm.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-q880yywu/files/output.log b/lm-evaluation-harness/wandb/run-20240514_103117-q880yywu/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..3d8365cf544f6630223fcb9676e3cc6f4b1e69d3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-q880yywu/files/output.log @@ -0,0 +1,42 @@ + +2024-05-14:10:31:17,654 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:10:31:22,053 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:10:31:22,056 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:10:31:22,057 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step20'} +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/data/cronscript/ckpts//hf_ckpt//global_step20'. Use `repo_type` argument if needed. 
+The above exception was the direct cause of the following exception:
+Traceback (most recent call last):
+  File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+    return _run_code(code, main_globals, None,
+  File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+    exec(code, run_globals)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in
+    cli_evaluate()
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+    results = evaluator.simple_evaluate(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+    return fn(*args, **kwargs)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+    lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+    return cls(**args, **args2)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+    self._get_config(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+    self._config = transformers.AutoConfig.from_pretrained(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained
+    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict
+    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict
+    resolved_config_file = cached_file(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 462, in cached_file
+    raise EnvironmentError(
+OSError: Incorrect path_or_model_id: '/data/cronscript/ckpts//hf_ckpt//global_step20'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
\ No newline at end of file
diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-q880yywu/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_103117-q880yywu/files/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_103117-q880yywu/files/requirements.txt
@@ -0,0 +1,163 @@
+DataProperty==1.0.1
+GitPython==3.1.43
+Jinja2==3.1.3
+Markdown==3.6
+MarkupSafe==2.1.5
+Pillow-SIMD==7.0.0.post3
+PyYAML==6.0
+Werkzeug==3.0.2
+absl-py==2.1.0
+accelerate==0.30.1
+aiohttp==3.9.4
+aiosignal==1.3.1
+antlr4-python3-runtime==4.9.3
+anyio==4.3.0
+async-timeout==4.0.3
+attrs==23.2.0
+av==9.2.0
+cachetools==5.3.3
+certifi==2024.2.2
+cffi==1.15.1
+cfgv==3.4.0
+chardet==5.2.0
+charset-normalizer==3.3.2
+click==8.1.7
+cmake==3.29.2
+colorama==0.4.6
+datasets==2.19.1
+deepspeed==0.12.4+hpu.synapse.v1.15.1
+dill==0.3.8
+distlib==0.3.8
+distro==1.9.0
+docker-pycreds==0.4.0
+einops==0.8.0
+evaluate==0.4.2
+exceptiongroup==1.2.0
+expecttest==0.2.1
+filelock==3.13.4
+frozenlist==1.4.1
+fsspec==2024.3.1
+gitdb==4.0.11
+google-auth-oauthlib==0.4.6
+google-auth==2.29.0
+grpcio==1.62.1
+h11==0.14.0
+habana-media-loader==1.15.1.15
+habana-pyhlml==1.15.1.15
+habana-torch-dataloader==1.15.1.15
+habana-torch-plugin==1.15.1.15
+habana_gpu_migration==1.15.1.15
+habana_quantization_toolkit==1.15.1.15
+hjson==3.1.0
+httpcore==1.0.5
+httpx==0.27.0
+huggingface-hub==0.23.0
+identify==2.5.35
+idna==3.7
+importlib_resources==6.4.0
+iniconfig==2.0.0
+joblib==1.4.2
+jsonlines==4.0.0
+lightning-habana==1.4.0
+lightning-utilities==0.11.2
+lightning==2.2.0.post0
+lm_eval==0.3.0
+lm_eval==0.4.2
+lm_eval==0.4.2
+lm_eval==0.4.2
+mbstrdecoder==1.1.3
+more-itertools==10.2.0
+mpi4py==3.1.4
+mpmath==1.3.0
+multidict==6.0.5
+multiprocess==0.70.16
+networkx==3.3
+ninja==1.11.1.1
+nltk==3.8.1
+nodeenv==1.8.0
+numexpr==2.10.0
+numpy==1.23.5
+oauthlib==3.2.2
+omegaconf==2.3.0
+openai==1.29.0
+packaging==24.0
+pandas==2.0.1
+pathspec==0.12.1
+pathvalidate==3.2.0
+peft==0.10.0
+perfetto==0.7.0
+pip==22.0.2
+pip==23.3.1
+platformdirs==4.2.0
+pluggy==1.4.0
+portalocker==2.8.2
+pre-commit==3.3.3
+protobuf==3.20.3
+psutil==5.9.8
+py-cpuinfo==9.0.0
+pyarrow-hotfix==0.6
+pyarrow==16.0.0
+pyasn1==0.6.0
+pyasn1_modules==0.4.0
+pybind11==2.10.4
+pycountry==23.12.11
+pycparser==2.22
+pydantic==1.10.13
+pynvml==8.0.4
+pytablewriter==1.2.0
+pytest==8.1.1
+python-dateutil==2.9.0.post0
+pytorch-lightning==2.2.2
+pytz==2024.1
+regex==2023.5.5
+requests-oauthlib==2.0.0
+requests==2.31.0
+rouge_score==0.1.2
+rsa==4.9
+sacrebleu==1.5.0
+safetensors==0.4.3
+scikit-learn==1.4.2
+scipy==1.13.0
+sentencepiece==0.2.0
+sentry-sdk==2.1.1
+setproctitle==1.3.3
+setuptools==59.6.0
+setuptools==69.5.1
+six==1.16.0
+smmap==5.0.1
+sniffio==1.3.1
+sqlitedict==2.1.0
+symengine==0.11.0
+sympy==1.12
+tabledata==1.3.3
+tcolorpy==0.1.6
+tdqm==0.0.1
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.1
+tensorboard==2.11.2
+threadpoolctl==3.5.0
+tokenizers==0.19.1
+tomli==2.0.1
+torch==2.2.0a0+git8964477
+torch_tb_profiler==0.4.0
+torchaudio==2.2.0+08901ad
+torchdata==0.7.1+5e6f7b7
+torchmetrics==1.3.2
+torchtext==0.17.0+400da5c
+torchvision==0.17.0+b2383d4
+tqdm-multiprocess==0.0.11
+tqdm==4.66.2
+transformers==4.40.2
+typepy==1.3.2
+typing_extensions==4.11.0
+tzdata==2024.1
+urllib3==1.26.18
+virtualenv==20.25.1
+wandb==0.17.0
+wheel==0.37.1
+wheel==0.43.0
+word2number==1.1
+xxhash==3.4.1
+yamllint==1.35.1
+yarl==1.9.4
+zstandard==0.22.0
\ No newline at end of file
diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-q880yywu/run-q880yywu.wandb b/lm-evaluation-harness/wandb/run-20240514_103117-q880yywu/run-q880yywu.wandb
new file mode 100644
index 0000000000000000000000000000000000000000..95c486b8ef589b73be37cf17269249d77f34b123
Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103117-q880yywu/run-q880yywu.wandb differ
diff --git a/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/config.yaml b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d3dcb7a04ca47a7eb8242ce67632d8ec27a02c3a
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/config.yaml
@@ -0,0 +1,43 @@
+wandb_version: 1
+
+_wandb:
+  desc: null
+  value:
+    python_version: 3.10.12
+    cli_version: 0.17.0
+    framework: huggingface
+    huggingface_version: 4.41.0
+    is_jupyter_run: false
+    is_kaggle_kernel: false
+    start_time: 1716396053
+    t:
+      1:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      2:
+      - 1
+      - 5
+      - 11
+      - 49
+      - 51
+      - 53
+      - 55
+      - 71
+      - 98
+      - 100
+      3:
+      - 23
+      4: 3.10.12
+      5: 0.17.0
+      6: 4.41.0
+      8:
+      - 5
+      13: linux-x86_64
diff --git a/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/output.log b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/output.log
new file mode 100644
index 0000000000000000000000000000000000000000..bef4ee0368c8b51a41717f69409664c2b3bbee40
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/output.log
@@ -0,0 +1,34 @@
+
+2024-05-22:16:40:54,677 INFO [__main__.py:251] Verbosity set to INFO
+2024-05-22:16:41:03,813 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+2024-05-22:16:41:03,814 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+2024-05-22:16:41:03,814 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'}
+2024-05-22:16:41:06,099 INFO [huggingface.py:164] Using device 'cuda'
+Traceback (most recent call last):
+  File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+    return _run_code(code, main_globals, None,
+  File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+    exec(code, run_globals)
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in
+    cli_evaluate()
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+    results = evaluator.simple_evaluate(
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+    return fn(*args, **kwargs)
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+    lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+    return cls(**args, **args2)
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+    self._get_config(
+  File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+    self._config = transformers.AutoConfig.from_pretrained(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+    resolved_config_file = cached_file(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
+    raise EnvironmentError(
+OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files.
\ No newline at end of file
diff --git a/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a3573b3bbfd5b190fed4ccaed4ac2846002aec22
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/requirements.txt
@@ -0,0 +1,155 @@
+DataProperty==1.0.1
+GitPython==3.1.43
+Jinja2==3.1.4
+Markdown==3.6
+MarkupSafe==2.1.5
+Pillow-SIMD==7.0.0.post3
+PyYAML==6.0
+Werkzeug==3.0.3
+absl-py==2.1.0
+accelerate==0.30.1
+aiohttp==3.9.5
+aiosignal==1.3.1
+async-timeout==4.0.3
+attrs==23.2.0
+av==9.2.0
+cachetools==5.3.3
+certifi==2024.2.2
+cffi==1.15.1
+cfgv==3.4.0
+chardet==5.2.0
+charset-normalizer==3.3.2
+click==8.1.7
+cmake==3.29.2
+colorama==0.4.6
+datasets==2.19.1
+deepspeed==0.12.4+hpu.synapse.v1.15.1
+dill==0.3.8
+distlib==0.3.8
+docker-pycreds==0.4.0
+einops==0.8.0
+evaluate==0.4.2
+exceptiongroup==1.2.1
+expecttest==0.2.1
+filelock==3.14.0
+frozenlist==1.4.1
+fsspec==2024.3.1
+gitdb==4.0.11
+google-auth-oauthlib==0.4.6
+google-auth==2.29.0
+grpcio==1.63.0
+habana-media-loader==1.15.1.15
+habana-pyhlml==1.15.1.15
+habana-torch-dataloader==1.15.1.15
+habana-torch-plugin==1.15.1.15
+habana_gpu_migration==1.15.1.15
+habana_quantization_toolkit==1.15.1.15
+hjson==3.1.0
+huggingface-hub==0.23.1
+identify==2.5.36
+idna==3.7
+iniconfig==2.0.0
+joblib==1.4.2
+jsonlines==4.0.0
+lightning-habana==1.4.0
+lightning-utilities==0.11.2
+lightning==2.2.0.post0
+lm_eval==0.4.2
+lm_eval==0.4.2
+lm_eval==0.4.2
+lxml==5.2.2
+mbstrdecoder==1.1.3
+more-itertools==10.2.0
+mpi4py==3.1.4
+mpmath==1.3.0
+multidict==6.0.5
+multiprocess==0.70.16
+networkx==3.3
+ninja==1.11.1.1
+nltk==3.8.1
+nodeenv==1.8.0
+numexpr==2.10.0
+numpy==1.23.5
+oauthlib==3.2.2
+packaging==24.0
+pandas==2.0.1
+pathspec==0.12.1
+pathvalidate==3.2.0
+peft==0.11.1
+perfetto==0.7.0
+pillow==10.3.0
+pip==22.0.2
+pip==23.3.1
+platformdirs==4.2.1
+pluggy==1.5.0
+portalocker==2.8.2
+pre-commit==3.3.3
+pretty-errors==1.2.25
+protobuf==3.20.3
+psutil==5.9.8
+py-cpuinfo==9.0.0
+pyarrow-hotfix==0.6
+pyarrow==16.1.0
+pyasn1==0.6.0
+pyasn1_modules==0.4.0
+pybind11==2.10.4
+pycparser==2.22
+pydantic==1.10.13
+pynvml==8.0.4
+pytablewriter==1.2.0
+pytest==8.2.0
+python-dateutil==2.9.0.post0
+pytorch-lightning==2.2.4
+pytz==2024.1
+regex==2023.5.5
+requests-oauthlib==2.0.0
+requests==2.31.0
+rouge_score==0.1.2
+rsa==4.9
+sacrebleu==2.4.2
+safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.0 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..ac766cf56c6f29ded47832390e878fc1ef3d2126 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-22T16:40:54.469634", + "startedAt": "2024-05-22T16:40:53.881781", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.5, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.63535690307617 + } + }, + "memory": { + "total": 1007.4379997253418 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..b9808f3983176475e24f71587a48d7b9194d61c0 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/logs/debug-internal.log @@ -0,0 +1,179 @@ +2024-05-22 16:40:53,902 INFO StreamThr :800 [internal.py:wandb_internal():85] W&B internal server running at pid: 800, started at: 2024-05-22 16:40:53.901149 +2024-05-22 16:40:53,907 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: status +2024-05-22 16:40:53,908 INFO WriterThread:800 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/run-70sdzmdf.wandb +2024-05-22 16:40:53,910 DEBUG SenderThread:800 [sender.py:send():378] send: header +2024-05-22 16:40:53,914 DEBUG SenderThread:800 [sender.py:send():378] send: run +2024-05-22 16:40:54,233 INFO SenderThread:800 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files +2024-05-22 16:40:54,234 INFO SenderThread:800 
[sender.py:_start_run_threads():1123] run started: 70sdzmdf with start time 1716396053.901715 +2024-05-22 16:40:54,238 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: check_version +2024-05-22 16:40:54,238 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: check_version +2024-05-22 16:40:54,357 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: run_start +2024-05-22 16:40:54,359 DEBUG HandlerThread:800 [system_info.py:__init__():26] System info init +2024-05-22 16:40:54,359 DEBUG HandlerThread:800 [system_info.py:__init__():41] System info init done +2024-05-22 16:40:54,359 INFO HandlerThread:800 [system_monitor.py:start():194] Starting system monitor +2024-05-22 16:40:54,359 INFO SystemMonitor:800 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-22 16:40:54,359 INFO HandlerThread:800 [system_monitor.py:probe():214] Collecting system info +2024-05-22 16:40:54,366 INFO SystemMonitor:800 [interfaces.py:start():188] Started cpu monitoring +2024-05-22 16:40:54,367 INFO SystemMonitor:800 [interfaces.py:start():188] Started disk monitoring +2024-05-22 16:40:54,368 INFO SystemMonitor:800 [interfaces.py:start():188] Started memory monitoring +2024-05-22 16:40:54,369 INFO SystemMonitor:800 [interfaces.py:start():188] Started network monitoring +2024-05-22 16:40:54,469 DEBUG HandlerThread:800 [system_info.py:probe():150] Probing system +2024-05-22 16:40:54,472 DEBUG HandlerThread:800 [system_info.py:_probe_git():135] Probing git +2024-05-22 16:40:54,483 ERROR HandlerThread:800 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-22 16:40:54,483 DEBUG HandlerThread:800 [system_info.py:_probe_git():143] Probing git done +2024-05-22 16:40:54,483 DEBUG HandlerThread:800 [system_info.py:probe():198] Probing system done +2024-05-22 16:40:54,483 DEBUG HandlerThread:800 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T16:40:54.469634', 'startedAt': '2024-05-22T16:40:53.881781', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.5, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.63535690307617}}, 'memory': {'total': 1007.4379997253418}} +2024-05-22 16:40:54,483 INFO HandlerThread:800 [system_monitor.py:probe():224] Finished collecting system info +2024-05-22 16:40:54,483 INFO HandlerThread:800 [system_monitor.py:probe():227] Publishing system info +2024-05-22 16:40:54,486 INFO HandlerThread:800 [system_monitor.py:probe():229] Finished publishing system info +2024-05-22 16:40:54,491 DEBUG SenderThread:800 [sender.py:send():378] send: files +2024-05-22 16:40:54,491 INFO SenderThread:800 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-22 16:40:54,669 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: python_packages +2024-05-22 16:40:54,670 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: python_packages +2024-05-22 16:40:54,670 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: stop_status +2024-05-22 16:40:54,673 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: stop_status +2024-05-22 16:40:54,752 DEBUG SenderThread:800 [sender.py:send():378] send: telemetry +2024-05-22 16:40:55,171 INFO wandb-upload_0:800 [upload_job.py:push():130] Uploaded file /tmp/tmpqqyeghg8wandb/yxzvar76-wandb-metadata.json +2024-05-22 16:40:55,236 INFO Thread-12 :800 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/output.log +2024-05-22 16:40:55,236 INFO Thread-12 :800 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-metadata.json +2024-05-22 16:40:55,236 INFO Thread-12 :800 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/requirements.txt +2024-05-22 16:40:57,236 INFO Thread-12 :800 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/output.log +2024-05-22 16:40:59,754 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 16:41:04,815 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 16:41:05,243 INFO Thread-12 :800 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/output.log +2024-05-22 16:41:06,109 DEBUG SenderThread:800 [sender.py:send():378] send: exit +2024-05-22 16:41:06,109 INFO SenderThread:800 [sender.py:send_exit():585] handling exit code: 1 +2024-05-22 16:41:06,109 INFO SenderThread:800 [sender.py:send_exit():587] handling runtime: 11 +2024-05-22 16:41:06,110 INFO SenderThread:800 [sender.py:_save_file():1389] saving file wandb-summary.json 
with policy end +2024-05-22 16:41:06,110 INFO SenderThread:800 [sender.py:send_exit():593] send defer +2024-05-22 16:41:06,111 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:06,111 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-22 16:41:06,111 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:06,111 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-22 16:41:06,111 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 1 +2024-05-22 16:41:06,111 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:06,111 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-22 16:41:06,111 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:06,111 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-22 16:41:06,111 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 2 +2024-05-22 16:41:06,111 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:06,111 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-22 16:41:06,111 INFO HandlerThread:800 [system_monitor.py:finish():203] Stopping system monitor +2024-05-22 16:41:06,111 DEBUG SystemMonitor:800 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-22 16:41:06,111 DEBUG SystemMonitor:800 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-22 16:41:06,112 DEBUG SystemMonitor:800 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-22 16:41:06,112 INFO HandlerThread:800 [interfaces.py:finish():200] Joined cpu monitor +2024-05-22 16:41:06,113 INFO HandlerThread:800 [interfaces.py:finish():200] Joined disk monitor +2024-05-22 16:41:06,113 INFO HandlerThread:800 [interfaces.py:finish():200] Joined memory monitor +2024-05-22 16:41:06,113 INFO HandlerThread:800 [interfaces.py:finish():200] Joined network monitor +2024-05-22 16:41:06,113 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:06,113 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-22 16:41:06,113 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 3 +2024-05-22 16:41:06,113 DEBUG SenderThread:800 [sender.py:send():378] send: stats +2024-05-22 16:41:06,113 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:06,113 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-22 16:41:06,114 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:06,114 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-22 16:41:06,114 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 4 +2024-05-22 16:41:06,114 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:06,114 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-22 16:41:06,114 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:06,114 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-22 16:41:06,114 INFO SenderThread:800 
[sender.py:transition_state():613] send defer: 5 +2024-05-22 16:41:06,114 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:06,114 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-22 16:41:06,114 DEBUG SenderThread:800 [sender.py:send():378] send: summary +2024-05-22 16:41:06,115 INFO SenderThread:800 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 16:41:06,115 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:06,115 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-22 16:41:06,115 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 6 +2024-05-22 16:41:06,115 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:06,115 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-22 16:41:06,115 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:06,116 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-22 16:41:06,120 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 16:41:06,186 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 7 +2024-05-22 16:41:06,186 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:06,187 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-22 16:41:06,187 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:06,187 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-22 16:41:06,244 INFO Thread-12 :800 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/config.yaml +2024-05-22 16:41:06,244 INFO Thread-12 :800 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-summary.json +2024-05-22 16:41:06,776 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 8 +2024-05-22 16:41:06,776 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:06,776 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-22 16:41:06,777 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:06,777 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-22 16:41:06,777 INFO SenderThread:800 [job_builder.py:build():432] Attempting to build job artifact +2024-05-22 16:41:06,777 INFO SenderThread:800 [job_builder.py:_get_source_type():576] no source found +2024-05-22 16:41:06,777 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 9 +2024-05-22 16:41:06,777 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:06,777 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-22 16:41:06,778 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:06,778 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-22 16:41:06,778 INFO SenderThread:800 [dir_watcher.py:finish():358] 
shutting down directory watcher +2024-05-22 16:41:07,108 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 16:41:07,246 INFO SenderThread:800 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/output.log +2024-05-22 16:41:07,246 INFO SenderThread:800 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files +2024-05-22 16:41:07,246 INFO SenderThread:800 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-summary.json wandb-summary.json +2024-05-22 16:41:07,246 INFO SenderThread:800 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/output.log output.log +2024-05-22 16:41:07,248 INFO SenderThread:800 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-metadata.json wandb-metadata.json +2024-05-22 16:41:07,250 INFO SenderThread:800 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/config.yaml config.yaml +2024-05-22 16:41:07,251 INFO SenderThread:800 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/requirements.txt requirements.txt +2024-05-22 16:41:07,251 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 10 +2024-05-22 16:41:07,251 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 16:41:07,251 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:07,251 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-22 16:41:07,251 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:07,253 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-22 16:41:07,253 INFO SenderThread:800 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 16:41:07,589 INFO wandb-upload_0:800 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/wandb-summary.json +2024-05-22 16:41:07,798 INFO wandb-upload_1:800 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/output.log +2024-05-22 16:41:07,856 INFO wandb-upload_2:800 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/config.yaml +2024-05-22 16:41:07,876 INFO wandb-upload_3:800 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/files/requirements.txt +2024-05-22 16:41:08,077 INFO Thread-11 (_thread_body):800 [sender.py:transition_state():613] send defer: 11 +2024-05-22 16:41:08,077 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:08,077 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-22 16:41:08,077 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer 
+2024-05-22 16:41:08,077 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-22 16:41:08,077 INFO SenderThread:800 [file_pusher.py:join():175] waiting for file pusher +2024-05-22 16:41:08,077 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 12 +2024-05-22 16:41:08,078 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:08,078 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-22 16:41:08,078 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:08,078 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-22 16:41:08,078 INFO SenderThread:800 [file_stream.py:finish():601] file stream finish called +2024-05-22 16:41:08,109 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 16:41:08,165 INFO SenderThread:800 [file_stream.py:finish():605] file stream finish is done +2024-05-22 16:41:08,165 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 13 +2024-05-22 16:41:08,165 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 16:41:08,165 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:08,165 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-22 16:41:08,166 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:08,166 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-22 16:41:08,166 INFO SenderThread:800 [sender.py:transition_state():613] send defer: 14 +2024-05-22 16:41:08,166 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: defer +2024-05-22 16:41:08,166 INFO HandlerThread:800 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-22 16:41:08,166 DEBUG SenderThread:800 [sender.py:send():378] send: final +2024-05-22 16:41:08,166 DEBUG SenderThread:800 [sender.py:send():378] send: footer +2024-05-22 16:41:08,166 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: defer +2024-05-22 16:41:08,166 INFO SenderThread:800 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-22 16:41:08,167 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 16:41:08,167 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 16:41:08,167 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: server_info +2024-05-22 16:41:08,167 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: get_summary +2024-05-22 16:41:08,167 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-22 16:41:08,167 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-22 16:41:08,167 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 16:41:08,168 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 16:41:08,168 DEBUG SenderThread:800 [sender.py:send_request():405] send_request: server_info +2024-05-22 16:41:08,237 INFO MainThread:800 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-22 16:41:08,237 INFO MainThread:800 [wandb_run.py:_footer_history_summary_info():4026] rendering summary 
+2024-05-22 16:41:08,237 INFO MainThread:800 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-22 16:41:08,237 DEBUG HandlerThread:800 [handler.py:handle_request():158] handle_request: shutdown +2024-05-22 16:41:08,237 INFO HandlerThread:800 [handler.py:finish():882] shutting down handler diff --git a/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/logs/debug.log b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..77f8b4bb52046ea96d69308a26e4ca01a8a69b86 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/logs/debug.log @@ -0,0 +1,28 @@ +2024-05-22 16:40:53,895 INFO MainThread:644 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-22 16:40:53,895 INFO MainThread:644 [wandb_setup.py:_flush():76] Configure stats pid to 644 +2024-05-22 16:40:53,895 INFO MainThread:644 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-22 16:40:53,895 INFO MainThread:644 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-22 16:40:53,895 INFO MainThread:644 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-22 16:40:53,895 INFO MainThread:644 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-22 16:40:53,895 WARNING MainThread:644 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-22 16:40:53,895 INFO MainThread:644 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-22 16:40:53,895 INFO MainThread:644 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-22 16:40:53,895 INFO MainThread:644 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/logs/debug.log +2024-05-22 16:40:53,896 INFO MainThread:644 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/logs/debug-internal.log +2024-05-22 16:40:53,896 INFO MainThread:644 [wandb_init.py:init():560] calling init triggers +2024-05-22 16:40:53,896 INFO MainThread:644 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-22 16:40:53,896 INFO MainThread:644 [wandb_init.py:init():610] starting backend +2024-05-22 16:40:53,896 INFO MainThread:644 [wandb_init.py:init():614] setting up manager +2024-05-22 16:40:53,900 INFO MainThread:644 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-22 16:40:53,901 INFO MainThread:644 [wandb_init.py:init():622] backend started and connected +2024-05-22 16:40:53,904 INFO MainThread:644 [wandb_init.py:init():711] updated telemetry +2024-05-22 16:40:53,913 INFO MainThread:644 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-22 16:40:54,237 INFO MainThread:644 [wandb_run.py:_on_init():2396] communicating current version +2024-05-22 16:40:54,350 INFO MainThread:644 [wandb_run.py:_on_init():2405] got version response +2024-05-22 16:40:54,350 INFO MainThread:644 [wandb_init.py:init():795] starting run threads in backend +2024-05-22 16:40:54,671 INFO MainThread:644 [wandb_run.py:_console_start():2374] atexit reg +2024-05-22 
16:40:54,671 INFO MainThread:644 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-22 16:40:54,671 INFO MainThread:644 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-22 16:40:54,671 INFO MainThread:644 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-22 16:40:54,674 INFO MainThread:644 [wandb_init.py:init():838] run started, returning control to user process diff --git a/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/run-70sdzmdf.wandb b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/run-70sdzmdf.wandb new file mode 100644 index 0000000000000000000000000000000000000000..2491bcd80d47fa1f5d35a63a04a0b787602db7c0 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240522_164053-70sdzmdf/run-70sdzmdf.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240530_125148-ddhu1ki8/files/config.yaml b/lm-evaluation-harness/wandb/run-20240530_125148-ddhu1ki8/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..312a7c9c0d63bd4c17e22571bc8d3684b22bd672 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_125148-ddhu1ki8/files/config.yaml @@ -0,0 +1,284 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1717073508 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 2 + - 13 + - 23 + - 62 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 +task_configs: + desc: null + value: + arc_easy: + task: arc_easy + group: + - ai2_arc + dataset_path: allenai/ai2_arc + dataset_name: ARC-Easy + training_split: train + validation_split: validation + test_split: test + doc_to_text: 'Question: {{question}} + + Answer:' + doc_to_target: '{{choices.label.index(answerKey)}}' + doc_to_choice: '{{choices.text}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: 'Question: {{question}} + + Answer:' + metadata: + version: 1.0 + boolq: + task: boolq + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: boolq + training_split: train + validation_split: validation + doc_to_text: '{{passage}} + + Question: {{question}}? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: passage + metadata: + version: 2.0 + copa: + task: copa + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: copa + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n # Drop the period\n connector =\ + \ {\n \"cause\": \"because\",\n \"effect\": \"therefore\",\n\ + \ }[doc[\"question\"]]\n return doc[\"premise\"].strip()[:-1] + f\"\ + \ {connector}\"\n" + doc_to_target: "def doc_to_target(doc):\n correct_choice = doc[\"choice1\"\ + ] if doc[\"label\"] == 0 else doc[\"choice2\"]\n # Connect the sentences\n\ + \ return \" \" + convert_choice(correct_choice)\n" + doc_to_choice: "def doc_to_choice(doc):\n return [\" \" + convert_choice(doc[\"\ + choice1\"]), \" \" + convert_choice(doc[\"choice2\"])]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + mrpc: + task: mrpc + group: glue + dataset_path: glue + dataset_name: mrpc + training_split: train + validation_split: validation + doc_to_text: 'Sentence 1: {{sentence1}} + + Sentence 2: {{sentence2}} + + Question: Do both sentences mean the same thing? + + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + - metric: f1 + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + piqa: + task: piqa + dataset_path: piqa + training_split: train + validation_split: validation + doc_to_text: 'Question: {{goal}} + + Answer:' + doc_to_target: label + doc_to_choice: '{{[sol1, sol2]}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: goal + metadata: + version: 1.0 + sst2: + task: sst2 + group: glue + dataset_path: glue + dataset_name: sst2 + training_split: train + validation_split: validation + doc_to_text: '{{sentence}} + + Question: Is this sentence positive or negative? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - negative + - positive + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + winogrande: + task: winogrande + dataset_path: winogrande + dataset_name: winogrande_xl + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n\ + \ return answer_to_num[doc[\"answer\"]]\n" + doc_to_target: "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"\ + _\") + 1\n return doc[\"sentence\"][idx:].strip()\n" + doc_to_choice: "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"\ + _\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"\ + sentence\"][:idx] + opt for opt in options]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: sentence + metadata: + version: 1.0 +cli_configs: + desc: null + value: + model: hf + model_args: pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer + batch_size: auto + batch_sizes: + - 64 + device: null + use_cache: null + limit: null + bootstrap_iters: 100000 + gen_kwargs: null diff --git a/lm-evaluation-harness/wandb/run-20240530_125148-ddhu1ki8/files/media/table/evaluation/eval_results_1_f6c5ffdda60541ca1d9f.table.json b/lm-evaluation-harness/wandb/run-20240530_125148-ddhu1ki8/files/media/table/evaluation/eval_results_1_f6c5ffdda60541ca1d9f.table.json new file mode 100644 index 0000000000000000000000000000000000000000..c5fc14d9a169924b46a681afe26fedd6d8293700 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_125148-ddhu1ki8/files/media/table/evaluation/eval_results_1_f6c5ffdda60541ca1d9f.table.json @@ -0,0 +1 @@ +{"columns": ["Tasks", "Version", "Filter", "num_fewshot", "Metric", "Value", "Stderr"], "data": [["winogrande", 1.0, "none", 0, "acc", "0.5043409629044988", "0.0141"], ["sst2", 1.0, "none", 0, "acc", "0.49770642201834864", "0.0169"], ["piqa", 1.0, "none", 0, "acc", "0.528835690968444", "0.0116"], ["piqa", 1.0, "none", 0, "acc_norm", "0.5097932535364527", "0.0117"], ["mrpc", 1.0, "none", 0, "acc", "0.3161764705882353", "0.0230"], ["mrpc", 1.0, "none", 0, "f1", "0.0", "0.0000"], ["copa", 1.0, "none", 0, "acc", "0.54", "0.0501"], ["boolq", 2.0, "none", 0, "acc", "0.3782874617737003", "0.0085"], ["arc_easy", 1.0, "none", 0, "acc", "0.26136363636363635", "0.0090"], ["arc_easy", 1.0, "none", 0, "acc_norm", "0.2622053872053872", "0.0090"]]} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_125148-ddhu1ki8/files/output.log b/lm-evaluation-harness/wandb/run-20240530_125148-ddhu1ki8/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..b230fe5ca73b572370127597e559183becb09a46 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_125148-ddhu1ki8/files/output.log @@ -0,0 +1,549 @@ + +2024-05-30:12:51:49,570 INFO [__main__.py:251] Verbosity set to INFO +2024-05-30:12:51:58,826 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-05-30:12:51:58,827 INFO 
[evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-30:12:51:58,828 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer'} +2024-05-30:12:52:01,173 INFO [huggingface.py:164] Using device 'cuda' +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Downloading readme: 100%|██████████| 9.00k/9.00k [00:00<00:00, 16.7MB/s] +Downloading data: 100%|██████████| 331k/331k [00:00<00:00, 1.30MB/s] +Downloading data: 100%|██████████| 346k/346k [00:00<00:00, 2.91MB/s] +Downloading data: 100%|██████████| 86.1k/86.1k [00:00<00:00, 212kB/s] +Generating train split: 100%|██████████| 2251/2251 [00:00<00:00, 62398.00 examples/s] +Generating test split: 100%|██████████| 2376/2376 [00:00<00:00, 319607.01 examples/s] +Generating validation split: 100%|██████████| 570/570 [00:00<00:00, 150950.45 examples/s] +2024-05-30:12:52:32,725 WARNING [task.py:763] [Task: boolq] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-30:12:52:32,726 WARNING [task.py:775] [Task: boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for super_glue contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/super_glue +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 30.7k/30.7k [00:00<00:00, 39.7MB/s] +Downloading readme: 100%|██████████| 18.2k/18.2k [00:00<00:00, 30.5MB/s] +Downloading data: 100%|██████████| 4.12M/4.12M [00:00<00:00, 35.6MB/s] +Generating train split: 100%|██████████| 9427/9427 [00:00<00:00, 22295.61 examples/s] +Generating validation split: 100%|██████████| 3270/3270 [00:00<00:00, 22421.99 examples/s] +Generating test split: 100%|██████████| 3245/3245 [00:00<00:00, 23363.77 examples/s] +2024-05-30:12:52:36,700 WARNING [task.py:763] [Task: copa] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-30:12:52:36,700 WARNING [task.py:775] [Task: copa] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 44.0k/44.0k [00:00<00:00, 49.8MB/s] +Generating train split: 100%|██████████| 400/400 [00:00<00:00, 16380.48 examples/s] +Generating validation split: 100%|██████████| 100/100 [00:00<00:00, 12630.40 examples/s] +Generating test split: 100%|██████████| 500/500 [00:00<00:00, 17361.39 examples/s] +2024-05-30:12:52:38,954 WARNING [task.py:763] [Task: mrpc] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-30:12:52:38,955 WARNING [task.py:775] [Task: mrpc] metric acc is defined, but higher_is_better is not. 
using default higher_is_better=True +2024-05-30:12:52:38,955 WARNING [task.py:763] [Task: mrpc] metric f1 is defined, but aggregation is not. using default aggregation=f1 +2024-05-30:12:52:38,958 WARNING [task.py:775] [Task: mrpc] metric f1 is defined, but higher_is_better is not. using default higher_is_better=True +Downloading readme: 100%|██████████| 35.3k/35.3k [00:00<00:00, 42.2MB/s] +Downloading data: 100%|██████████| 649k/649k [00:00<00:00, 2.82MB/s] +Downloading data: 100%|██████████| 75.7k/75.7k [00:00<00:00, 493kB/s] +Downloading data: 100%|██████████| 308k/308k [00:00<00:00, 1.96MB/s] +Generating train split: 100%|██████████| 3668/3668 [00:00<00:00, 410488.73 examples/s] +Generating validation split: 100%|██████████| 408/408 [00:00<00:00, 175749.82 examples/s] +Generating test split: 100%|██████████| 1725/1725 [00:00<00:00, 383910.35 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for piqa contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/piqa +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 5.36k/5.36k [00:00<00:00, 11.7MB/s] +Downloading readme: 100%|██████████| 8.41k/8.41k [00:00<00:00, 17.6MB/s] +Downloading data: 100%|██████████| 1.82M/1.82M [00:00<00:00, 4.15MB/s] +Downloading data: 100%|██████████| 815k/815k [00:00<00:00, 16.5MB/s] +Generating train split: 100%|██████████| 16113/16113 [00:00<00:00, 23714.34 examples/s] +Generating test split: 100%|██████████| 3084/3084 [00:00<00:00, 24629.11 examples/s] +Generating validation split: 100%|██████████| 1838/1838 [00:00<00:00, 23946.83 examples/s] +2024-05-30:12:52:50,849 WARNING [task.py:763] [Task: sst2] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-30:12:52:50,850 WARNING [task.py:775] [Task: sst2] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 3.11M/3.11M [00:00<00:00, 15.6MB/s] +Downloading data: 100%|██████████| 72.8k/72.8k [00:00<00:00, 155kB/s] +Downloading data: 100%|██████████| 148k/148k [00:00<00:00, 798kB/s] +Generating train split: 100%|██████████| 67349/67349 [00:00<00:00, 1393425.48 examples/s] +Generating validation split: 100%|██████████| 872/872 [00:00<00:00, 395740.43 examples/s] +Generating test split: 100%|██████████| 1821/1821 [00:00<00:00, 467918.13 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for winogrande contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/winogrande +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. 
+ warnings.warn( +Downloading builder script: 100%|██████████| 5.65k/5.65k [00:00<00:00, 12.2MB/s] +Downloading readme: 100%|██████████| 9.97k/9.97k [00:00<00:00, 19.8MB/s] +Downloading data: 100%|██████████| 3.40M/3.40M [00:00<00:00, 6.97MB/s] +Generating train split: 100%|██████████| 40398/40398 [00:01<00:00, 24529.38 examples/s] +Generating test split: 100%|██████████| 1767/1767 [00:00<00:00, 24393.19 examples/s] +Generating validation split: 100%|██████████| 1267/1267 [00:00<00:00, 22726.79 examples/s] +2024-05-30:12:53:05,085 INFO [task.py:395] Building contexts for winogrande on rank 0... +100%|██████████| 1267/1267 [00:00<00:00, 69101.52it/s] +2024-05-30:12:53:05,170 INFO [task.py:395] Building contexts for sst2 on rank 0... +100%|██████████| 872/872 [00:00<00:00, 2536.42it/s] +2024-05-30:12:53:05,545 INFO [task.py:395] Building contexts for piqa on rank 0... +100%|██████████| 1838/1838 [00:01<00:00, 1092.54it/s] +2024-05-30:12:53:07,304 INFO [task.py:395] Building contexts for mrpc on rank 0... +100%|██████████| 408/408 [00:00<00:00, 1819.61it/s] +2024-05-30:12:53:07,549 INFO [task.py:395] Building contexts for copa on rank 0... +100%|██████████| 100/100 [00:00<00:00, 60558.82it/s] +2024-05-30:12:53:07,558 INFO [task.py:395] Building contexts for boolq on rank 0... +100%|██████████| 3270/3270 [00:01<00:00, 1986.98it/s] +2024-05-30:12:53:09,338 INFO [task.py:395] Building contexts for arc_easy on rank 0... +100%|██████████| 2376/2376 [00:02<00:00, 1066.01it/s] +2024-05-30:12:53:11,719 INFO [evaluator.py:379] Running loglikelihood requests +Token indices sequence length is longer than the specified maximum sequence length for this model (1333 > 1024). Running this sequence through the model will result in indexing errors +Running loglikelihood requests: 0%| | 0/25011 [00:00 +""" + +from .__version__ import __author__, __copyright__, __email__, __license__, __version__ +from ._align import Align +from ._align_getter import align_getter +from ._column import ColumnDataProperty +from ._common import MAX_STRICT_LEVEL_MAP, MIN_STRICT_LEVEL_MAP, NOT_QUOTING_FLAGS, DefaultValue +from ._container import MinMaxContainer +from ._dataproperty import DataProperty +from ._extractor import DataPropertyExtractor, DataPropertyMatrix, MatrixFormatting +from ._formatter import Format +from ._function import calc_ascii_char_width, get_integer_digit, get_number_of_digit +from ._line_break import LineBreakHandling +from ._preprocessor import Preprocessor +from .logger import set_logger + + +__all__ = ( + "Align", + "align_getter", + "ColumnDataProperty", + "DataProperty", + "DataPropertyExtractor", + "DataPropertyMatrix", + "Format", + "LineBreakHandling", + "MatrixFormatting", + "MinMaxContainer", + "Preprocessor", + "calc_ascii_char_width", + "get_integer_digit", + "get_number_of_digit", + "MAX_STRICT_LEVEL_MAP", + "MIN_STRICT_LEVEL_MAP", + "NOT_QUOTING_FLAGS", + "DefaultValue", + "set_logger", + "__author__", + "__copyright__", + "__email__", + "__license__", + "__version__", +) diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__version__.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__version__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c7ed118cb653a0994c788ced5c834b632abbbc1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__version__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a960e07b3e769ca44642114689bb36e89c36f56 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align_getter.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align_getter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8f78fd207dfb765eeea3935fc49aa4883e30f52 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_align_getter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b88bc14157c9b32b72b9d080a368570adf034f2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_column.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_column.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ee1fd9a76ce844726771d6fc549d375b5b3f01e Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_column.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_converter.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_converter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2efe5331881febc0f44d3a4a5cb771f018a7372 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_converter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_dataproperty.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_dataproperty.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56f3edee121b415f1cdafca2d8d243f5e88da1e9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_dataproperty.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_extractor.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_extractor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f545b298674328a94d754967727c439a1afa67bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_extractor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_formatter.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_formatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d90b9e9b27658b37a018ed6573868bfb89837bd Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_formatter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_interface.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73f7db3ca6edb89775c1d0303d2da97a2b7ad296 Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_interface.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_line_break.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_line_break.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4462994f15c21195d6b077179eb7d82973cb761 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_line_break.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_preprocessor.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_preprocessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fecb415c2842e70f28ea8db436584b8712f0e45 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_preprocessor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/typing.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d54e97e0fac2958d3beb04a6a3e3095df920c6d1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/typing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__version__.py b/venv/lib/python3.10/site-packages/dataproperty/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..bd082588129c73565cc49a474b6463dea4175bab --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/__version__.py @@ -0,0 +1,6 @@ +__author__ = "Tsuyoshi Hombashi" +__copyright__ = f"Copyright 2016, {__author__}" +__license__ = "MIT License" +__version__ = "1.0.1" +__maintainer__ = __author__ +__email__ = "tsuyoshi.hombashi@gmail.com" diff --git a/venv/lib/python3.10/site-packages/dataproperty/_align.py b/venv/lib/python3.10/site-packages/dataproperty/_align.py new file mode 100644 index 0000000000000000000000000000000000000000..7d8a318349fc4cdacf4397fcabc62b5fe3e2552a --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_align.py @@ -0,0 +1,25 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com> +""" + +import enum + + +@enum.unique +class Align(enum.Enum): + AUTO = (1 << 0, "auto") + LEFT = (1 << 1, "left") + RIGHT = (1 << 2, "right") + CENTER = (1 << 3, "center") + + @property + def align_code(self) -> int: + return self.__align_code + + @property + def align_string(self) -> str: + return self.__align_string + + def __init__(self, code: int, string: str) -> None: + self.__align_code = code + self.__align_string = string diff --git a/venv/lib/python3.10/site-packages/dataproperty/_align_getter.py b/venv/lib/python3.10/site-packages/dataproperty/_align_getter.py new file mode 100644 index 0000000000000000000000000000000000000000..3550e7681cb74b699e0856bee5bad141ba288ca2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_align_getter.py @@ -0,0 +1,33 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com> +""" + +from typing import Dict + +from typepy import Typecode + +from ._align import Align + + +class AlignGetter: + @property + def typecode_align_table(self): + raise NotImplementedError() + + @typecode_align_table.setter + def typecode_align_table(self, x: Dict[Typecode, Align]) -> None: + self.__typecode_align_table = x + + def get_align_from_typecode(self, typecode: Typecode) -> Align: + return self.__typecode_align_table.get(typecode, self.default_align) + + def __init__(self) -> None: + self.typecode_align_table = { + Typecode.STRING: Align.LEFT, + Typecode.INTEGER: Align.RIGHT, + Typecode.REAL_NUMBER: Align.RIGHT, + } + self.default_align = Align.LEFT + + +align_getter = AlignGetter() diff --git a/venv/lib/python3.10/site-packages/dataproperty/_base.py b/venv/lib/python3.10/site-packages/dataproperty/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..619fc3e2d3e3218ba6c474b4e762219aa16f643b --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_base.py @@ -0,0 +1,98 @@ +from typing import Dict, Optional, Type + +from typepy import ( + Bool, + DateTime, + Dictionary, + Infinity, + Integer, + IpAddress, + List, + Nan, + NoneType, + NullString, + RealNumber, + String, + Typecode, +) +from typepy.type import AbstractType + +from ._formatter import Formatter +from ._interface import DataPeropertyInterface + + +class DataPeropertyBase(DataPeropertyInterface): + __slots__ = ( + "_datetime_format_str", + "_decimal_places", + "_east_asian_ambiguous_width", + "_formatter", + "_typecode", + "__format_str", + ) + + __TYPE_CLASS_TABLE: Dict[Typecode, AbstractType] = { + Typecode.BOOL: Bool, + Typecode.DATETIME: DateTime, + Typecode.DICTIONARY: Dictionary, + Typecode.INTEGER: Integer, + Typecode.INFINITY: Infinity, + Typecode.IP_ADDRESS: IpAddress, + Typecode.LIST: List, + Typecode.NAN: Nan, + Typecode.NONE: NoneType, + Typecode.NULL_STRING: NullString, + Typecode.REAL_NUMBER: RealNumber, + Typecode.STRING: String, + } + + @property + def type_class(self) -> Type[AbstractType]: + return self.__TYPE_CLASS_TABLE[self.typecode] + + @property + def typecode(self) -> Typecode: + """ + The ``typepy.Typecode`` that corresponds to the type of the ``data``. + + :return: + One of the enum values defined in ``typepy.Typecode``. 
+ :rtype: typepy.Typecode + """ + + assert self._typecode + + return self._typecode + + @property + def typename(self) -> str: + return self.typecode.name + + def __init__( + self, + format_flags: Optional[int], + is_formatting_float: bool, + datetime_format_str: str, + east_asian_ambiguous_width: int, + ) -> None: + self._decimal_places: Optional[int] = None + self._east_asian_ambiguous_width = east_asian_ambiguous_width + self._typecode: Optional[Typecode] = None + + self._datetime_format_str = datetime_format_str + self.__format_str = "" + + self._formatter = Formatter( + format_flags=format_flags, + datetime_format_str=self._datetime_format_str, + is_formatting_float=is_formatting_float, + ) + + @property + def format_str(self) -> str: + if self.__format_str: + return self.__format_str + + self.__format_str = self._formatter.make_format_str(self.typecode, self.decimal_places) + + return self.__format_str diff --git a/venv/lib/python3.10/site-packages/dataproperty/_column.py b/venv/lib/python3.10/site-packages/dataproperty/_column.py new file mode 100644 index 0000000000000000000000000000000000000000..9026140f506e0381d1aab9b56d2863ace0e85dc7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_column.py @@ -0,0 +1,352 @@ +from typing import Any, Dict, List, Optional + +from mbstrdecoder import MultiByteStrDecoder +from typepy import Integer, StrictLevel, Typecode, TypeConversionError + +from ._align import Align +from ._align_getter import align_getter +from ._base import DataPeropertyBase +from ._common import DefaultValue +from ._container import ListContainer, MinMaxContainer +from ._dataproperty import DataProperty +from ._function import calc_ascii_char_width +from .typing import FloatType + + +class ColumnDataProperty(DataPeropertyBase): + __slots__ = ( + "__header_ascii_char_width", + "__body_ascii_char_width", + "__column_index", + "__dp_list", + "__float_type", + "__format_map", + "__is_calculate", + "__max_precision", + "__minmax_integer_digits", + "__minmax_decimal_places", + "__minmax_additional_format_len", + "__typecode_bitmap", + ) + + @property + def align(self) -> Align: + return align_getter.get_align_from_typecode(self.typecode) + + @property + def bit_length(self) -> Optional[int]: + if self.typecode != Typecode.INTEGER: + return None + + bit_length = 0 + for value_dp in self.__dp_list: + try: + bit_length = max(bit_length, int.bit_length(value_dp.data)) + except TypeError: + pass + + return bit_length + + @property + def column_index(self) -> int: + return self.__column_index + + @property + def decimal_places(self) -> Optional[int]: + return self._decimal_places + + @property + def ascii_char_width(self) -> int: + return max(self.__header_ascii_char_width, self.__body_ascii_char_width) + + @property + def minmax_integer_digits(self) -> MinMaxContainer: + return self.__minmax_integer_digits + + @property + def minmax_decimal_places(self) -> ListContainer: + return self.__minmax_decimal_places + + @property + def minmax_additional_format_len(self) -> MinMaxContainer: + return self.__minmax_additional_format_len + + def __init__( + self, + column_index: int, + float_type: Optional[FloatType], + min_width: int = 0, + format_flags: Optional[int] = None, + is_formatting_float: bool = True, + datetime_format_str: str = DefaultValue.DATETIME_FORMAT, + east_asian_ambiguous_width: int = 1, + max_precision: int = DefaultValue.MAX_PRECISION, + ) -> None: + super().__init__( + format_flags=format_flags, + is_formatting_float=is_formatting_float, + 
datetime_format_str=datetime_format_str, + east_asian_ambiguous_width=east_asian_ambiguous_width, + ) + + self.__header_ascii_char_width = 0 + self.__body_ascii_char_width = min_width + self.__column_index = column_index + + self.__float_type = float_type + + self.__is_calculate = True + self.__dp_list: List[DataProperty] = [] + self.__minmax_integer_digits = MinMaxContainer() + self.__minmax_decimal_places = ListContainer() + self.__minmax_additional_format_len = MinMaxContainer() + self.__max_precision = max_precision + + self.__typecode_bitmap = Typecode.NONE.value + self.__calc_typecode_from_bitmap() + + self.__format_map: Dict[Typecode, str] = self._formatter.make_format_map( + decimal_places=self._decimal_places + ) + + def __repr__(self) -> str: + element_list = [] + + if self.column_index is not None: + element_list.append(f"column={self.column_index}") + + element_list.extend( + [ + f"type={self.typename}", + f"align={self.align.align_string}", + f"ascii_width={self.ascii_char_width}", + ] + ) + + if Integer(self.bit_length).is_type(): + element_list.append(f"bit_len={self.bit_length}") + + if self.minmax_integer_digits.has_value(): + if self.minmax_integer_digits.is_same_value(): + value = f"int_digits={self.minmax_integer_digits.min_value}" + else: + value = f"int_digits=({self.minmax_integer_digits})" + + element_list.append(value) + + if self.minmax_decimal_places.has_value(): + if self.minmax_decimal_places.is_same_value(): + value = f"decimal_places={self.minmax_decimal_places.min_value}" + else: + value = f"decimal_places=({self.minmax_decimal_places})" + + element_list.append(value) + + if not self.minmax_additional_format_len.is_zero(): + if self.minmax_additional_format_len.is_same_value(): + value = f"extra_len={self.minmax_additional_format_len.min_value}" + else: + value = f"extra_len=({self.minmax_additional_format_len})" + + element_list.append(value) + + return ", ".join(element_list) + + def dp_to_str(self, value_dp: DataProperty) -> str: + if value_dp.typecode == Typecode.STRING: + return str(value_dp.data) + + try: + value = self.__preprocess_value_before_tostring(value_dp) + except TypeConversionError: + return self.__format_map.get(value_dp.typecode, "{:s}").format(value_dp.data) + + to_string_format_str = self.__get_tostring_format(value_dp) + + try: + return to_string_format_str.format(value) + except (ValueError, TypeError): + pass + + try: + return MultiByteStrDecoder(value).unicode_str + except ValueError: + pass + + return str(value) + + def extend_width(self, ascii_char_width: int) -> None: + self.extend_header_width(ascii_char_width) + self.extend_body_width(ascii_char_width) + + def extend_header_width(self, ascii_char_width: int) -> None: + self.__header_ascii_char_width += ascii_char_width + + def extend_body_width(self, ascii_char_width: int) -> None: + self.__body_ascii_char_width += ascii_char_width + + def update_header(self, header_db: DataProperty) -> None: + self.__header_ascii_char_width = header_db.ascii_char_width + + def update_body(self, value_dp: DataProperty) -> None: + if value_dp.is_include_ansi_escape: + assert value_dp.no_ansi_escape_dp + value_dp = value_dp.no_ansi_escape_dp + + self.__typecode_bitmap |= value_dp.typecode.value + self.__calc_typecode_from_bitmap() + + if value_dp.typecode in (Typecode.REAL_NUMBER, Typecode.INTEGER): + self.__minmax_integer_digits.update(value_dp.integer_digits) + self.__minmax_decimal_places.update(value_dp.decimal_places) + self.__update_decimal_places() + + 
self.__minmax_additional_format_len.update(value_dp.additional_format_len) + + self.__dp_list.append(value_dp) + self.__update_ascii_char_width() + + def merge(self, column_dp: "ColumnDataProperty") -> None: + self.__typecode_bitmap |= column_dp.typecode.value + self.__calc_typecode_from_bitmap() + + self.__minmax_integer_digits.merge(column_dp.minmax_integer_digits) + self.__minmax_decimal_places.merge(column_dp.minmax_decimal_places) + self.__update_decimal_places() + + self.__minmax_additional_format_len.merge(column_dp.minmax_additional_format_len) + + self.__body_ascii_char_width = max(self.__body_ascii_char_width, column_dp.ascii_char_width) + self.__update_ascii_char_width() + + def begin_update(self) -> None: + self.__is_calculate = False + + def end_update(self) -> None: + self.__is_calculate = True + + self.__calc_typecode_from_bitmap() + self.__update_decimal_places() + self.__update_ascii_char_width() + + def __is_not_single_typecode(self, typecode_bitmap: int) -> bool: + return bool( + self.__typecode_bitmap & typecode_bitmap and self.__typecode_bitmap & ~typecode_bitmap + ) + + def __is_float_typecode(self) -> bool: + FLOAT_TYPECODE_BMP = ( + Typecode.REAL_NUMBER.value | Typecode.INFINITY.value | Typecode.NAN.value + ) + NUMBER_TYPECODE_BMP = FLOAT_TYPECODE_BMP | Typecode.INTEGER.value + + if self.__is_not_single_typecode(NUMBER_TYPECODE_BMP | Typecode.NULL_STRING.value): + return False + + if ( + bin(self.__typecode_bitmap & (FLOAT_TYPECODE_BMP | Typecode.NULL_STRING.value)).count( + "1" + ) + >= 2 + ): + return True + + if bin(self.__typecode_bitmap & NUMBER_TYPECODE_BMP).count("1") >= 2: + return True + + return False + + def __calc_body_ascii_char_width(self) -> int: + width_list = [self.__body_ascii_char_width] + + for value_dp in self.__dp_list: + if value_dp.is_include_ansi_escape: + assert value_dp.no_ansi_escape_dp + value_dp = value_dp.no_ansi_escape_dp + + width_list.append( + calc_ascii_char_width(self.dp_to_str(value_dp), self._east_asian_ambiguous_width) + ) + + return max(width_list) + + def __calc_decimal_places(self) -> Optional[int]: + if self.minmax_decimal_places.max_value is None: + return None + + return min(self.__max_precision, int(self.minmax_decimal_places.max_value)) + + def __get_tostring_format(self, value_dp: DataProperty) -> str: + if self.typecode == Typecode.STRING: + return self.__format_map.get(value_dp.typecode, "{:s}") + + return self.__format_map.get(self.typecode, "{:s}") + + def __get_typecode_from_bitmap(self) -> Typecode: + if self.__is_float_typecode(): + return Typecode.REAL_NUMBER + + if any( + [ + self.__is_not_single_typecode(Typecode.BOOL.value), + self.__is_not_single_typecode(Typecode.DATETIME.value), + ] + ): + return Typecode.STRING + + typecode_list = [ + Typecode.STRING, + Typecode.REAL_NUMBER, + Typecode.INTEGER, + Typecode.DATETIME, + Typecode.DICTIONARY, + Typecode.IP_ADDRESS, + Typecode.LIST, + Typecode.BOOL, + Typecode.INFINITY, + Typecode.NAN, + Typecode.NULL_STRING, + ] + + for typecode in typecode_list: + if self.__typecode_bitmap & typecode.value: + return typecode + + if self.__typecode_bitmap == Typecode.NONE.value: + return Typecode.NONE + + return Typecode.STRING + + def __update_ascii_char_width(self) -> None: + if not self.__is_calculate: + return + + self.__body_ascii_char_width = self.__calc_body_ascii_char_width() + + def __update_decimal_places(self) -> None: + if not self.__is_calculate: + return + + self._decimal_places = self.__calc_decimal_places() + self.__format_map = 
self._formatter.make_format_map(decimal_places=self._decimal_places) + + def __calc_typecode_from_bitmap(self) -> None: + if not self.__is_calculate: + return + + self._typecode = self.__get_typecode_from_bitmap() + + def __preprocess_value_before_tostring(self, value_dp: DataProperty) -> Any: + if self.typecode == value_dp.typecode or self.typecode in [ + Typecode.STRING, + Typecode.BOOL, + Typecode.DATETIME, + ]: + return value_dp.data + + return self.type_class( + value_dp.data, + strict_level=StrictLevel.MIN, + float_type=self.__float_type, + strip_ansi_escape=False, + ).convert() diff --git a/venv/lib/python3.10/site-packages/dataproperty/_common.py b/venv/lib/python3.10/site-packages/dataproperty/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..ff90a7c50bbf05c6257b3857f12284d6e16bc593 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_common.py @@ -0,0 +1,69 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import copy +import itertools +from datetime import datetime +from decimal import Decimal + +from typepy import StrictLevel, Typecode + +from .typing import StrictLevelMap, TypeValueMap + + +NOT_QUOTING_FLAGS = { + Typecode.BOOL: False, + Typecode.DATETIME: False, + Typecode.DICTIONARY: False, + Typecode.INFINITY: False, + Typecode.INTEGER: False, + Typecode.IP_ADDRESS: False, + Typecode.LIST: False, + Typecode.NAN: False, + Typecode.NULL_STRING: False, + Typecode.NONE: False, + Typecode.REAL_NUMBER: False, + Typecode.STRING: False, +} + +MAX_STRICT_LEVEL_MAP: StrictLevelMap = dict(itertools.product(list(Typecode), [StrictLevel.MAX])) +MIN_STRICT_LEVEL_MAP: StrictLevelMap = dict(itertools.product(list(Typecode), [StrictLevel.MIN])) + + +class DefaultValue: + DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S%z" + FLOAT_TYPE = Decimal + INF_VALUE = FLOAT_TYPE("inf") + NAN_VALUE = FLOAT_TYPE("nan") + + QUOTING_FLAGS = copy.deepcopy(NOT_QUOTING_FLAGS) + + STRICT_LEVEL_MAP: StrictLevelMap = { + "default": StrictLevel.MAX, + Typecode.BOOL: StrictLevel.MAX, + Typecode.DATETIME: StrictLevel.MAX, + Typecode.DICTIONARY: StrictLevel.MAX, + Typecode.REAL_NUMBER: 1, + Typecode.INFINITY: StrictLevel.MIN, + Typecode.INTEGER: 1, + Typecode.IP_ADDRESS: StrictLevel.MAX, + Typecode.LIST: StrictLevel.MAX, + Typecode.NAN: StrictLevel.MIN, + Typecode.NONE: StrictLevel.MAX, + Typecode.NULL_STRING: StrictLevel.MIN, + Typecode.STRING: StrictLevel.MIN, + } + + TYPE_VALUE_MAP: TypeValueMap = { + Typecode.NONE: None, + Typecode.INFINITY: INF_VALUE, + Typecode.NAN: NAN_VALUE, + } + + MAX_WORKERS = 1 + MAX_PRECISION = 100 + + +def default_datetime_formatter(value: datetime) -> str: + return value.strftime(DefaultValue.DATETIME_FORMAT) diff --git a/venv/lib/python3.10/site-packages/dataproperty/_container.py b/venv/lib/python3.10/site-packages/dataproperty/_container.py new file mode 100644 index 0000000000000000000000000000000000000000..26f8b6280173f5c90416f33606499239ef97b808 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_container.py @@ -0,0 +1,193 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import abc +from decimal import Decimal +from typing import Any, List, Optional, Sequence, Union + +from typepy import RealNumber + + +T = Union[int, float, Decimal] +NAN = Decimal("nan") + + +class AbstractContainer(metaclass=abc.ABCMeta): + @abc.abstractproperty + def min_value(self) -> Optional[Decimal]: # pragma: no cover + pass + + @abc.abstractproperty + def max_value(self) -> Optional[Decimal]: # pragma: no cover + pass + + @abc.abstractmethod + def mean(self) -> Decimal: # pragma: no cover + pass + + @abc.abstractmethod + def update(self, value: Optional[T]) -> None: # pragma: no cover + pass + + @abc.abstractmethod + def merge(self, value: "AbstractContainer") -> None: # pragma: no cover + pass + + def __repr__(self) -> str: + if not self.has_value(): + return "None" + + return ", ".join([f"min={self.min_value}", f"max={self.max_value}"]) + + def has_value(self) -> bool: + return self.min_value is not None and self.max_value is not None + + def is_same_value(self) -> bool: + return self.has_value() and self.min_value == self.max_value + + def is_zero(self) -> bool: + return self.has_value() and self.min_value == 0 and self.max_value == 0 + + +class ListContainer(AbstractContainer): + __slots__ = ("__value_list",) + + @property + def min_value(self) -> Optional[Decimal]: + try: + return min(self.__value_list) + except ValueError: + return None + + @property + def max_value(self) -> Optional[Decimal]: + try: + return max(self.__value_list) + except ValueError: + return None + + @property + def value_list(self) -> List[Decimal]: + return self.__value_list + + def __init__(self, value_list: Optional[List[Decimal]] = None) -> None: + if value_list is None: + self.__value_list: List[Decimal] = [] + return + + for value in value_list: + self.update(value) + + def mean(self) -> Decimal: + try: + return Decimal(sum(self.__value_list) / len(self.__value_list)) + except ZeroDivisionError: + return NAN + + def update(self, value: Union[int, float, Decimal, None]) -> None: + if value is None: + return + + store_value = RealNumber(value).try_convert() + if store_value is None: + return + + self.__value_list.append(store_value) + + def merge(self, value: "AbstractContainer") -> None: + if not isinstance(value, ListContainer): + return + + for v in value.value_list: + self.update(v) + + +class MinMaxContainer(AbstractContainer): + __slots__ = ("__min_value", "__max_value") + + def __init__(self, value_list: Optional[Sequence[Decimal]] = None) -> None: + self.__min_value: Optional[Decimal] = None + self.__max_value: Optional[Decimal] = None + + if value_list is None: + return + + for value in value_list: + self.update(value) + + @property + def min_value(self) -> Optional[Decimal]: + return self.__min_value + + @property + def max_value(self) -> Optional[Decimal]: + return self.__max_value + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, MinMaxContainer): + return False + + return all([self.min_value == other.min_value, self.max_value == other.max_value]) + + def __ne__(self, other: Any) -> bool: + if not isinstance(other, MinMaxContainer): + return True + + return any([self.min_value != other.min_value, self.max_value != other.max_value]) + + def __contains__(self, x: T) -> bool: + if self.min_value is None: + return False + + if self.max_value is None: + return False + + return self.min_value <= x <= self.max_value + + def diff(self) -> Decimal: + if self.min_value is None: + return NAN + + if self.max_value is None: + return NAN + + 
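# Illustrative sketch of the container semantics defined here (assumes this
# module is importable as dataproperty._container, per the diff header above):
from decimal import Decimal
from dataproperty._container import MinMaxContainer

c = MinMaxContainer()
for v in (3, 1, 4, 1, 5):
    c.update(v)                  # values are normalized to Decimal
assert c.min_value == 1 and c.max_value == 5
assert 2 in c                    # __contains__: min_value <= x <= max_value
assert c.mean() == Decimal(3)    # (max_value + min_value) * 0.5
assert c.diff() == Decimal(4)    # max_value - min_value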
try: + return self.max_value - self.min_value + except TypeError: + return NAN + + def mean(self) -> Decimal: + if self.min_value is None: + return NAN + + if self.max_value is None: + return NAN + + try: + return (self.max_value + self.min_value) * Decimal("0.5") + except TypeError: + return NAN + + def update(self, value: Optional[T]) -> None: + if value is None: + return + + decimal_value = Decimal(value) + + if self.__min_value is None: + self.__min_value = decimal_value + else: + self.__min_value = min(self.__min_value, decimal_value) + + if self.__max_value is None: + self.__max_value = decimal_value + else: + self.__max_value = max(self.__max_value, decimal_value) + + def merge(self, value: "AbstractContainer") -> None: + if not isinstance(value, MinMaxContainer): + return + + self.update(value.min_value) + self.update(value.max_value) diff --git a/venv/lib/python3.10/site-packages/dataproperty/_converter.py b/venv/lib/python3.10/site-packages/dataproperty/_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..6c24895984535d9fe0521b7af5e1834dd6f6b80d --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_converter.py @@ -0,0 +1,90 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import re +from typing import Any, Dict, Optional + +from typepy import Typecode, TypeConversionError + +from ._common import MAX_STRICT_LEVEL_MAP, DefaultValue +from ._dataproperty import DataProperty +from ._preprocessor import Preprocessor +from .typing import DateTimeFormatter, FloatType, StrictLevelMap, TypeValueMap + + +class DataPropertyConverter: + __RE_QUOTE_LINE = re.compile(r"^\s*[\"'].*[\"']\s*$") # noqa: w605 + __RE_QUOTE_CHAR = re.compile("[\"']") + + def __init__( + self, + preprocessor: Preprocessor, + datetime_format_str: str, + datetime_formatter: Optional[DateTimeFormatter] = None, + type_value_map: Optional[TypeValueMap] = None, + quoting_flags: Optional[Dict[Typecode, bool]] = None, + float_type: Optional[FloatType] = None, + strict_level_map: Optional[StrictLevelMap] = None, + ) -> None: + self.__preprocessor = preprocessor + self.__type_value_map: TypeValueMap = ( + type_value_map if type_value_map else DefaultValue.TYPE_VALUE_MAP + ) + self.__quoting_flags: Dict[Typecode, bool] = ( + quoting_flags if quoting_flags else DefaultValue.QUOTING_FLAGS + ) + + self.__datetime_formatter = datetime_formatter + self.__datetime_format_str = datetime_format_str + self.__float_type = float_type + self.__strict_level_map = strict_level_map + + def convert(self, dp_value: DataProperty) -> DataProperty: + try: + return self.__create_dataproperty(self.__convert_value(dp_value)) + except TypeConversionError: + pass + + if not self.__quoting_flags.get(dp_value.typecode): + if self.__preprocessor.is_escape_html_tag: + return self.__create_dataproperty(dp_value.to_str()) + + return dp_value + + return self.__create_dataproperty(self.__apply_quote(dp_value.typecode, dp_value.to_str())) + + def __create_dataproperty(self, value: Any) -> DataProperty: + return DataProperty( + value, + preprocessor=self.__preprocessor, + float_type=self.__float_type, + datetime_format_str=self.__datetime_format_str, + strict_level_map=MAX_STRICT_LEVEL_MAP, + ) + + def __apply_quote(self, typecode: Typecode, data: Any) -> Any: + if not self.__quoting_flags.get(typecode): + return data + + try: + if self.__RE_QUOTE_LINE.search(data): + return data + except TypeError: + return data + + return '"{}"'.format(self.__RE_QUOTE_CHAR.sub('\\"', data.replace("\\", "\\\\"))) + + def 
__convert_value(self, dp_value: DataProperty) -> Any: + if dp_value.typecode in self.__type_value_map: + return self.__apply_quote(dp_value.typecode, self.__type_value_map[dp_value.typecode]) + + if dp_value.typecode == Typecode.DATETIME and self.__datetime_formatter: + try: + return self.__apply_quote( + dp_value.typecode, self.__datetime_formatter(dp_value.data) + ) + except TypeError: + raise TypeConversionError + + raise TypeConversionError("no need to convert") diff --git a/venv/lib/python3.10/site-packages/dataproperty/_dataproperty.py b/venv/lib/python3.10/site-packages/dataproperty/_dataproperty.py new file mode 100644 index 0000000000000000000000000000000000000000..4bc89bc6fbc108087d190126f08354ffd97e1708 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_dataproperty.py @@ -0,0 +1,382 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import typing +from decimal import Decimal +from typing import Any, Optional, cast + +import typepy +from mbstrdecoder import MultiByteStrDecoder +from typepy import ( + Bool, + DateTime, + Dictionary, + Infinity, + Integer, + IpAddress, + Nan, + NoneType, + NullString, + RealNumber, + StrictLevel, + String, + Typecode, + TypeConversionError, +) +from typepy.type import AbstractType + +from ._align import Align +from ._align_getter import align_getter +from ._base import DataPeropertyBase +from ._common import DefaultValue +from ._function import calc_ascii_char_width, get_number_of_digit +from ._preprocessor import Preprocessor +from .typing import FloatType, StrictLevelMap, TypeHint + + +class DataProperty(DataPeropertyBase): + __slots__ = ( + "__data", + "__no_ansi_escape_data", + "__align", + "__integer_digits", + "__additional_format_len", + "__length", + "__ascii_char_width", + ) + + __type_class_list: typing.List[AbstractType] = [ + NoneType, + Integer, + Infinity, + Nan, + IpAddress, + RealNumber, + Bool, + typepy.List, + Dictionary, + DateTime, + NullString, + String, + ] + + def __init__( + self, + data: Any, + preprocessor: Optional[Preprocessor] = None, + type_hint: TypeHint = None, + float_type: Optional[FloatType] = None, + format_flags: Optional[int] = None, + datetime_format_str: str = DefaultValue.DATETIME_FORMAT, + strict_level_map: Optional[StrictLevelMap] = None, + east_asian_ambiguous_width: int = 1, + ) -> None: + super().__init__( + format_flags=format_flags, + is_formatting_float=True, + datetime_format_str=datetime_format_str, + east_asian_ambiguous_width=east_asian_ambiguous_width, + ) + + self.__additional_format_len: Optional[int] = None + self.__align: Optional[Align] = None + self.__ascii_char_width: Optional[int] = None + self.__integer_digits: Optional[int] = None + self.__length: Optional[int] = None + + if preprocessor is None: + preprocessor = Preprocessor() + + data, no_ansi_escape_data = preprocessor.preprocess(data) + + self.__set_data(data, type_hint, float_type, strict_level_map) + + if no_ansi_escape_data is None or len(data) == len(no_ansi_escape_data): + self.__no_ansi_escape_data: Optional[DataProperty] = None + else: + self.__no_ansi_escape_data = DataProperty(no_ansi_escape_data, float_type=float_type) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, DataProperty): + return False + + if self.typecode != other.typecode: + return False + + if self.typecode == Typecode.NAN: + return True + + return self.data == other.data + + def __ne__(self, other: Any) -> bool: + if not isinstance(other, DataProperty): + return True + + if self.typecode != other.typecode: + return True 
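# Note: this __eq__/__ne__ pair deliberately treats two NAN-typed properties
# as equal, unlike raw float NaN. A minimal sketch, assuming DataProperty is
# exported at the package root:
from dataproperty import DataProperty

assert float("nan") != float("nan")                              # IEEE 754 NaN
assert DataProperty(float("nan")) == DataProperty(float("nan"))  # same Typecode.NAN
assert DataProperty(1) != DataProperty("1.5")                    # INTEGER vs REAL_NUMBER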
+ + if self.typecode == Typecode.NAN: + return False + + return self.data != other.data + + def __repr__(self) -> str: + element_list = [] + + if self.typecode == Typecode.DATETIME: + element_list.append(f"data={str(self.data):s}") + else: + try: + element_list.append("data=" + self.to_str()) + except UnicodeEncodeError: + element_list.append(f"data={MultiByteStrDecoder(self.data).unicode_str}") + + element_list.extend( + [ + f"type={self.typename:s}", + f"align={self.align.align_string}", + f"ascii_width={self.ascii_char_width:d}", + ] + ) + + if Integer(self.length).is_type(): + element_list.append(f"length={self.length}") + + if Integer(self.integer_digits).is_type(): + element_list.append(f"int_digits={self.integer_digits}") + + if Integer(self.decimal_places).is_type(): + element_list.append(f"decimal_places={self.decimal_places}") + + if Integer(self.additional_format_len).is_type(): + element_list.append(f"extra_len={self.additional_format_len}") + + return ", ".join(element_list) + + @property + def align(self) -> Align: + if not self.__align: + if self.is_include_ansi_escape: + assert self.no_ansi_escape_dp + self.__align = self.no_ansi_escape_dp.align + else: + self.__align = align_getter.get_align_from_typecode(self.typecode) + + assert self.__align + + return self.__align + + @property + def decimal_places(self) -> Optional[int]: + """ + :return: + Decimal places if the ``data`` type is either ``float`` or + ``decimal.Decimal``. Returns ``0`` if the ``data`` type is ``int``. + Otherwise, returns ``None``. + :rtype: Optional[int] + """ + + if self._decimal_places is None: + self.__set_digit() + + return self._decimal_places + + @property + def data(self) -> Any: + """ + :return: Original data value. + :rtype: Original data type. + """ + + return self.__data + + @property + def is_include_ansi_escape(self) -> bool: + if self.no_ansi_escape_dp is None: + return False + + return self.length != self.no_ansi_escape_dp.length + + @property + def no_ansi_escape_dp(self) -> Optional["DataProperty"]: + return self.__no_ansi_escape_data + + @property + def length(self) -> Optional[int]: + """ + :return: Length of the ``data``. + :rtype: Optional[int] + """ + + if self.__length is None: + self.__length = self.__get_length() + + return self.__length + + @property + def ascii_char_width(self) -> int: + if self.__ascii_char_width is None: + self.__ascii_char_width = self.__calc_ascii_char_width() + + return self.__ascii_char_width + + @property + def integer_digits(self) -> Optional[int]: + """ + :return: + Integer digits if the ``data`` type is either + ``int``/``float``/``decimal.Decimal``. + Otherwise, returns ``None``.
+ :rtype: Optional[int] + """ + + if self.__integer_digits is None: + self.__set_digit() + + return self.__integer_digits + + @property + def additional_format_len(self) -> int: + if self.__additional_format_len is None: + self.__additional_format_len = self.__get_additional_format_len() + + return self.__additional_format_len + + def get_padding_len(self, ascii_char_width: int) -> int: + if self.typecode in (Typecode.LIST, Typecode.DICTIONARY): + unicode_str_len = DataProperty(MultiByteStrDecoder(str(self.data)).unicode_str).length + assert unicode_str_len + return max( + ascii_char_width - (self.ascii_char_width - unicode_str_len), + 0, + ) + + try: + return max(ascii_char_width - (self.ascii_char_width - cast(int, self.length)), 0) + except TypeError: + return ascii_char_width + + def to_str(self) -> str: + return self.format_str.format(self.data) + + def __get_additional_format_len(self) -> int: + if not RealNumber(self.data, strip_ansi_escape=False).is_type(): + return 0 + + format_len = 0 + + if Decimal(self.data) < 0: + # for minus character + format_len += 1 + + return format_len + + def __get_base_float_len(self) -> int: + assert self.integer_digits is not None + assert self.decimal_places is not None + + if any([self.integer_digits < 0, self.decimal_places < 0]): + raise ValueError("integer digits and decimal places must be greater than or equal to zero") + + float_len = self.integer_digits + self.decimal_places + if self.decimal_places > 0: + # for dot + float_len += 1 + + return float_len + + def __get_length(self) -> Optional[int]: + if self.typecode in (Typecode.DICTIONARY, Typecode.LIST, Typecode.STRING): + return len(self.data) + + return None + + def __calc_ascii_char_width(self) -> int: + if self.typecode == Typecode.INTEGER: + return cast(int, self.integer_digits) + self.additional_format_len + + if self.typecode == Typecode.REAL_NUMBER: + return self.__get_base_float_len() + self.additional_format_len + + if self.typecode == Typecode.DATETIME: + try: + return len(self.to_str()) + except ValueError: + # reached when the year is < 1900; + # the datetime strftime() methods require year >= 1900.
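# Illustrative sketch of how the width branches above combine (assumes
# DataProperty is exported at the package root):
from dataproperty import DataProperty

dp = DataProperty(-1.25)
assert (dp.integer_digits, dp.decimal_places) == (1, 2)
assert dp.ascii_char_width == 5                     # minus sign + "1" + "." + "25"
assert DataProperty("あaア").ascii_char_width == 5  # full-width chars count as 2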
+ return len(str(self.data)) + + if self.is_include_ansi_escape: + assert self.no_ansi_escape_dp + return self.no_ansi_escape_dp.ascii_char_width + + try: + unicode_str = MultiByteStrDecoder(self.data).unicode_str + except ValueError: + unicode_str = self.to_str() + + return calc_ascii_char_width(unicode_str, self._east_asian_ambiguous_width) + + def __set_data( + self, + data: Any, + type_hint: TypeHint, + float_type: Optional[FloatType], + strict_level_map: Optional[StrictLevelMap], + ) -> None: + if float_type is None: + float_type = DefaultValue.FLOAT_TYPE + + if strict_level_map is None: + strict_level_map = DefaultValue.STRICT_LEVEL_MAP + + if type_hint: + type_obj = type_hint( + data, strict_level=StrictLevel.MIN, float_type=float_type, strip_ansi_escape=False + ) + self._typecode = type_obj.typecode + self.__data = type_obj.try_convert() + + if type_hint( + self.__data, + strict_level=StrictLevel.MAX, + float_type=float_type, + strip_ansi_escape=False, + ).is_type(): + return + + for type_class in self.__type_class_list: + strict_level = strict_level_map.get( + type_class(None).typecode, strict_level_map.get("default", StrictLevel.MAX) + ) + + if self.__try_convert_type(data, type_class, strict_level, float_type): + return + + raise TypeConversionError( + f"failed to convert: data={data}, strict_level={strict_level_map}" + ) + + def __set_digit(self) -> None: + integer_digits, decimal_places = get_number_of_digit(self.__data) + self.__integer_digits = integer_digits + self._decimal_places = decimal_places + + def __try_convert_type( + self, + data: Any, + type_class: AbstractType, + strict_level: int, + float_type: Optional[FloatType], + ) -> bool: + type_obj = type_class(data, strict_level, float_type=float_type, strip_ansi_escape=False) + + try: + self.__data = type_obj.convert() + except TypeConversionError: + return False + + self._typecode = type_obj.typecode + + return True diff --git a/venv/lib/python3.10/site-packages/dataproperty/_extractor.py b/venv/lib/python3.10/site-packages/dataproperty/_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..e364e9fdc9b95dfba7cdfc3a2728eb62505ae4e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_extractor.py @@ -0,0 +1,814 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import copy +import enum +import sys +import typing +from collections import Counter +from decimal import Decimal +from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union, cast + +import typepy +from typepy import ( + Bool, + DateTime, + Dictionary, + Infinity, + Integer, + IpAddress, + Nan, + NoneType, + NullString, + RealNumber, + StrictLevel, + String, + Typecode, + is_empty_sequence, +) +from typepy.type import AbstractType + +from ._column import ColumnDataProperty +from ._common import MIN_STRICT_LEVEL_MAP, DefaultValue +from ._converter import DataPropertyConverter +from ._dataproperty import DataProperty +from ._formatter import Format +from ._preprocessor import Preprocessor +from .logger import logger +from .typing import ( + DateTimeFormatter, + StrictLevelMap, + TransFunc, + TypeHint, + TypeValueMap, + normalize_type_hint, +) + + +DataPropertyMatrix = List[List[DataProperty]] + + +@enum.unique +class MatrixFormatting(enum.Enum): + # raise exception if the matrix is not properly formatted + EXCEPTION = 1 << 1 + + # trim to the minimum size column + TRIM = 1 << 2 + + # Append None values to columns so that it is the same as the maximum + # column size. 
+ FILL_NONE = 1 << 3 + + HEADER_ALIGNED = 1 << 4 + + +class DataPropertyExtractor: + """ + .. py:attribute:: quoting_flags + + Controls which items in a matrix are wrapped in double quotes: + a value is quoted when its |Typecode| maps to |True| in this + ``{Typecode: bool}`` dictionary. Defaults to: + + .. code-block:: python + :caption: The default values + + { + Typecode.BOOL: False, + Typecode.DATETIME: False, + Typecode.DICTIONARY: False, + Typecode.INFINITY: False, + Typecode.INTEGER: False, + Typecode.IP_ADDRESS: False, + Typecode.LIST: False, + Typecode.NAN: False, + Typecode.NULL_STRING: False, + Typecode.NONE: False, + Typecode.REAL_NUMBER: False, + Typecode.STRING: False, + } + """ + + def __init__(self, max_precision: Optional[int] = None) -> None: + self.max_workers = DefaultValue.MAX_WORKERS + + if max_precision is None: + self.__max_precision = DefaultValue.MAX_PRECISION + else: + self.__max_precision = max_precision + + self.__headers: Sequence[str] = [] + self.__default_type_hint: TypeHint = None + self.__col_type_hints: List[TypeHint] = [] + + self.__strip_str_header: Optional[str] = None + self.__is_formatting_float = True + self.__min_col_ascii_char_width = 0 + self.__default_format_flags = Format.NONE + self.__format_flags_list: Sequence[int] = [] + self.__float_type: Union[Type[float], Type[Decimal], None] = None + self.__datetime_format_str = DefaultValue.DATETIME_FORMAT + self.__strict_level_map = copy.deepcopy( + cast(Dict[Union[Typecode, str], int], DefaultValue.STRICT_LEVEL_MAP) + ) + self.__east_asian_ambiguous_width = 1 + + self.__preprocessor = Preprocessor() + + self.__type_value_map: Mapping[Typecode, Union[float, Decimal, None]] = copy.deepcopy( + DefaultValue.TYPE_VALUE_MAP + ) + + self.__trans_func_list: List[TransFunc] = [] + self.__quoting_flags = copy.deepcopy(DefaultValue.QUOTING_FLAGS) + self.__datetime_formatter: Optional[DateTimeFormatter] = None + self.__matrix_formatting = MatrixFormatting.TRIM + self.__dp_converter: DataPropertyConverter + + self.__clear_cache() + + def __clear_cache(self) -> None: + self.__update_dp_converter() + self.__dp_cache_zero = self.__to_dp_raw(0) + self.__dp_cache_one = self.__to_dp_raw(1) + self.__dp_cache_true = self.__to_dp_raw(True) + self.__dp_cache_false = self.__to_dp_raw(False) + self.__dp_cache_map = {None: self.__to_dp_raw(None), "": self.__to_dp_raw("")} + + @property + def headers(self) -> Sequence[str]: + return self.__headers + + @headers.setter + def headers(self, value: Sequence[str]) -> None: + if self.__headers == value: + return + + self.__headers = value + self.__clear_cache() + + @property + def default_type_hint(self) -> TypeHint: + return self.__default_type_hint + + @default_type_hint.setter + def default_type_hint(self, value: TypeHint) -> None: + if self.__default_type_hint == value: + return + + self.__default_type_hint = value + self.__clear_cache() + + @property + def column_type_hints(self) -> List[TypeHint]: + return self.__col_type_hints + + @column_type_hints.setter + def column_type_hints(self, value: Sequence[Union[str, TypeHint]]) -> None: + normalized_type_hints: List[TypeHint] = [] + + for type_hint in value: + type_hint = normalize_type_hint(type_hint) + if type_hint not in ( + Bool, + DateTime, + Dictionary, + Infinity, + Integer, + IpAddress, + typepy.List, + Nan, + NoneType, + RealNumber, + String, + NullString, + None, + ): + raise ValueError(f"invalid type hint: {type(type_hint)}") + + 
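# Illustrative sketch of the MatrixFormatting modes defined above (assumes
# DataPropertyExtractor and MatrixFormatting are exported at the package root):
from dataproperty import DataPropertyExtractor, MatrixFormatting

extractor = DataPropertyExtractor()
extractor.matrix_formatting = MatrixFormatting.FILL_NONE  # pad short rows
dp_matrix = extractor.to_dp_matrix([[1, 2, 3], [4, 5]])
print([[dp.data for dp in row] for row in dp_matrix])
# -> [[1, 2, 3], [4, 5, None]]; the TRIM default would cut every row to 2 columns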
normalized_type_hints.append(type_hint) + + if self.__col_type_hints == normalized_type_hints: + return + + self.__col_type_hints = normalized_type_hints + self.__clear_cache() + + @property + def is_formatting_float(self) -> bool: + return self.__is_formatting_float + + @is_formatting_float.setter + def is_formatting_float(self, value: bool) -> None: + self.__is_formatting_float = value + + @property + def max_precision(self) -> int: + return self.__max_precision + + @max_precision.setter + def max_precision(self, value: int) -> None: + if self.__max_precision == value: + return + + self.__max_precision = value + self.__clear_cache() + + @property + def preprocessor(self) -> Preprocessor: + return self.__preprocessor + + @preprocessor.setter + def preprocessor(self, value: Preprocessor) -> None: + if self.preprocessor == value: + return + + self.__preprocessor = value + self.__update_dp_converter() + + @property + def strip_str_header(self) -> Optional[str]: + return self.__strip_str_header + + @strip_str_header.setter + def strip_str_header(self, value: str) -> None: + if self.__strip_str_header == value: + return + + self.__strip_str_header = value + self.__clear_cache() + + @property + def min_column_width(self) -> int: + return self.__min_col_ascii_char_width + + @min_column_width.setter + def min_column_width(self, value: int) -> None: + if self.__min_col_ascii_char_width == value: + return + + self.__min_col_ascii_char_width = value + self.__clear_cache() + + @property + def default_format_flags(self) -> int: + return self.__default_format_flags + + @default_format_flags.setter + def default_format_flags(self, value: int) -> None: + if self.__default_format_flags == value: + return + + self.__default_format_flags = value + self.__clear_cache() + + @property + def format_flags_list(self) -> Sequence[int]: + return self.__format_flags_list + + @format_flags_list.setter + def format_flags_list(self, value: Sequence[int]) -> None: + if self.__format_flags_list == value: + return + + self.__format_flags_list = value + self.__clear_cache() + + @property + def float_type(self) -> Union[Type[float], Type[Decimal], None]: + return self.__float_type + + @float_type.setter + def float_type(self, value: Union[Type[float], Type[Decimal]]) -> None: + if self.__float_type == value: + return + + self.__float_type = value + self.__clear_cache() + + @property + def datetime_format_str(self) -> str: + return self.__datetime_format_str + + @datetime_format_str.setter + def datetime_format_str(self, value: str) -> None: + if self.__datetime_format_str == value: + return + + self.__datetime_format_str = value + self.__clear_cache() + + @property + def strict_level_map(self) -> StrictLevelMap: + return self.__strict_level_map + + @strict_level_map.setter + def strict_level_map(self, value: StrictLevelMap) -> None: + if self.__strict_level_map == value: + return + + self.__strict_level_map = cast(Dict[Union[Typecode, str], int], value) + self.__clear_cache() + + @property + def east_asian_ambiguous_width(self) -> int: + return self.__east_asian_ambiguous_width + + @east_asian_ambiguous_width.setter + def east_asian_ambiguous_width(self, value: int) -> None: + if self.__east_asian_ambiguous_width == value: + return + + self.__east_asian_ambiguous_width = value + self.__clear_cache() + + @property + def type_value_map(self) -> TypeValueMap: + return self.__type_value_map + + @type_value_map.setter + def type_value_map(self, value: TypeValueMap) -> None: + if self.__type_value_map == value: + return + + 
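# Illustrative sketch of the type_value_map hook handled below (assumes
# package-root exports; the str values used here are looser than the declared
# TypeValueMap annotation but are accepted at runtime):
from typepy import Typecode
from dataproperty import DataPropertyExtractor

extractor = DataPropertyExtractor()
extractor.type_value_map = {Typecode.NONE: "N/A", Typecode.NAN: "NaN"}
print(extractor.to_dp(None).data)          # -> "N/A" instead of None
print(extractor.to_dp(float("nan")).data)  # -> "NaN"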
self.__type_value_map = value + self.__clear_cache() + + def register_trans_func(self, trans_func: TransFunc) -> None: + self.__trans_func_list.insert(0, trans_func) + self.__clear_cache() + + @property + def quoting_flags(self) -> Dict[Typecode, bool]: + return self.__quoting_flags + + @quoting_flags.setter + def quoting_flags(self, value: Dict[Typecode, bool]) -> None: + if self.__quoting_flags == value: + return + + self.__quoting_flags = value + self.__clear_cache() + + @property + def datetime_formatter(self) -> Optional[DateTimeFormatter]: + return self.__datetime_formatter + + @datetime_formatter.setter + def datetime_formatter(self, value: Optional[DateTimeFormatter]) -> None: + if self.__datetime_formatter == value: + return + + self.__datetime_formatter = value + self.__clear_cache() + + @property + def matrix_formatting(self) -> MatrixFormatting: + return self.__matrix_formatting + + @matrix_formatting.setter + def matrix_formatting(self, value: MatrixFormatting) -> None: + if self.__matrix_formatting == value: + return + + self.__matrix_formatting = value + self.__clear_cache() + + @property + def max_workers(self) -> int: + assert self.__max_workers + + return self.__max_workers + + @max_workers.setter + def max_workers(self, value: Optional[int]) -> None: + try: + from _multiprocessing import SemLock, sem_unlink # noqa + except ImportError: + logger.debug("This platform lacks a functioning sem_open implementation") + value = 1 + + if "pytest" in sys.modules and value != 1: + logger.debug("set max_workers to 1 to avoid deadlock when executed from pytest") + value = 1 + + self.__max_workers = value + if not self.__max_workers: + self.__max_workers = DefaultValue.MAX_WORKERS + + def to_dp(self, value: Any) -> DataProperty: + self.__update_dp_converter() + + return self.__to_dp(value) + + def to_dp_list(self, values: Sequence[Any]) -> List[DataProperty]: + if is_empty_sequence(values): + return [] + + self.__update_dp_converter() + + return self._to_dp_list(values) + + def to_column_dp_list( + self, + value_dp_matrix: Any, + previous_column_dp_list: Optional[Sequence[ColumnDataProperty]] = None, + ) -> List[ColumnDataProperty]: + col_dp_list = self.__get_col_dp_list_base() + + logger.debug("converting to column dataproperty:") + + logs = [" params:"] + if self.headers: + logs.append(f" headers={len(self.headers)}") + logs.extend( + [ + " prev_col_count={}".format( + len(previous_column_dp_list) if previous_column_dp_list else None + ), + f" matrix_formatting={self.matrix_formatting}", + ] + ) + if self.column_type_hints: + logs.append( + " column_type_hints=({})".format( + ", ".join( + [ + type_hint.__name__ if type_hint else "none" + for type_hint in self.column_type_hints + ] + ) + ) + ) + else: + logs.append(" column_type_hints=()") + + for log in logs: + logger.debug(log) + + logger.debug(" results:") + for col_idx, value_dp_list in enumerate(zip(*value_dp_matrix)): + try: + col_dp_list[col_idx] + except IndexError: + col_dp_list.append( + ColumnDataProperty( + column_index=col_idx, + float_type=self.float_type, + min_width=self.min_column_width, + format_flags=self.__get_format_flags(col_idx), + is_formatting_float=self.is_formatting_float, + datetime_format_str=self.datetime_format_str, + east_asian_ambiguous_width=self.east_asian_ambiguous_width, + max_precision=self.__max_precision, + ) + ) + + col_dp = col_dp_list[col_idx] + col_dp.begin_update() + + try: + col_dp.merge(previous_column_dp_list[col_idx]) # type: ignore + except (TypeError, IndexError): + pass + + for 
value_dp in value_dp_list: + col_dp.update_body(value_dp) + + col_dp.end_update() + + logger.debug(f" {str(col_dp):s}") + + return col_dp_list + + def to_dp_matrix(self, value_matrix: Sequence[Sequence[Any]]) -> DataPropertyMatrix: + self.__update_dp_converter() + logger.debug(f"max_workers={self.max_workers}, preprocessor={self.__preprocessor}") + + value_matrix = self.__strip_data_matrix(value_matrix) + + if self.__is_dp_matrix(value_matrix): + logger.debug("already a dataproperty matrix") + return value_matrix # type: ignore + + if self.max_workers <= 1: + return self.__to_dp_matrix_st(value_matrix) + + return self.__to_dp_matrix_mt(value_matrix) + + def to_header_dp_list(self) -> List[DataProperty]: + self.__update_dp_converter() + + preprocessor = copy.deepcopy(self.__preprocessor) + preprocessor.strip_str = self.strip_str_header + + return self._to_dp_list( + self.headers, + type_hint=String, + preprocessor=preprocessor, + strict_level_map=MIN_STRICT_LEVEL_MAP, + ) + + def update_preprocessor(self, **kwargs: Any) -> bool: + is_updated = self.__preprocessor.update(**kwargs) + self.__update_dp_converter() + + return is_updated + + def update_strict_level_map(self, value: StrictLevelMap) -> bool: + org = copy.deepcopy(self.__strict_level_map) + self.__strict_level_map.update(value) + + if org == self.__strict_level_map: + return False + + self.__clear_cache() + + return True + + """ + def update_dict(self, lhs: Mapping, rhs: Mapping) -> bool: + is_updated = False + + for key, value in rhs.items(): + if key not in lhs: + lhs[] + continue + + if getattr(lhs, key) == value: + continue + + setattr(lhs, key, value) + is_updated = True + + return is_updated + """ + + @staticmethod + def __is_dp_matrix(value: Any) -> bool: + try: + return isinstance(value[0][0], DataProperty) + except (TypeError, IndexError): + return False + + def __get_col_type_hint(self, col_idx: int) -> TypeHint: + try: + return self.column_type_hints[col_idx] + except (TypeError, IndexError): + return self.default_type_hint + + def __get_format_flags(self, col_idx: int) -> int: + try: + return self.format_flags_list[col_idx] + except (TypeError, IndexError): + return self.__default_format_flags + + def __to_dp( + self, + data: Any, + type_hint: TypeHint = None, + preprocessor: Optional[Preprocessor] = None, + strict_level_map: Optional[StrictLevelMap] = None, + ) -> DataProperty: + for trans_func in self.__trans_func_list: + data = trans_func(data) + + if type_hint: + return self.__to_dp_raw( + data, + type_hint=type_hint, + preprocessor=preprocessor, + strict_level_map=strict_level_map, + ) + + try: + if data in self.__dp_cache_map: + return self.__dp_cache_map[data] + except TypeError: + # unhashable type + pass + + if data == 0: + if data is False: + return self.__dp_cache_false + return self.__dp_cache_zero + if data == 1: + if data is True: + return self.__dp_cache_true + return self.__dp_cache_one + + return self.__to_dp_raw( + data, type_hint=type_hint, preprocessor=preprocessor, strict_level_map=strict_level_map + ) + + def __to_dp_raw( + self, + data: Any, + type_hint: TypeHint = None, + preprocessor: Optional[Preprocessor] = None, + strict_level_map: Optional[StrictLevelMap] = None, + ) -> DataProperty: + if preprocessor: + preprocessor = Preprocessor( + dequote=preprocessor.dequote, + line_break_handling=preprocessor.line_break_handling, + line_break_repl=preprocessor.line_break_repl, + strip_str=preprocessor.strip_str, + is_escape_formula_injection=preprocessor.is_escape_formula_injection, + ) + else: + 
preprocessor = Preprocessor( + dequote=self.preprocessor.dequote, + line_break_handling=self.preprocessor.line_break_handling, + line_break_repl=self.preprocessor.line_break_repl, + strip_str=self.preprocessor.strip_str, + is_escape_formula_injection=self.__preprocessor.is_escape_formula_injection, + ) + + value_dp = DataProperty( + data, + preprocessor=preprocessor, + type_hint=(type_hint if type_hint is not None else self.default_type_hint), + float_type=self.float_type, + datetime_format_str=self.datetime_format_str, + strict_level_map=(strict_level_map if type_hint is not None else self.strict_level_map), + east_asian_ambiguous_width=self.east_asian_ambiguous_width, + ) + + return self.__dp_converter.convert(value_dp) + + def __to_dp_matrix_st(self, value_matrix: Sequence[Sequence[Any]]) -> DataPropertyMatrix: + return list( + zip( # type: ignore + *( + _to_dp_list_helper( + self, + col_idx, + values, + self.__get_col_type_hint(col_idx), + self.__preprocessor, + )[1] + for col_idx, values in enumerate(zip(*value_matrix)) + ) + ) + ) + + def __to_dp_matrix_mt(self, value_matrix: Sequence[Sequence[Any]]) -> DataPropertyMatrix: + from concurrent import futures + + col_data_map = {} + + with futures.ProcessPoolExecutor(self.max_workers) as executor: + future_list = [ + executor.submit( + _to_dp_list_helper, + self, + col_idx, + values, + self.__get_col_type_hint(col_idx), + self.__preprocessor, + ) + for col_idx, values in enumerate(zip(*value_matrix)) + ] + + for future in futures.as_completed(future_list): + col_idx, value_dp_list = future.result() + col_data_map[col_idx] = value_dp_list + + return list( + zip(*(col_data_map[col_idx] for col_idx in sorted(col_data_map))) # type: ignore + ) + + def _to_dp_list( + self, + data_list: Sequence[Any], + type_hint: TypeHint = None, + preprocessor: Optional[Preprocessor] = None, + strict_level_map: Optional[StrictLevelMap] = None, + ) -> List[DataProperty]: + if is_empty_sequence(data_list): + return [] + + type_counter: typing.Counter[Type[AbstractType]] = Counter() + + dp_list = [] + for data in data_list: + expect_type_hint: TypeHint = type_hint + if type_hint is None: + try: + expect_type_hint, _count = type_counter.most_common(1)[0] + if not expect_type_hint( + data, float_type=self.float_type, strict_level=StrictLevel.MAX + ).is_type(): + expect_type_hint = None + except IndexError: + pass + + dataprop = self.__to_dp( + data=data, + type_hint=expect_type_hint, + preprocessor=preprocessor if preprocessor else self.__preprocessor, + strict_level_map=strict_level_map, + ) + type_counter[dataprop.type_class] += 1 + + dp_list.append(dataprop) + + return dp_list + + def __strip_data_matrix(self, data_matrix: Sequence[Sequence[Any]]) -> Sequence[Sequence[Any]]: + header_col_size = len(self.headers) if self.headers else 0 + try: + col_size_list = [len(data_list) for data_list in data_matrix] + except TypeError: + return [] + + if self.headers: + min_col_size = min([header_col_size] + col_size_list) + max_col_size = max([header_col_size] + col_size_list) + elif col_size_list: + min_col_size = min(col_size_list) + max_col_size = max(col_size_list) + else: + min_col_size = 0 + max_col_size = 0 + + if self.matrix_formatting == MatrixFormatting.EXCEPTION: + if min_col_size != max_col_size: + raise ValueError( + "nonuniform column size found: min={}, max={}".format( + min_col_size, max_col_size + ) + ) + + return data_matrix + + if self.matrix_formatting == MatrixFormatting.HEADER_ALIGNED: + if header_col_size > 0: + format_col_size = header_col_size + 
else: + format_col_size = max_col_size + elif self.matrix_formatting == MatrixFormatting.TRIM: + format_col_size = min_col_size + elif self.matrix_formatting == MatrixFormatting.FILL_NONE: + format_col_size = max_col_size + else: + raise ValueError(f"unknown matrix formatting: {self.matrix_formatting}") + + return [ + list(data_matrix[row_idx][:format_col_size]) + [None] * (format_col_size - col_size) + for row_idx, col_size in enumerate(col_size_list) + ] + + def __get_col_dp_list_base(self) -> List[ColumnDataProperty]: + header_dp_list = self.to_header_dp_list() + col_dp_list = [] + + for col_idx, header_dp in enumerate(header_dp_list): + col_dp = ColumnDataProperty( + column_index=col_idx, + float_type=self.float_type, + min_width=self.min_column_width, + format_flags=self.__get_format_flags(col_idx), + is_formatting_float=self.is_formatting_float, + datetime_format_str=self.datetime_format_str, + east_asian_ambiguous_width=self.east_asian_ambiguous_width, + max_precision=self.__max_precision, + ) + col_dp.update_header(header_dp) + col_dp_list.append(col_dp) + + return col_dp_list + + def __update_dp_converter(self) -> None: + preprocessor = Preprocessor( + line_break_handling=self.__preprocessor.line_break_handling, + line_break_repl=self.preprocessor.line_break_repl, + is_escape_html_tag=self.__preprocessor.is_escape_html_tag, + is_escape_formula_injection=self.__preprocessor.is_escape_formula_injection, + ) + self.__dp_converter = DataPropertyConverter( + preprocessor=preprocessor, + type_value_map=self.type_value_map, + quoting_flags=self.quoting_flags, + datetime_formatter=self.datetime_formatter, + datetime_format_str=self.datetime_format_str, + float_type=self.float_type, + strict_level_map=self.strict_level_map, + ) + + +def _to_dp_list_helper( + extractor: DataPropertyExtractor, + col_idx: int, + data_list: Sequence[Any], + type_hint: TypeHint, + preprocessor: Preprocessor, +) -> Tuple[int, List[DataProperty]]: + return ( + col_idx, + extractor._to_dp_list(data_list, type_hint=type_hint, preprocessor=preprocessor), + ) diff --git a/venv/lib/python3.10/site-packages/dataproperty/_function.py b/venv/lib/python3.10/site-packages/dataproperty/_function.py new file mode 100644 index 0000000000000000000000000000000000000000..e69003950d189c012cc8f683cf26a0c86a24684a --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_function.py @@ -0,0 +1,112 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import decimal +import re +from decimal import Decimal +from typing import Any, Optional, Tuple, Union + +from typepy import Integer, RealNumber, TypeConversionError + + +decimal.setcontext(decimal.Context(prec=60, rounding=decimal.ROUND_HALF_DOWN)) + +_ansi_escape = re.compile(r"(\x9b|\x1b\[)[0-?]*[ -\/]*[@-~]", re.IGNORECASE) + + +def get_integer_digit(value: Any) -> int: + float_type = RealNumber(value) + + try: + abs_value = abs(float_type.convert()) + except TypeConversionError: + try: + abs_value = abs(Integer(value).convert()) + except TypeConversionError: + raise ValueError(f"the value must be a number: value='{value}' type='{type(value)}'") + + return len(str(abs_value)) + + if abs_value.is_zero(): + return 1 + + try: + return len(str(abs_value.quantize(Decimal("1."), rounding=decimal.ROUND_DOWN))) + except decimal.InvalidOperation: + return len(str(abs_value)) + + +class DigitCalculator: + REGEXP_COMMON_LOG = re.compile(r"[\d\.]+[eE]\-\d+") + REGEXP_SPLIT = re.compile(r"[eE]\-") + + def get_decimal_places(self, value: Union[str, float, int, Decimal]) -> int: + if Integer(value).is_type(): + return 0 + + float_digit_len = 0 + abs_value = abs(float(value)) + text_value = str(abs_value) + float_text = "0" + if text_value.find(".") != -1: + float_text = text_value.split(".")[1] + float_digit_len = len(float_text) + elif self.REGEXP_COMMON_LOG.search(text_value): + float_text = self.REGEXP_SPLIT.split(text_value)[1] + float_digit_len = int(float_text) + + return float_digit_len + + +_digit_calculator = DigitCalculator() + + +def get_number_of_digit( + value: Any, max_decimal_places: int = 99 +) -> Tuple[Optional[int], Optional[int]]: + try: + integer_digits = get_integer_digit(value) + except (ValueError, TypeError, OverflowError): + return (None, None) + + try: + decimal_places: Optional[int] = min( + _digit_calculator.get_decimal_places(value), max_decimal_places + ) + except (ValueError, TypeError): + decimal_places = None + + return (integer_digits, decimal_places) + + +def _validate_eaaw(east_asian_ambiguous_width: int) -> None: + if east_asian_ambiguous_width in (1, 2): + return + + raise ValueError( + "invalid east_asian_ambiguous_width: expected=1 or 2, actual={}".format( + east_asian_ambiguous_width + ) + ) + + +def strip_ansi_escape(unicode_str: str) -> str: + return _ansi_escape.sub("", unicode_str) + + +def calc_ascii_char_width(unicode_str: str, east_asian_ambiguous_width: int = 1) -> int: + import unicodedata + + width = 0 + for char in unicode_str: + char_width = unicodedata.east_asian_width(char) + if char_width in "WF": + width += 2 + elif char_width == "A": + _validate_eaaw(east_asian_ambiguous_width) + width += east_asian_ambiguous_width + else: + width += 1 + + return width diff --git a/venv/lib/python3.10/site-packages/dataproperty/_line_break.py b/venv/lib/python3.10/site-packages/dataproperty/_line_break.py new file mode 100644 index 0000000000000000000000000000000000000000..e98d3e830b8174a7f784067d28cc74ee0d92f8f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/_line_break.py @@ -0,0 +1,8 @@ +from enum import Enum, unique + + +@unique +class LineBreakHandling(Enum): + NOP = 0 + REPLACE = 1 + ESCAPE = 2 diff --git a/venv/lib/python3.10/site-packages/dataproperty/_preprocessor.py b/venv/lib/python3.10/site-packages/dataproperty/_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..6b810bba4b812cd34b1d2bb833e26731d8e5b4b2 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/dataproperty/_preprocessor.py @@ -0,0 +1,173 @@ +import html +import re +from typing import Any, Optional, Tuple, Union + +from mbstrdecoder import MultiByteStrDecoder + +from ._function import strip_ansi_escape +from ._line_break import LineBreakHandling + + +_RE_LINE_BREAK = re.compile(r"\r\n|\n") +_RE_FORMULA_PREFIX = re.compile(r"^[-\+=@]") + + +def normalize_lbh(value: Optional[LineBreakHandling]) -> LineBreakHandling: + if isinstance(value, LineBreakHandling): + return value + + if value is None: + return LineBreakHandling.NOP + + return LineBreakHandling[value.upper()] # type: ignore + + +class Preprocessor: + @property + def line_break_handling(self) -> Optional[LineBreakHandling]: + return self.__line_break_handling + + @line_break_handling.setter + def line_break_handling(self, value: Optional[LineBreakHandling]) -> None: + self.__line_break_handling = normalize_lbh(value) + + def __init__( + self, + strip_str: Optional[Union[str, bytes]] = None, + replace_tabs_with_spaces: bool = True, + tab_length: int = 2, + line_break_handling: Optional[LineBreakHandling] = None, + line_break_repl: str = " ", + dequote: bool = False, + is_escape_html_tag: bool = False, + is_escape_formula_injection: bool = False, + ) -> None: + self.strip_str = strip_str + self.replace_tabs_with_spaces = replace_tabs_with_spaces + self.tab_length = tab_length + self.line_break_handling = line_break_handling + self.line_break_repl = line_break_repl + self.dequote = dequote + self.is_escape_html_tag = is_escape_html_tag + self.is_escape_formula_injection = is_escape_formula_injection + + def __repr__(self) -> str: + return ", ".join( + [ + f"strip_str={self.strip_str!r}", + f"replace_tabs_with_spaces={self.replace_tabs_with_spaces}", + f"tab_length={self.tab_length}", + f"line_break_handling={self.line_break_handling}", + f"line_break_repl={self.line_break_repl}", + f"escape_html_tag={self.is_escape_html_tag}", + f"escape_formula_injection={self.is_escape_formula_injection}", + ] + ) + + def preprocess(self, data: Any) -> Tuple: + data, no_ansi_escape_data = self.__preprocess_string( + self.__preprocess_data(data, self.strip_str), + ) + return (data, no_ansi_escape_data) + + def update(self, **kwargs: Any) -> bool: + is_updated = False + + for key, value in kwargs.items(): + if not hasattr(self, key): + continue + + if getattr(self, key) == value: + continue + + setattr(self, key, value) + is_updated = True + + return is_updated + + def __preprocess_string(self, raw_data: Any) -> Tuple[Any, Optional[str]]: + data = raw_data + + if not isinstance(data, str): + return (data, None) + + if self.replace_tabs_with_spaces: + try: + data = data.replace("\t", " " * self.tab_length) + except (TypeError, AttributeError, ValueError): + pass + + if self.is_escape_html_tag: + try: + data = html.escape(data) + except AttributeError: + return (data, None) + + data = self.__process_line_break(data) + data = self.__escape_formula_injection(data) + data = self.__dequote(data) + + try: + return (data, strip_ansi_escape(data)) + except TypeError: + return (data, None) + + @staticmethod + def __preprocess_data(data: Any, strip_str: Optional[Union[str, bytes]]) -> Any: + if strip_str is None: + return data + + try: + return data.strip(strip_str) + except AttributeError: + return data + except UnicodeDecodeError: + return MultiByteStrDecoder(data).unicode_str.strip(strip_str) + except TypeError: + # reach here when data and strip_str type are different + if isinstance(data, bytes): + return 
MultiByteStrDecoder(data).unicode_str.strip(strip_str) + elif isinstance(strip_str, bytes): + return data.strip(MultiByteStrDecoder(strip_str).unicode_str) + + def __dequote(self, s: str) -> str: + if not self.dequote or not s: + return s + + try: + if (s[0] == s[-1]) and s.startswith(("'", '"')): + if s.count(s[0]) == 2: + return s[1:-1] + except TypeError: + pass + + return s + + def __process_line_break(self, data: str) -> str: + lbh = self.line_break_handling + + if lbh == LineBreakHandling.NOP: + return data + + try: + if lbh == LineBreakHandling.REPLACE: + return _RE_LINE_BREAK.sub(self.line_break_repl, data) + + if lbh == LineBreakHandling.ESCAPE: + return data.replace("\n", "\\n").replace("\r", "\\r") + except (TypeError, AttributeError): + return data + + raise ValueError(f"unexpected line_break_handling: {lbh}") + + def __escape_formula_injection(self, data: str) -> str: + if not self.is_escape_formula_injection: + return data + + try: + if _RE_FORMULA_PREFIX.search(data): + return "'" + data + except (TypeError, AttributeError): + return data + + return data diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/__init__.py b/venv/lib/python3.10/site-packages/dataproperty/logger/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..94ab3e70fe5810607d0f979b41461191c741a81c --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/logger/__init__.py @@ -0,0 +1,7 @@ +from ._logger import logger, set_logger + + +__all__ = ( + "logger", + "set_logger", +) diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..111849d8a40a1c9488b0ff2d508311ae16024182 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd1e3365eb4233ecfc5f4a7f6c5fed771e2a2580 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_logger.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_null_logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_null_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b153ed869d4f3c3bc4f3477a74ed633264084592 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/logger/__pycache__/_null_logger.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/_logger.py b/venv/lib/python3.10/site-packages/dataproperty/logger/_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..df419f5674b030cebd0bc894863de64b34640dd4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/logger/_logger.py @@ -0,0 +1,22 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +from ._null_logger import NullLogger + + +MODULE_NAME = "dataproperty" + +try: + from loguru import logger + + logger.disable(MODULE_NAME) +except ImportError: + logger = NullLogger() # type: ignore + + +def set_logger(is_enable: bool, propagation_depth: int = 1) -> None: + if is_enable: + logger.enable(MODULE_NAME) + else: + logger.disable(MODULE_NAME) diff --git a/venv/lib/python3.10/site-packages/dataproperty/logger/_null_logger.py b/venv/lib/python3.10/site-packages/dataproperty/logger/_null_logger.py new file mode 100644 index 0000000000000000000000000000000000000000..ad0f293ce2a288057ea07297e21504c7669edfd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/dataproperty/logger/_null_logger.py @@ -0,0 +1,41 @@ +class NullLogger: + level_name = None + + def remove(self, handler_id=None): # pragma: no cover + pass + + def add(self, sink, **kwargs): # pragma: no cover + pass + + def disable(self, name): # pragma: no cover + pass + + def enable(self, name): # pragma: no cover + pass + + def critical(self, __message, *args, **kwargs): # pragma: no cover + pass + + def debug(self, __message, *args, **kwargs): # pragma: no cover + pass + + def error(self, __message, *args, **kwargs): # pragma: no cover + pass + + def exception(self, __message, *args, **kwargs): # pragma: no cover + pass + + def info(self, __message, *args, **kwargs): # pragma: no cover + pass + + def log(self, __level, __message, *args, **kwargs): # pragma: no cover + pass + + def success(self, __message, *args, **kwargs): # pragma: no cover + pass + + def trace(self, __message, *args, **kwargs): # pragma: no cover + pass + + def warning(self, __message, *args, **kwargs): # pragma: no cover + pass diff --git a/venv/lib/python3.10/site-packages/dataproperty/py.typed b/venv/lib/python3.10/site-packages/dataproperty/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/triton/__init__.py b/venv/lib/python3.10/site-packages/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..76299bdeeb30aea0158c1085ec41200907f5a718 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/__init__.py @@ -0,0 +1,69 @@ +"""isort:skip_file""" +__version__ = '2.3.0' + +# --------------------------------------- +# Note: import order is significant here. + +# submodules +from .runtime import ( + autotune, + Config, + heuristics, + JITFunction, + KernelInterface, + reinterpret, + TensorWrapper, + OutOfResources, + MockTensor, +) +from .runtime.jit import jit +from .compiler import compile, CompilationError + +from . import language +from . import testing +from . import tools + +__all__ = [ + "autotune", + "cdiv", + "CompilationError", + "compile", + "Config", + "heuristics", + "impl", + "jit", + "JITFunction", + "KernelInterface", + "language", + "MockTensor", + "next_power_of_2", + "ops", + "OutOfResources", + "reinterpret", + "runtime", + "TensorWrapper", + "testing", + "tools", +] + +# ------------------------------------- +# misc. 
utilities that don't fit well +# into any specific module +# ------------------------------------- + + +def cdiv(x: int, y: int): + return (x + y - 1) // y + + +def next_power_of_2(n: int): + """Return the smallest power of 2 greater than or equal to n""" + n -= 1 + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n |= n >> 32 + n += 1 + return n diff --git a/venv/lib/python3.10/site-packages/triton/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e01fd0f61acbfd427d09830acb29cc6d4cb43844 Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/__pycache__/testing.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a7fa1b5fe7500bb6984ef35594d00832e338b9e Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/__pycache__/testing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/common/__init__.py b/venv/lib/python3.10/site-packages/triton/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dfb6f8870e48aff3f0e195ba7820fa9d68235fb2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/common/__init__.py @@ -0,0 +1,3 @@ +from .build import _build, cuda_include_dir, libcuda_dirs + +__all__ = ["_build", "libcuda_dirs", "cuda_include_dir"] diff --git a/venv/lib/python3.10/site-packages/triton/common/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/common/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2a2380aa2193df15de2819b6f78ca5773eec239 Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/common/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/common/__pycache__/backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/common/__pycache__/backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71c8f8aaff3574c8a99808ed21a09abf7cce95fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/common/__pycache__/backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/common/__pycache__/build.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/common/__pycache__/build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a9815db931ee2ccdd2e68b1a893aac7f4fdd3db Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/common/__pycache__/build.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/common/backend.py b/venv/lib/python3.10/site-packages/triton/common/backend.py new file mode 100644 index 0000000000000000000000000000000000000000..fffbf600af5f8e2b20b54d46b0d1c451ad7d3c4c --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/common/backend.py @@ -0,0 +1,183 @@ +import functools +import hashlib +import importlib +import importlib.util +import os +import re +import subprocess +import traceback +from typing import Dict + +from ..runtime.driver import DriverBase + +TRITON_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +TRITON_VERSION = "2.3.0" + + +class BaseBackend: + + def __init__(self, 
device_type: str) -> None: + self.device_type = device_type + + def add_stages(self, arch, extern_libs, stages): + """ + Customize the arch, extern_libs, and stages per backend-specific requirements + """ + raise NotImplementedError + + def add_meta_info(self, ir, cur_module, next_module, metadata, asm): + """ + Customize the ir, module, metadata, and asm per backend-specific requirements + """ + raise NotImplementedError + + def get_load_binary_fn(self): + """ + Return a callable that loads the compiled binary + """ + raise NotImplementedError + + def get_driver(self) -> DriverBase: + """ + Get the backend driver. Please refer to "DriverBase" for more details + """ + raise NotImplementedError + + def get_stream(self): + """ + Get the stream for the current device + """ + raise NotImplementedError + + def get_device_properties(self, device): + raise NotImplementedError + + def get_current_device(self): + """ + Get the current device + """ + raise NotImplementedError + + def set_current_device(self, device): + """ + Set the current device to the given device + """ + raise NotImplementedError + + def get_kernel_bin(self): + raise NotImplementedError + + def make_launcher_stub(self, name, signature, constants): + """ + Generate the launcher stub to launch the kernel + """ + raise NotImplementedError + + def get_architecture_descriptor(self, **kwargs): + """ + Get the architecture descriptor of the backend + """ + raise NotImplementedError + + @classmethod + def create_backend(cls, device_type: str): + return cls(device_type) + + +_backends: Dict[str, BaseBackend] = {} + + +def register_backend(device_type: str, backend_cls: type): + if device_type not in _backends: + _backends[device_type] = backend_cls.create_backend(device_type) + + +def get_backend(device_type: str): + if device_type not in _backends: + device_backend_package_name = f"...third_party.{device_type}" + if importlib.util.find_spec(device_backend_package_name, package=__spec__.name): + try: + importlib.import_module(device_backend_package_name, package=__spec__.name) + except Exception: + traceback.print_exc() + else: + return None + return _backends[device_type] if device_type in _backends else None + + +def _path_to_binary(binary: str): + base_dir = os.path.join(os.path.dirname(__file__), os.pardir) + paths = [ + os.environ.get(f"TRITON_{binary.upper()}_PATH", ""), + os.path.join(base_dir, "third_party", "cuda", "bin", binary) + ] + + for p in paths: + bin = p.split(" ")[0] + if os.path.exists(bin) and os.path.isfile(bin): + result = subprocess.check_output([bin, "--version"], stderr=subprocess.STDOUT) + if result is not None: + version = re.search(r".*release (\d+\.\d+).*", result.decode("utf-8"), flags=re.MULTILINE) + if version is not None: + return p, version.group(1) + raise RuntimeError(f"Cannot find {binary}") + + +@functools.lru_cache() +def path_to_ptxas(): + return _path_to_binary("ptxas") + + +@functools.lru_cache() +def path_to_cuobjdump(): + return _path_to_binary("cuobjdump") + + +@functools.lru_cache() +def path_to_nvdisasm(): + return _path_to_binary("nvdisasm") + + +@functools.lru_cache() +def compute_core_version_key(): + import pkgutil + contents = [] + # frontend + with open(__file__, "rb") as f: + contents += [hashlib.sha1(f.read()).hexdigest()] + # compiler + compiler_path = os.path.join(TRITON_PATH, 'compiler') + for lib in pkgutil.iter_modules([compiler_path]): + with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f: + contents += [hashlib.sha1(f.read()).hexdigest()] + # backend + libtriton_hash = hashlib.sha1() + with
open(os.path.join(TRITON_PATH, "_C/libtriton.so"), "rb") as f: + while True: + chunk = f.read(1024**2) + if not chunk: + break + libtriton_hash.update(chunk) + contents.append(libtriton_hash.hexdigest()) + # language + language_path = os.path.join(TRITON_PATH, 'language') + for lib in pkgutil.iter_modules([language_path]): + with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f: + contents += [hashlib.sha1(f.read()).hexdigest()] + # join the version with the component hashes to form the cache key + return TRITON_VERSION + '-' + '-'.join(contents) + + +_cached_cuda_version_key = None + + +def get_cuda_version_key(): + global _cached_cuda_version_key + if _cached_cuda_version_key is None: + key = compute_core_version_key() + try: + ptxas = path_to_ptxas()[0] + ptxas_version = subprocess.check_output([ptxas, "--version"]) + except RuntimeError: + ptxas_version = b"NO_PTXAS" + _cached_cuda_version_key = key + '-' + hashlib.sha1(ptxas_version).hexdigest() + return _cached_cuda_version_key diff --git a/venv/lib/python3.10/site-packages/triton/common/build.py b/venv/lib/python3.10/site-packages/triton/common/build.py new file mode 100644 index 0000000000000000000000000000000000000000..4153272a267cbf346b4db436ec230a2181923490 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/common/build.py @@ -0,0 +1,140 @@ +import contextlib +import functools +import io +import os +import shutil +import subprocess +import sys +import sysconfig + +import setuptools + + +# TODO: is_hip shouldn't be here +def is_hip(): + import torch + return torch.version.hip is not None + + +@functools.lru_cache() +def libcuda_dirs(): + env_libcuda_path = os.getenv("TRITON_LIBCUDA_PATH") + if env_libcuda_path: + return [env_libcuda_path] + + libs = subprocess.check_output(["/sbin/ldconfig", "-p"]).decode() + # each line looks like the following: + # libcuda.so.1 (libc6,x86-64) => /lib/x86_64-linux-gnu/libcuda.so.1 + locs = [line.split()[-1] for line in libs.splitlines() if "libcuda.so" in line] + dirs = [os.path.dirname(loc) for loc in locs] + env_ld_library_path = os.getenv("LD_LIBRARY_PATH") + if env_ld_library_path and not dirs: + dirs = [dir for dir in env_ld_library_path.split(":") if os.path.exists(os.path.join(dir, "libcuda.so"))] + msg = 'libcuda.so cannot be found!\n' + if locs: + msg += 'Possible files are located at %s.' % str(locs) + msg += ' Please create a symlink of libcuda.so to any of these files.' + else: + msg += 'Please make sure a GPU is set up and then run "/sbin/ldconfig"' + msg += ' (requires sudo) to refresh the linker cache.'
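+ # fail fast with the diagnostic message above if no candidate directory actually contains libcuda.so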
+ assert any(os.path.exists(os.path.join(path, 'libcuda.so')) for path in dirs), msg + return dirs + + +@functools.lru_cache() +def rocm_path_dir(): + return os.getenv("ROCM_PATH", default="/opt/rocm") + + +@contextlib.contextmanager +def quiet(): + old_stdout, old_stderr = sys.stdout, sys.stderr + sys.stdout, sys.stderr = io.StringIO(), io.StringIO() + try: + yield + finally: + sys.stdout, sys.stderr = old_stdout, old_stderr + + +@functools.lru_cache() +def cuda_include_dir(): + base_dir = os.path.join(os.path.dirname(__file__), os.path.pardir) + cuda_path = os.path.join(base_dir, "third_party", "cuda") + return os.path.join(cuda_path, "include") + + +def _build(name, src, srcdir): + if is_hip(): + hip_lib_dir = os.path.join(rocm_path_dir(), "lib") + hip_include_dir = os.path.join(rocm_path_dir(), "include") + else: + cuda_lib_dirs = libcuda_dirs() + cu_include_dir = cuda_include_dir() + suffix = sysconfig.get_config_var('EXT_SUFFIX') + so = os.path.join(srcdir, '{name}{suffix}'.format(name=name, suffix=suffix)) + # try to avoid setuptools if possible + cc = os.environ.get("CC") + if cc is None: + # TODO: support more things here. + clang = shutil.which("clang") + gcc = shutil.which("gcc") + cc = gcc if gcc is not None else clang + if cc is None: + raise RuntimeError("Failed to find C compiler. Please specify via CC environment variable.") + # This function was renamed and made public in Python 3.10 + if hasattr(sysconfig, 'get_default_scheme'): + scheme = sysconfig.get_default_scheme() + else: + scheme = sysconfig._get_default_scheme() + # 'posix_local' is a custom scheme on Debian. However, starting Python 3.10, the default install + # path changes to include 'local'. This change is required to use triton with system-wide python. + if scheme == 'posix_local': + scheme = 'posix_prefix' + py_include_dir = sysconfig.get_paths(scheme=scheme)["include"] + + if is_hip(): + ret = subprocess.check_call([ + cc, src, f"-I{hip_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", + f"-L{hip_lib_dir}", "-lamdhip64", "-o", so + ]) + else: + cc_cmd = [ + cc, src, "-O3", f"-I{cu_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", "-lcuda", + "-o", so + ] + cc_cmd += [f"-L{dir}" for dir in cuda_lib_dirs] + ret = subprocess.check_call(cc_cmd) + + if ret == 0: + return so + # fallback on setuptools + extra_compile_args = [] + library_dirs = cuda_lib_dirs + include_dirs = [srcdir, cu_include_dir] + libraries = ['cuda'] + # extra arguments + extra_link_args = [] + # create extension module + ext = setuptools.Extension( + name=name, + language='c', + sources=[src], + include_dirs=include_dirs, + extra_compile_args=extra_compile_args + ['-O3'], + extra_link_args=extra_link_args, + library_dirs=library_dirs, + libraries=libraries, + ) + # build extension module + args = ['build_ext'] + args.append('--build-temp=' + srcdir) + args.append('--build-lib=' + srcdir) + args.append('-q') + args = dict( + name=name, + ext_modules=[ext], + script_args=args, + ) + with quiet(): + setuptools.setup(**args) + return so diff --git a/venv/lib/python3.10/site-packages/triton/ops/__init__.py b/venv/lib/python3.10/site-packages/triton/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ceec8b56a00ba5ad66682aa3f8cdd7015a304a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/ops/__init__.py @@ -0,0 +1,14 @@ +# from .conv import _conv, conv +from . 
import blocksparse +from .cross_entropy import _cross_entropy, cross_entropy +from .flash_attention import attention +from .matmul import _matmul, matmul + +__all__ = [ + "blocksparse", + "_cross_entropy", + "cross_entropy", + "_matmul", + "matmul", + "attention", +] diff --git a/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__init__.py b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6b24b5377fab564ac951b827e398c744524d711c --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__init__.py @@ -0,0 +1,7 @@ +from .matmul import matmul +from .softmax import softmax + +__all__ = [ + "matmul", + "softmax", +] diff --git a/venv/lib/python3.10/site-packages/triton/ops/blocksparse/matmul.py b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/matmul.py new file mode 100644 index 0000000000000000000000000000000000000000..098e1543809e9dcea3a77c3a38538bcd34153eb8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/matmul.py @@ -0,0 +1,432 @@ +import torch + +from ... import cdiv, heuristics, jit +from ... import language as tl + +# ******************************************************** +# -------------------------------------------------------- +# Sparse = Dense x Dense (SDD) +# This operation uses super-blocking to make sure that +# it's done efficiently when small blocks can be grouped +# together +# -------------------------------------------------------- +# ******************************************************** + + +@heuristics({ + 'EVEN_K': lambda nargs: nargs['K'] % nargs['TILE_K'] == 0, +}) +@jit +def _sdd_kernel(A, B, C, # + stride_za, stride_ha, stride_ma, stride_ak, # + stride_zb, stride_hb, stride_bk, stride_nb, # + stride_zc, stride_hc, stride_mc, stride_nc, # + K, grid_offset, lut, # + TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr, # + BLOCK: tl.constexpr, EVEN_K: tl.constexpr # + ): + # ------------ # + # - Prologue - # + # ------------ # + block_id = tl.program_id(0) + grid_offset + lut += block_id * 3 + # offsets + off_z = tl.program_id(2) # batch + off_h = tl.load(lut + 0) # head + + # initialize pointers to A + start_am = tl.load(lut + 1) + offs_am = start_am * BLOCK + (tl.arange(0, TILE_M) % BLOCK) + offs_ak = tl.arange(0, TILE_K) + a_ptrs = A \ + + off_z * stride_za \ + + off_h * stride_ha \ + + offs_am[:, None] * stride_ma \ + + offs_ak[None, :] * stride_ak + # initialize pointers to B + start_bn = tl.load(lut + 2) + offs_bn = start_bn * BLOCK + (tl.arange(0, TILE_N) % BLOCK) + offs_bk = tl.arange(0, TILE_K) + b_ptrs = B \ + + off_z * stride_zb \ + + off_h * stride_hb \ + + offs_bn[None, :] * stride_nb \ + + offs_bk[:, None] * stride_bk + # ---------------- # + # Inner Loop # + # ---------------- # + acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32) + for k in range(K, 0, -TILE_K): + if EVEN_K: + a = tl.load(a_ptrs) + b = tl.load(b_ptrs) + else: + a = tl.load(a_ptrs, mask=offs_ak[None, :] < k, other=0.) + b = tl.load(b_ptrs, mask=offs_bk[:, None] < k, other=0.) 
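+ # note: with EVEN_K the constexpr branch above is specialized away and the unmasked loads are used; otherwise the masked loads zero-fill the out-of-range K-tail so the dot product below accumulates only valid elements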
+ acc += tl.dot(a, b, out_dtype=tl.float32) + a_ptrs += TILE_K * stride_ak + b_ptrs += TILE_K * stride_bk + c = acc.to(C.dtype.element_ty) + # ---------------- # + # Epilogue # + # ---------------- # + offs_cm = tl.arange(0, TILE_M) % BLOCK + offs_cn = tl.arange(0, TILE_N) % BLOCK + pc = C \ + + off_z * stride_zc \ + + block_id * stride_hc \ + + offs_cm[:, None] * stride_mc \ + + offs_cn[None, :] * stride_nc + tl.store(pc, c, mask=True) + + +def sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, widths, out=None): + if a.stride(2) != 1 and a.stride(3) != 1: + a = a.contiguous() + if b.stride(2) != 1 and b.stride(3) != 1: + b = b.contiguous() + # (A * B)^T = B^T * A^T + if trans_c: + a, b = b, a + trans_a, trans_b = not trans_b, not trans_a + # shape constraints + a_dim = -2 if trans_a else -1 + b_dim = -1 if trans_b else -2 + Ka, Kb = a.shape[a_dim], b.shape[b_dim] + if Ka != Kb: + raise ValueError(f"Inner dimension mismatch (A: {Ka} vs B: {Kb})") + # allocate output + if out is None: + c = torch.empty((a.shape[0], lut.shape[0], block, block), dtype=a.dtype, device=a.device) + else: + assert out.shape == (a.shape[0], lut.shape[0], block, block) + c = out + grid = [c.shape[1], 1, c.shape[0]] + _sdd_kernel[grid]( + a, b, c, # + a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3), # + b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3), # + c.stride(0), c.stride(1), c.stride(2), c.stride(3), # + Ka, 0, lut, # + TILE_M=block, TILE_N=block, TILE_K=32, BLOCK=block, num_stages=4, # + num_warps=4 # + ) + return c + + +def sdd_lut(layout, block, device): + lut = layout.nonzero(as_tuple=False).to(device).int() + lut = lut.contiguous() + return lut, None + + +# ----------------------------- +# Dense = Sparse x Dense (DSD) +# This operation uses a look-up table that contains pre-computed pointer increments +# in order to minimize computations in the inner loop of the matmul kernel. 
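+# Each program reads a 4-int header from the LUT (offset into the increment table, reduction length, output column, head index), unpacked at the top of _dsd_kernel below.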
+# ----------------------------- + + +@jit +def _dsd_kernel(A, B, C, # + stride_az, stride_ha, stride_am, stride_ak, # + stride_zb, stride_hb, stride_bk, stride_bn, # + stride_zc, stride_hc, stride_cm, stride_cn, # + DS0, DS1, lut, # + TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr, # + GROUP_SIZE_M: tl.constexpr, BLOCK: tl.constexpr # + ): + # ------------ # + # - Prologue - # + # ------------ # + pid_m = tl.program_id(0) + pid_n = tl.program_id(1) + num_pid_m = tl.num_programs(0) + num_pid_n = tl.num_programs(1) + pid_n, pid_m = tl.swizzle2d(pid_n, pid_m, num_pid_n, num_pid_m, GROUP_SIZE_M) + pidz = tl.program_id(2) + header = lut + pid_n * 4 + offset = tl.load(header + 0) + K = tl.load(header + 1) + column = tl.load(header + 2) + off_h = tl.load(header + 3) + pinc = lut + offset + # initialize pointers to A (sparse) + block_id = tl.load(pinc + 1) + block_id = tl.multiple_of(block_id, 8) # compiler hint + offs_am = tl.arange(0, TILE_M) + offs_ak = tl.arange(0, TILE_K) + pa = A + pidz * stride_az \ + + block_id * stride_ha \ + + offs_am[:, None] * stride_am \ + + offs_ak[None, :] * stride_ak + # initialize pointers to B (dense) + offs_bn = pid_m * TILE_N + tl.arange(0, TILE_N) + offs_bn = tl.max_contiguous(tl.multiple_of(offs_bn % DS0, TILE_N), TILE_N) + start_bk = tl.load(pinc) + start_bk = tl.multiple_of(start_bk, 8) # compiler hint + offs_bk = start_bk + tl.arange(0, TILE_K) + pb = B + pidz * stride_zb \ + + off_h * stride_hb \ + + offs_bn[None, :] * stride_bn \ + + offs_bk[:, None] * stride_bk + # ---------------- # + # Inner Loop # + # ---------------- # + acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32) + pinc += 2 + inc_a = tl.load(pinc + 1) + inc_a = tl.multiple_of(inc_a, 8) + inc_b = tl.load(pinc) + inc_b = tl.multiple_of(inc_b, 8) + for k in range(K, 0, -TILE_K): + a = tl.load(pa) + b = tl.load(pb) + acc += tl.dot(a, b, out_dtype=tl.float32) + pa += inc_a + pb += inc_b * stride_bk + pinc += 2 + inc_a = tl.load(pinc + 1) + inc_a = tl.multiple_of(inc_a, 8) + inc_b = tl.load(pinc) + inc_b = tl.multiple_of(inc_b, 8) + c = acc.to(C.dtype.element_ty) + # initialize pointers to C + offs_cm = column * TILE_M + tl.arange(0, TILE_M) + offs_cn = pid_m * TILE_N + tl.arange(0, TILE_N) + pc = C \ + + off_h * stride_hc \ + + pidz * stride_zc \ + + offs_cm[:, None] * stride_cm \ + + offs_cn[None, :] * stride_cn + tl.store(pc, c, mask=offs_cn[None, :] < DS0) + + +def dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None): + if a.stride(2) != 1 and a.stride(3) != 1: + a = a.contiguous() + if b.stride(2) != 1 and b.stride(3) != 1: + b = b.contiguous() + # shapes / dtypes + AS1 = block * spdims[2 if trans_a else 1] + BS0 = b.size(0) + BS1 = b.size(1) + BS3 = b.size(2 if trans_b else 3) + dtype = a.dtype + # allocate output + CS0 = BS0 + CS1 = BS1 + CS2 = BS3 if trans_c else AS1 + CS3 = AS1 if trans_c else BS3 + if out is None: + c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device) + else: + assert out.shape == (CS0, CS1, CS2, CS3) + c = out + # meta-parameter heuristics + TILE_N = 128 + # compute output + grid = lambda meta: [cdiv(BS3, meta['TILE_N']), width, BS0] + _dsd_kernel[grid]( + a, b, c, # + a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3), # + b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3), # + c.stride(0), c.stride(1), c.stride(3 if trans_c else 2), c.stride(2 if trans_c else 3), # + BS3, AS1, lut, # + TILE_M=block, TILE_N=TILE_N, TILE_K=min(block, 
32), BLOCK=block, num_stages=4, # + num_warps=4, GROUP_SIZE_M=4 # + ) + # exit() + return c + + +def dsd_lut(layout, block, step, trans, device): + """ + Generates the look-up table for incrementing pointers in the DSD/DDS matmul. + Example (BLOCK=32, STEP=16) + [[1, 0, 0, 1, 0], + [0, 1, 1, 0, 1], + [1, 0, 1, 0, 0]] + + Then the offsets for A are + [0 , 16, 32, 48] <- row 0 + \\----/ \\----/ + col=0 col=3 + [64, 80, 96, 112, 128, 144] <- row 1 + \\----/ \\----/ \\------/ + col=1 col=2 col=3 + [160, 176, 192, 208] + which leads to increments table + [0, 16, 16, 16, || 64, 16, 16, 16, 16, 16, || 160, 16, 16, 16] + + Because B is dense, the offsets are + [0, 16, 96, 112] <- row 0 + [32, 48, 64, 80] <- row 1 + [0, 16, 64, 80] <- row 2 + """ + sizes = torch.sum(layout, 2 if trans else 1) + head_id, col_id = torch.ones_like(sizes).nonzero(as_tuple=True) + sizes = sizes.flatten() + segments = sizes * step + # pointer increments + if trans: + nnz = layout.nonzero(as_tuple=False) + else: + nnz = layout.transpose(1, 2).nonzero(as_tuple=False) + num_blocks = nnz.size(0) + offsets = torch.zeros_like(sizes) + offsets[1:] = torch.cumsum(sizes[:-1], dim=0) + offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets)) + # ------------------------------- + # dense input pointer increments + # ------------------------------- + # Note that the inner loop matmul kernel may have a fixed step size (e.g., TILE_K) + # that is smaller than the block size, so we need to do a bit of extra work + # to handle this case + B_idx = nnz[:, 2] * block + B_incs = B_idx.clone() + B_incs[1:] -= B_idx[:-1] + div = block // step + B_incs = B_incs.view(-1, 1).repeat(1, div) + B_incs[:, 1:] = step + B_incs[:, 0] -= (div - 1) * step + # first increment for each reduction is actually the offset + B_incs[offsets[segments > 0], 0] = B_idx[offsets[segments > 0]] + B_incs = B_incs.view(-1) + # ------------------------------- + # sparse input pointer increments + # ------------------------------- + # same as above, except that the increments are in the sparse memory layout + if trans: + A_idx = torch.arange(num_blocks, device=layout.device) + else: + A_idx = torch.tensor([], dtype=torch.int64, device=layout.device) + current_offset = 0 + for z in range(layout.size(0)): + layoutw = layout[z, :, :].clone().long() + msum = layoutw.sum() + layoutw[layoutw > 0] = 1 + torch.arange(msum, device=layout.device) + A_idx = torch.cat((A_idx, current_offset + layoutw.T[layoutw.T > 0] - 1)) + current_offset += msum + A_incs = A_idx * block * block + A_incs[1:] -= A_idx[:-1] * block * block + A_incs = A_incs.view(-1, 1).repeat(1, div) + if trans: + A_incs[:, 1:] = step + A_incs[:, 0] -= (div - 1) * step + else: + A_incs[:, 1:] = step * block + A_incs[:, 0] -= (div - 1) * step * block + A_incs[offsets[segments > 0], 0] = A_idx[offsets[segments > 0]] + A_incs = A_incs.view(-1) + # create header + width = col_id.size(0) + offsets = offsets * 2 * div + 4 * width + segments = segments * div + header = torch.stack((offsets, segments, col_id, head_id), dim=1).view(-1).contiguous() + # create increments + incs = torch.stack((B_incs, A_incs), dim=1).view(-1).contiguous() + # pad by a factor 2*MAX_NUM_STAGES + # to accommodate pre-fetching inside the kernel + pad = torch.zeros(20, device=incs.device, dtype=incs.dtype) + incs = torch.cat((incs, pad)) + # create lut + lut = torch.cat((header, incs)) + lut = lut.type(torch.int32).to(device) + # create locks + return lut, width + + +# ----------------------------- +# Dense = Dense x Sparse (DDS) +# 
----------------------------- +# AB = (B^T A^T)^T + + +def dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None): + return dsd_matmul(b, a, not trans_b, not trans_a, not trans_c, spdims, block, lut, width, out=out) + + +############## +# MAIN API # +############## + + +class _matmul(torch.autograd.Function): + + fn = {'sdd': sdd_matmul, 'dsd': dsd_matmul, 'dds': dds_matmul} + + @staticmethod + def forward(ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_width, da_lut, da_width, db_lut, + db_width, out): + c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_width, out=out) + # save for backward + ctx.save_for_backward(a, b) + ctx.da_lut = da_lut + ctx.da_width = da_width + ctx.db_lut = db_lut + ctx.db_width = db_width + ctx.mode = mode + ctx.spdims = spdims + ctx.block = block + ctx.trans_a = trans_a + ctx.trans_b = trans_b + ctx.trans_c = trans_c + ctx.has_out = out is not None + return c + + @staticmethod + def backward(ctx, dc): + # saved for backward + a, b = ctx.saved_tensors + da, db = None, None + mode = ctx.mode + # gradients w.r.t. a + if ctx.needs_input_grad[0]: + mode_da = mode[1] + mode[0] + mode[2] + da = _matmul.fn[mode_da](dc, b, ctx.trans_c, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block, + ctx.da_lut, ctx.da_width) + # gradients w.r.t. b + if ctx.needs_input_grad[1]: + mode_db = mode[2] + mode[1] + mode[0] + db = _matmul.fn[mode_db](a, dc, not ctx.trans_a, ctx.trans_c, ctx.trans_b, ctx.spdims, ctx.block, + ctx.db_lut, ctx.db_width) + dout = dc if ctx.has_out else None + return da, db, None, None, None, \ + None, None, None, None, \ + None, None, None, None, None, dout + + +class matmul: + + def __init__(self, layout, block, mode, device, trans_a=False, trans_b=False, trans_c=False): + if mode not in ['sdd', 'dsd', 'dds']: + raise NotImplementedError('Supported modes are: sdd, dsd, dds') + self.block = block + self.mode = mode + self.trans_a = trans_a + self.trans_b = trans_b + self.trans_c = trans_c + self.layout = layout + self.spdims = layout.shape + step = min(block, 32) + if self.mode == 'sdd': + self.c_lut, self.c_width = sdd_lut(layout, block, device) + self.da_lut, self.da_width = dsd_lut(layout, block, step, True, device) + self.db_lut, self.db_width = dsd_lut(layout, block, step, False, device) + if self.mode == 'dsd': + self.c_lut, self.c_width = dsd_lut(layout, block, step, not self.trans_a, device) + self.da_lut, self.da_width = sdd_lut(layout, block, device) + self.db_lut, self.db_width = dsd_lut(layout, block, step, self.trans_a, device) + if self.mode == 'dds': + self.c_lut, self.c_width = dsd_lut(layout, block, step, self.trans_b, device) + self.da_lut, self.da_width = dsd_lut(layout, block, step, not self.trans_b, device) + self.db_lut, self.db_width = sdd_lut(layout, block, device) + + def __call__(self, a, b, out=None): + c = _matmul.apply(a, b, self.trans_a, self.trans_b, self.trans_c, self.mode, self.spdims, self.block, # + self.c_lut, self.c_width, # + self.da_lut, self.da_width, # + self.db_lut, self.db_width, # + out) + return c diff --git a/venv/lib/python3.10/site-packages/triton/ops/blocksparse/softmax.py b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/softmax.py new file mode 100644 index 0000000000000000000000000000000000000000..bcffff26bb515aa65f9e199d9e7c3b36bc6fe1c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/softmax.py @@ -0,0 +1,228 @@ +import torch + +from ... import jit +from ... import language as tl +from ... 
import next_power_of_2 + + +def num_warps(n): + if n <= 128: + return 1 + if n <= 256: + return 2 + if n <= 512: + return 4 + if n <= 4096: + return 8 + return 16 + + +@jit +def _blocksparse_softmax_fwd(Out, A, stride_xz, LUT, # + R, extent, stride_zr, stride_hr, # relative attention + scale, is_causal, # + ROW_SIZE: tl.constexpr, # + BLOCK_SIZE: tl.constexpr, # + IS_DENSE: tl.constexpr # + ): + h = tl.program_id(0) + m = tl.program_id(1) + z = tl.program_id(2) + # create index ranges + hm = h * tl.num_programs(1) + m + lane_n = tl.arange(0, ROW_SIZE) % BLOCK_SIZE + block_n = tl.arange(0, ROW_SIZE) // BLOCK_SIZE + # extract information from LUT + header = LUT + (hm // BLOCK_SIZE) * 2 + size = tl.load(header + 0) + offset = tl.load(header + 1) + # pointer offset + off_a = z * stride_xz + off_a += (offset + block_n) * BLOCK_SIZE * BLOCK_SIZE # block indx + off_a += (m % BLOCK_SIZE) * BLOCK_SIZE # row indx + # do not need to read column indices in the dense case + if IS_DENSE: + ns = tl.arange(0, ROW_SIZE) + else: + off_lut = offset + 2 * tl.num_programs(0) * tl.num_programs(1) // BLOCK_SIZE + start_n = tl.load(LUT + off_lut + block_n, mask=block_n < size, other=0) + ns = start_n * BLOCK_SIZE + lane_n + # load X + mask = block_n < size + a = tl.load(A + off_a + lane_n, mask=mask, other=-float("inf")) + a = a.to(tl.float32) + # compute + out = a + out *= scale + # apply relative attention + if R is not None: + R += z * stride_zr + R += h * stride_hr + off_lo = (extent - m - 1) + ns + mask_lo = (off_lo >= 0) & (off_lo < extent) + rel_logits = tl.load(R + m * extent + off_lo, mask=mask_lo, other=0.0) + out += rel_logits + out = out.to(tl.float32) + # apply causal mask + out = tl.where((ns > m) & is_causal, -float("inf"), out) + # computation + out = tl.softmax(out) + # write-back + tl.store(Out + off_a + lane_n, out, mask=mask) + + +@jit +def _blocksparse_softmax_bwd(DA, stride_zdx, # + DOut, stride_zdout, # + Out, stride_zout, # + scale, # + LUT, # + DR, extent, stride_zr, stride_hr, stride_er, # + is_causal, # + ROW_SIZE: tl.constexpr, # + BLOCK_SIZE: tl.constexpr, # + IS_DENSE: tl.constexpr): + h = tl.program_id(0) + m = tl.program_id(1) + z = tl.program_id(2) + # create index ranges + hm = h * tl.num_programs(1) + m + lane_n = tl.arange(0, ROW_SIZE) % BLOCK_SIZE + block_n = tl.arange(0, ROW_SIZE) // BLOCK_SIZE + # extract information from LUT + header = LUT + (hm // BLOCK_SIZE) * 2 + size = tl.load(header + 0) + offset = tl.load(header + 1) + # row-col offset + off_mn = (offset + block_n) * BLOCK_SIZE * BLOCK_SIZE + off_mn += (m % BLOCK_SIZE) * BLOCK_SIZE + mask = block_n < size + # pointers + As = Out + z * stride_zout + off_mn + DOuts = DOut + z * stride_zdout + off_mn + # do not need to read column indices in the dense case + if IS_DENSE: + ns = tl.arange(0, ROW_SIZE) + else: + off_lut = offset + 2 * tl.num_programs(0) * tl.num_programs(1) // BLOCK_SIZE + start_n = tl.load(LUT + off_lut + block_n, mask=mask, other=0) + ns = start_n * BLOCK_SIZE + lane_n + # load data + a = tl.load(As + lane_n, mask=mask, other=0.0) + a = a.to(tl.float32) + dout = tl.load(DOuts + lane_n, mask=mask, other=0.0) + dout = dout.to(tl.float32) + # compute + a = tl.where((ns > m) & is_causal & (a == a), 0., a) + da = a * (dout - tl.sum(a * dout, 0)) + # apply relative attention + if DR is not None: + DR += z * stride_zr + DR += h * stride_hr + off_lo = (extent - m - 1) + ns + mask_lo = (off_lo >= 0) & (off_lo < extent) & mask + tl.store(DR + m * extent + off_lo, da, mask=mask_lo) + da = da * scale + # convert da 
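+ # (the tl.store below casts da back to DA's element type on write)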
+ # write-back + DAs = DA + z * stride_zdx + off_mn + tl.store(DAs + lane_n, da, mask=mask) + + +class _softmax(torch.autograd.Function): + + @staticmethod + def make_lut(layout, block, device): + _empty = torch.tensor([], dtype=torch.int64, device=layout.device) + sizes = _empty.clone() + # sizes along rows + for h in range(layout.shape[0]): + sizes = torch.cat((sizes, layout[h, :, :].sum(-1))) + total_sizes = sizes * block + # offsets in block format + offsets = torch.zeros_like(sizes) + offsets[1:] = torch.cumsum(sizes[:-1], dim=0) + # block indices + columns = layout.nonzero(as_tuple=False)[:, 2] + header = torch.stack((sizes, offsets), dim=1).view(-1) + lut = torch.cat((header, columns)).type(torch.int32).to(device) + return lut, int(total_sizes.max()) + + @staticmethod + def forward(ctx, a, scale, rel_logits, is_causal, spdims, block, lut, maxlut, is_dense): + if scale is not None and isinstance(scale, torch.Tensor): + assert scale.device.type == "cpu" + scale = scale.item() + M = a.shape[0] + grid = [spdims[0], spdims[1] * block, M] + rel_shape = (1, 1, 1, 1) if rel_logits is None else rel_logits.shape + rel_strides = (1, 1, 1, 1) if rel_logits is None else rel_logits.stride() + # enqueue kernel + out = torch.empty_like(a) + _blocksparse_softmax_fwd[grid]( + out, a, a.stride(0), lut, # + rel_logits, rel_shape[-1], rel_strides[0], rel_strides[1], # relative attn# + scale, # + is_causal, # + BLOCK_SIZE=block, # + ROW_SIZE=next_power_of_2(maxlut), # + IS_DENSE=is_dense, # + num_warps=num_warps(maxlut) # + ) + # save to context + # ctx.mark_dirty(x) + ctx.save_for_backward(out, lut) + ctx.spdims = spdims + ctx.block = block + ctx.maxlut = maxlut + ctx.scale = scale + ctx.rel_shape = rel_shape + ctx.rel_strides = rel_strides + ctx.rel_dtype = a.dtype + ctx.is_dense = is_dense + ctx.is_causal = is_causal + return out + + @staticmethod + def backward(ctx, dout): + # retrieve from context + out, lut = ctx.saved_tensors + # relative logits gradients + dr = None + if ctx.needs_input_grad[3]: + dr = torch.zeros(ctx.rel_shape, dtype=ctx.rel_dtype, device=out.device) + # run kernel + M = out.shape[0] + grid = (ctx.spdims[0], ctx.spdims[1] * ctx.block, M) + da = torch.empty_like(dout) + _blocksparse_softmax_bwd[grid]( + da, da.stride(0), # + dout, dout.stride(0), # + out, out.stride(0), # + ctx.scale, # + lut, # + dr, ctx.rel_shape[-1], ctx.rel_strides[0], ctx.rel_strides[1], ctx.rel_strides[2], # + ctx.is_causal, # + BLOCK_SIZE=ctx.block, # + ROW_SIZE=next_power_of_2(ctx.maxlut), # + IS_DENSE=ctx.is_dense, # + num_warps=num_warps(ctx.maxlut) # + ) + return (da, None, None, dr, None, None, None, None, None, None, None, None, None, None, None, None, None, None) + + +class softmax: + + def __init__(self, layout, block, device, is_dense=False): + self.spdims = layout.shape + self.layout = layout + self.block = block + self.lut, self.maxlut = _softmax.make_lut(self.layout, self.block, device) + self.is_dense = is_dense + + def __call__(self, a, *, scale=1.0, rel_logits=None, is_causal=False): + if rel_logits is not None and rel_logits.dtype != a.dtype: + raise ValueError(f"relative position embedding must be {a.dtype}") + a = _softmax.apply(a, scale, rel_logits, is_causal, self.spdims, self.block, self.lut, self.maxlut, + self.is_dense) + return a diff --git a/venv/lib/python3.10/site-packages/triton/ops/cross_entropy.py b/venv/lib/python3.10/site-packages/triton/ops/cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..88e8dae50db09ca17c1bb8464afee2975b3d77eb --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/triton/ops/cross_entropy.py @@ -0,0 +1,96 @@ +import torch + +from .. import heuristics, jit +from .. import language as tl +from .. import next_power_of_2 + + +def num_warps(N): + if N < 2048: + return 4 + elif N < 8192: + return 8 + return 16 + + +@heuristics({'num_warps': lambda nargs: num_warps(nargs['N'])}) +@heuristics({'BLOCK': lambda nargs: next_power_of_2(nargs['N'])}) +@jit +def _forward(LOGITS, PROBS, IDX, LOSS, N, BLOCK: tl.constexpr): + row = tl.program_id(0) + cols = tl.arange(0, BLOCK) + idx = tl.load(IDX + row) + # pointers to logit and probs + LOGITS = LOGITS + row * N + cols + WRIT_PROBS = PROBS + row * N + cols + READ_PROBS = PROBS + row * N + idx + # write-back negative log-probs + logits = tl.load(LOGITS, mask=cols < N, other=-float('inf')) + logits = logits.to(tl.float32) + logits = logits - tl.max(logits, 0) + probs = tl.log(tl.sum(tl.exp(logits), 0)) - logits + tl.store(WRIT_PROBS, probs, mask=cols < N) + # There is a bug in the compiler, which fails to insert a barrier here. + # We add it explicitly for now. Will be fixed soon. + tl.debug_barrier() + # write-back loss + probs = tl.load(READ_PROBS) + tl.store(LOSS + row, probs) + + +@heuristics({'num_warps': lambda nargs: num_warps(nargs['N'])}) +@heuristics({'BLOCK': lambda nargs: next_power_of_2(nargs['N'])}) +@jit +def _backward(PROBS, IDX, DPROBS, N, BLOCK: tl.constexpr): + row = tl.program_id(0) + cols = tl.arange(0, BLOCK) + idx = tl.load(IDX + row) + # pointers to probs + PROBS = PROBS + row * N + cols + # We know d(-log(p[i])/dlogit[k] = -id_mat[i,k] + p[k] + # and we have -log(p[k]) stored in PROBS, so this is easy + probs = -tl.load(PROBS, mask=cols < N, other=float('inf')) + probs = tl.exp(probs.to(tl.float32)) + delta = cols == idx + # write result in-place in PROBS + dout = tl.load(DPROBS + row) + din = (probs - delta) * dout + tl.store(PROBS, din.to(PROBS.dtype.element_ty), mask=cols < N) + + +class _cross_entropy(torch.autograd.Function): + + @classmethod + def forward(cls, ctx, logits, indices): + # make sure we can use triton + assert (indices.dtype == torch.int64), "Indices are expected to be of type long." + # make kernel + device, dtype = logits.device, logits.dtype + n_cols = logits.shape[-1] + # run the kernel + result = torch.empty_like(indices, dtype=dtype, device=device) + neg_logprobs = torch.empty_like(logits, dtype=dtype, device=device) + grid = lambda opt: (logits.numel() // n_cols, ) + _forward[grid](logits, neg_logprobs, indices, result, n_cols) + # save for backward + ctx.save_for_backward(neg_logprobs, indices) + return result + + @classmethod + def backward(cls, ctx, dneg_logprobs): + """We know d(-log(p[i])/dlogit[k] = -id_mat[i,k] + p[k] + so we initialize the gradient as neg_logprobs, so we can just exponentiate + to get p[k], which is most of what we need... 
neg_logprobs will be + modified in place to become the gradient we want + """ + # load saved tensors + neg_logprobs, indices = ctx.saved_tensors + # run the kernel + # neg_logprobs will be modified in place to become our gradient: + n_cols = neg_logprobs.shape[-1] + grid = lambda opt: (neg_logprobs.numel() // n_cols, ) + _backward[grid](neg_logprobs, indices, dneg_logprobs, n_cols) + return neg_logprobs, None + + +cross_entropy = _cross_entropy.apply diff --git a/venv/lib/python3.10/site-packages/triton/ops/flash_attention.py b/venv/lib/python3.10/site-packages/triton/ops/flash_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..d024ba7ab005f9566542768595aa632b1de1b8aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/ops/flash_attention.py @@ -0,0 +1,456 @@ +""" +Fused Attention +=============== +This is a Triton implementation of the Flash Attention algorithm +(see: Dao et al., https://arxiv.org/pdf/2205.14135v2.pdf; Rabe and Staats https://arxiv.org/pdf/2112.05682v2.pdf) + +Sequence Parallel implementation inspired by HazyResearch +(see https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/flash_attn_triton.py) +""" + +import torch + +from .. import cdiv, jit +from .. import language as tl + + +@jit +def _fwd_kernel(Q, K, V, sm_scale, # + L, # + Out, # + stride_qz, stride_qh, stride_qm, stride_qk, # + stride_kz, stride_kh, stride_kn, stride_kk, # + stride_vz, stride_vh, stride_vn, stride_vk, # + stride_oz, stride_oh, stride_om, stride_on, # + Z, H, N_CTX, # + Z_H_N_CTX, # + BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, # + BLOCK_N: tl.constexpr, # + IS_CAUSAL: tl.constexpr # + ): + start_m = tl.program_id(0) + off_hz = tl.program_id(1) + qvk_offset = off_hz * stride_qh + vk_offset = qvk_offset // stride_qm + + K_block_ptr = tl.make_block_ptr( + base=K, + shape=(BLOCK_DMODEL, Z_H_N_CTX), + strides=(stride_kk, stride_kn), + offsets=(0, vk_offset), + block_shape=(BLOCK_DMODEL, BLOCK_N), + order=(0, 1), + ) + V_block_ptr = tl.make_block_ptr( + base=V, + shape=(Z_H_N_CTX, BLOCK_DMODEL), + strides=(stride_vn, stride_vk), + offsets=(vk_offset, 0), + block_shape=(BLOCK_N, BLOCK_DMODEL), + order=(1, 0), + ) + # initialize offsets + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_n = tl.arange(0, BLOCK_N) + # initialize pointer to m and l + m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") + l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) + # credits to: Adam P. 
Goucher (https://github.com/apgoucher): + # scale sm_scale by 1/log_2(e) and use + # 2^x instead of exp in the loop because CSE and LICM + # don't work as expected with `exp` in the loop + qk_scale = sm_scale * 1.44269504 + # load q: it will stay in SRAM throughout + + offs_k = tl.arange(0, BLOCK_DMODEL) + Q_ptrs = Q + qvk_offset + offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk + q = tl.load(Q_ptrs) + + q = (q * qk_scale).to(K.dtype.element_ty) + lo = 0 + hi = (start_m + 1) * BLOCK_M if IS_CAUSAL else N_CTX + for start_n in range(lo, hi, BLOCK_N): + # -- load k, v -- + k = tl.load(K_block_ptr) + v = tl.load(V_block_ptr) + # -- compute qk --- + qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) + if IS_CAUSAL: + qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")) + qk += tl.dot(q, k, allow_tf32=True) + # -- compute scaling constant --- + m_i_new = tl.maximum(m_i, tl.max(qk, 1)) + alpha = tl.math.exp2(m_i - m_i_new) + p = tl.math.exp2(qk - m_i_new[:, None]) + # -- scale and update acc -- + acc *= alpha[:, None] + acc += tl.dot(p.to(V.dtype.element_ty), v, allow_tf32=True) + # -- update m_i and l_i -- + l_i = l_i * alpha + tl.sum(p, 1) + m_i = m_i_new + # update pointers + K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) + V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) + # write back l and m + acc = acc / l_i[:, None] + l_ptrs = L + off_hz * N_CTX + offs_m + tl.store(l_ptrs, m_i + tl.math.log2(l_i)) + # write back O + O_block_ptr = tl.make_block_ptr( + base=Out, + shape=(Z_H_N_CTX, BLOCK_DMODEL), + strides=(stride_om, stride_on), + offsets=(vk_offset + start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + # O_ptrs = Out + qvk_offset + offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk + tl.store(O_block_ptr, acc.to(K.dtype.element_ty)) + + +@jit +def _bwd_preprocess( + Out, + DO, + Delta, + BLOCK_M: tl.constexpr, + D_HEAD: tl.constexpr, +): + off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M) + off_n = tl.arange(0, D_HEAD) + # load + o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) + do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) + # compute + delta = tl.sum(o * do, axis=1) + # write-back + tl.store(Delta + off_m, delta) + + +@jit +def _bwd_kernel_one_col_block(Q, K, V, sm_scale, qk_scale, # + Out, DO, # + DQ, DK, DV, # + L, # + D, # + Q_block_ptr, K_block_ptr, V_block_ptr, # + DO_block_ptr, DQ_block_ptr, DK_block_ptr, DV_block_ptr, # + stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, # + stride_kz, stride_kh, stride_kn, stride_kk, # + stride_vz, stride_vh, stride_vn, stride_vk, # + Z, H, N_CTX, # + off_h, off_z, off_hz, start_n, num_block, # + BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, # + BLOCK_N: tl.constexpr, # + SEQUENCE_PARALLEL: tl.constexpr, # + CAUSAL: tl.constexpr, # + MMA_V3: tl.constexpr # + ): + if CAUSAL: + lo = start_n * BLOCK_M + else: + lo = 0 + + Q_offset = (off_z * stride_qz + off_h * stride_qh) // stride_qm + DQ_offset = off_z * stride_qz + off_h * stride_qh + K_offset = (off_z * stride_kz + off_h * stride_kh) // stride_kn + V_offset = (off_z * stride_vz + off_h * stride_vh) // stride_vn + if SEQUENCE_PARALLEL: + DQ_offset += stride_dqa.to(tl.int64) * start_n + DQ_offset = DQ_offset // stride_qm + + Q_block_ptr = tl.advance(Q_block_ptr, (lo + Q_offset, 0)) + K_block_ptr = tl.advance(K_block_ptr, (start_n * BLOCK_M + K_offset, 0)) + V_block_ptr = tl.advance(V_block_ptr, (start_n * BLOCK_M + V_offset, 0)) + 
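+ # advance the gradient block pointers to the same starting offsets as their forward counterparts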
DO_block_ptr = tl.advance(DO_block_ptr, (lo + Q_offset, 0)) + DQ_block_ptr = tl.advance(DQ_block_ptr, (lo + DQ_offset, 0)) + DK_block_ptr = tl.advance(DK_block_ptr, (start_n * BLOCK_M + K_offset, 0)) + DV_block_ptr = tl.advance(DV_block_ptr, (start_n * BLOCK_M + V_offset, 0)) + + # initialize row/col offsets + offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M) + offs_m = tl.arange(0, BLOCK_N) + # pointer to row-wise quantities in value-like data + D_ptrs = D + off_hz * N_CTX + l_ptrs = L + off_hz * N_CTX + # initialize dv and dk + dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) + dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) + # k and v stay in SRAM throughout + k = tl.load(K_block_ptr) + v = tl.load(V_block_ptr) + # loop over rows + for start_m in range(lo, num_block * BLOCK_M, BLOCK_M): + offs_m_curr = start_m + offs_m + # load q, k, v, do on-chip + q = tl.load(Q_block_ptr) + # recompute p = softmax(qk, dim=-1).T + # NOTE: `do` is pre-divided by `l`; no normalization here + if CAUSAL: + qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), float(0.0), float("-inf")) + else: + qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) + qk += tl.dot(q, tl.trans(k)) + qk *= qk_scale + l_i = tl.load(l_ptrs + offs_m_curr) + p = tl.math.exp2(qk - l_i[:, None]) + # compute dv + do = tl.load(DO_block_ptr) + dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do, allow_tf32=True) + # compute dp = dot(v, do) + Di = tl.load(D_ptrs + offs_m_curr) + # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None] + dp = tl.dot(do, tl.trans(v), allow_tf32=True) + # compute ds = p * (dp - delta[:, None]) + ds = (p * (dp - Di[:, None]) * sm_scale).to(Q.dtype.element_ty) + # compute dk = dot(ds.T, q) + dk += tl.dot(tl.trans(ds), q, allow_tf32=True) + # compute dq + if not SEQUENCE_PARALLEL: + dq = tl.load(DQ_block_ptr) + dq += tl.dot(ds, k, allow_tf32=True) + tl.store(DQ_block_ptr, dq.to(Q.dtype.element_ty)) + elif SEQUENCE_PARALLEL: + if MMA_V3: + dq = tl.dot(ds, k, allow_tf32=True) + else: + # does not work with MMA v3 because M % 64 != 0 + dq = tl.trans(tl.dot(tl.trans(k), tl.trans(ds), allow_tf32=True)) + tl.store(DQ_block_ptr, dq.to(Q.dtype.element_ty)) + + # increment pointers + DQ_block_ptr = tl.advance(DQ_block_ptr, (BLOCK_M, 0)) + Q_block_ptr = tl.advance(Q_block_ptr, (BLOCK_M, 0)) + DO_block_ptr = tl.advance(DO_block_ptr, (BLOCK_M, 0)) + # write-back + tl.store(DV_block_ptr, dv.to(V.dtype.element_ty)) + tl.store(DK_block_ptr, dk.to(K.dtype.element_ty)) + + +@jit +def _bwd_kernel(Q, K, V, sm_scale, # + Out, DO, # + DQ, DK, DV, # + L, # + D, # + stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, # + stride_kz, stride_kh, stride_kn, stride_kk, # + stride_vz, stride_vh, stride_vn, stride_vk, # + Z, H, N_CTX, # + Z_H_N_CTX, # + SQ_Z_H_N_CTX, # + BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, # + BLOCK_N: tl.constexpr, # + SEQUENCE_PARALLEL: tl.constexpr, # + CAUSAL: tl.constexpr, # + MMA_V3: tl.constexpr # + ): + qk_scale = sm_scale * 1.44269504 + off_hz = tl.program_id(0) + off_z = off_hz // H + off_h = off_hz % H + + Q_block_ptr = tl.make_block_ptr( + base=Q, + shape=(Z_H_N_CTX, BLOCK_DMODEL), + strides=(stride_qm, stride_qk), + offsets=(0, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + K_block_ptr = tl.make_block_ptr( + base=K, + shape=(Z_H_N_CTX, BLOCK_DMODEL), + strides=(stride_kn, stride_kk), + offsets=(0, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + V_block_ptr = tl.make_block_ptr( + base=V, +
strides=(stride_vn, stride_vk), + offsets=(0, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + DO_block_ptr = tl.make_block_ptr( + base=DO, + shape=(Z_H_N_CTX, BLOCK_DMODEL), + strides=(stride_qm, stride_qk), + offsets=(0, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + if SEQUENCE_PARALLEL: + DQ_block_ptr = tl.make_block_ptr( + base=DQ, + shape=(SQ_Z_H_N_CTX, BLOCK_DMODEL), + strides=(stride_qm, stride_qk), + offsets=(0, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + else: + DQ_block_ptr = tl.make_block_ptr( + base=DQ, + shape=(Z_H_N_CTX, BLOCK_DMODEL), + strides=(stride_qm, stride_qk), + offsets=(0, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + + DK_block_ptr = tl.make_block_ptr( + base=DK, + shape=(Z_H_N_CTX, BLOCK_DMODEL), + strides=(stride_kn, stride_kk), + offsets=(0, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + DV_block_ptr = tl.make_block_ptr( + base=DV, + shape=(Z_H_N_CTX, BLOCK_DMODEL), + strides=(stride_vn, stride_vk), + offsets=(0, 0), + block_shape=(BLOCK_M, BLOCK_DMODEL), + order=(1, 0), + ) + + num_block_n = tl.cdiv(N_CTX, BLOCK_N) + if not SEQUENCE_PARALLEL: + for start_n in range(0, num_block_n): + _bwd_kernel_one_col_block(Q, K, V, sm_scale, qk_scale, Out, DO, # + DQ, DK, DV, # + L, # + D, # + Q_block_ptr, K_block_ptr, V_block_ptr, # + DO_block_ptr, DQ_block_ptr, DK_block_ptr, DV_block_ptr, # + stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, # + stride_kz, stride_kh, stride_kn, stride_kk, # + stride_vz, stride_vh, stride_vn, stride_vk, # + Z, H, N_CTX, # + off_h, off_z, off_hz, start_n, num_block_n, # + BLOCK_M=BLOCK_M, BLOCK_DMODEL=BLOCK_DMODEL, # + BLOCK_N=BLOCK_N, # + SEQUENCE_PARALLEL=SEQUENCE_PARALLEL, # + CAUSAL=CAUSAL, # + MMA_V3=MMA_V3 # + ) + else: + start_n = tl.program_id(1) + _bwd_kernel_one_col_block(Q, K, V, sm_scale, qk_scale, Out, DO, # + DQ, DK, DV, # + L, # + D, # + Q_block_ptr, K_block_ptr, V_block_ptr, # + DO_block_ptr, DQ_block_ptr, DK_block_ptr, DV_block_ptr, # + stride_dqa, stride_qz, stride_qh, stride_qm, stride_qk, # + stride_kz, stride_kh, stride_kn, stride_kk, # + stride_vz, stride_vh, stride_vn, stride_vk, # + Z, H, N_CTX, # + off_h, off_z, off_hz, start_n, num_block_n, # + BLOCK_M=BLOCK_M, BLOCK_DMODEL=BLOCK_DMODEL, # + BLOCK_N=BLOCK_N, # + SEQUENCE_PARALLEL=SEQUENCE_PARALLEL, # + CAUSAL=CAUSAL, # + MMA_V3=MMA_V3 # + ) + + +class _attention(torch.autograd.Function): + + @staticmethod + def forward(ctx, q, k, v, causal, sm_scale, sequence_parallel=False): + # only support for Ampere now + capability = torch.cuda.get_device_capability() + if capability[0] < 8: + raise RuntimeError("Flash attention currently only supported for compute capability >= 80") + BLOCK_M = 128 + BLOCK_N = 64 + # shape constraints + Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1] + assert Lq == Lk and Lk == Lv + assert Lk in {16, 32, 64, 128} + o = torch.empty_like(q) + grid = (cdiv(q.shape[2], BLOCK_M), q.shape[0] * q.shape[1], 1) + L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32) + num_warps = 4 if Lk <= 64 else 8 + _fwd_kernel[grid]( + q, k, v, sm_scale, # + L, # + o, # + q.stride(0), q.stride(1), q.stride(2), q.stride(3), # + k.stride(0), k.stride(1), k.stride(2), k.stride(3), # + v.stride(0), v.stride(1), v.stride(2), v.stride(3), # + o.stride(0), o.stride(1), o.stride(2), o.stride(3), # + q.shape[0], q.shape[1], q.shape[2], # + q.shape[0] * q.shape[1] * q.shape[2], # + BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk, # + 
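+ # L receives one base-2 log-sum-exp statistic per query row, reused by the backward pass #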
IS_CAUSAL=causal, # + num_warps=num_warps, # + num_stages=4 # + ) + + ctx.save_for_backward(q, k, v, o, L) + ctx.grid = grid + ctx.sm_scale = sm_scale + ctx.BLOCK_DMODEL = Lk + ctx.causal = causal + ctx.sequence_parallel = sequence_parallel + return o + + @staticmethod + def backward(ctx, do): + capability = torch.cuda.get_device_capability() + MMA_V3 = capability[0] >= 9 + BLOCK = 128 + q, k, v, o, L = ctx.saved_tensors + sequence_parallel = ctx.sequence_parallel + seq_len_kv = k.shape[2] + do = do.contiguous() + if sequence_parallel: + replicas = cdiv(seq_len_kv, BLOCK) + new_dq_shape = (replicas, ) + q.shape + dq = torch.zeros(new_dq_shape, device=q.device, dtype=q.dtype) + else: + dq = torch.zeros_like(q, dtype=q.dtype) + dk = torch.empty_like(k) + dv = torch.empty_like(v) + delta = torch.empty_like(L) + _bwd_preprocess[(cdiv(q.shape[2], BLOCK) * ctx.grid[1], )]( + o, + do, + delta, + BLOCK_M=BLOCK, + D_HEAD=ctx.BLOCK_DMODEL, + ) + _bwd_kernel[(ctx.grid[1], cdiv(seq_len_kv, BLOCK) if sequence_parallel else 1)]( + q, k, v, ctx.sm_scale, # + o, do, # + dq, dk, dv, # + L, # + delta, # + o.numel(), q.stride(0), q.stride(1), q.stride(2), q.stride(3), # + k.stride(0), k.stride(1), k.stride(2), k.stride(3), # + v.stride(0), v.stride(1), v.stride(2), v.stride(3), # + q.shape[0], q.shape[1], q.shape[2], # + q.shape[0] * q.shape[1] * q.shape[2], # + cdiv(seq_len_kv, BLOCK) * q.shape[0] * q.shape[1] * q.shape[2], # + BLOCK_M=BLOCK, BLOCK_N=BLOCK, # + BLOCK_DMODEL=ctx.BLOCK_DMODEL, # + SEQUENCE_PARALLEL=sequence_parallel, # + CAUSAL=ctx.causal, # + MMA_V3=MMA_V3, # + num_warps=8, # + num_stages=1 # + ) + + if len(dq.shape) == 5: + dq = dq.sum(dim=0) + return dq, dk, dv, None, None, None + + +attention = _attention.apply diff --git a/venv/lib/python3.10/site-packages/triton/ops/matmul.py b/venv/lib/python3.10/site-packages/triton/ops/matmul.py new file mode 100644 index 0000000000000000000000000000000000000000..832e52727f092f5f543730b16406c3c9d7f05d80 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/ops/matmul.py @@ -0,0 +1,204 @@ +import torch + +from .. import Config, autotune, cdiv, heuristics, jit +from .. 
import language as tl +from .matmul_perf_model import early_config_prune, estimate_matmul_time + +_ordered_datatypes = [torch.float16, torch.bfloat16, torch.float32] + + +def get_higher_dtype(a, b): + if a is b: + return a + + assert a in _ordered_datatypes + assert b in _ordered_datatypes + + for d in _ordered_datatypes: + if a is d: + return b + if b is d: + return a + + +def init_to_zero(name): + return lambda nargs: nargs[name].zero_() + + +def get_configs_io_bound(): + configs = [] + for num_stages in [2, 3, 4, 5, 6]: + for block_m in [16, 32]: + for block_k in [32, 64]: + for block_n in [32, 64, 128, 256]: + num_warps = 2 if block_n <= 64 else 4 + configs.append( + Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1}, + num_stages=num_stages, num_warps=num_warps)) + # split_k + for split_k in [2, 4, 8, 16]: + configs.append( + Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k}, + num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C'))) + return configs + + +@autotune( + configs=[ + # basic configs for compute-bound matmuls + Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=5, num_warps=2), + # good for int8 + Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=5, num_warps=2), + ] + get_configs_io_bound(), + key=['M', 'N', 'K'], + prune_configs_by={ + 'early_config_prune': early_config_prune, + 'perf_model': estimate_matmul_time, + 'top_k': 10, + }, +) +@heuristics({ + 'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] * args['SPLIT_K']) == 0, +}) +@jit +def _kernel(A, B, C, M, N, K, # + stride_am, stride_ak, # + stride_bk, stride_bn, # + stride_cm, stride_cn, # + dot_out_dtype: tl.constexpr, # + allow_tf32: tl.constexpr, # + fp8_fast_accum: tl.constexpr, # + BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, # + GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr, 
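+ # SPLIT_K > 1 splits the K reduction across programs; partial tiles are combined with tl.atomic_add in the epilogue, hence the init_to_zero('C') pre_hook on the split-K configs above #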
EVEN_K: tl.constexpr, AB_DTYPE: tl.constexpr # + ): + # matrix multiplication + pid = tl.program_id(0) + pid_z = tl.program_id(1) + grid_m = tl.cdiv(M, BLOCK_M) + grid_n = tl.cdiv(N, BLOCK_N) + # re-order program ID for better L2 performance + width = GROUP_M * grid_n + group_id = pid // width + group_size = min(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // (group_size) + # do matrix multiplication + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) + rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) + rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K) + # pointers + A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) + B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=dot_out_dtype) + for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)): + if EVEN_K: + a = tl.load(A) + b = tl.load(B) + else: + k_remaining = K - k * (BLOCK_K * SPLIT_K) + _0 = tl.zeros((1, 1), dtype=C.dtype.element_ty) + a = tl.load(A, mask=rk[None, :] < k_remaining, other=_0) + b = tl.load(B, mask=rk[:, None] < k_remaining, other=_0) + if AB_DTYPE: + a = a.to(C.dtype.element_ty) + b = b.to(C.dtype.element_ty) + if fp8_fast_accum: + acc = tl.dot(a, b, acc, out_dtype=dot_out_dtype, allow_tf32=allow_tf32) + else: + acc += tl.dot(a, b, out_dtype=dot_out_dtype, allow_tf32=allow_tf32) + A += BLOCK_K * SPLIT_K * stride_ak + B += BLOCK_K * SPLIT_K * stride_bk + acc = acc.to(C.dtype.element_ty) + # rematerialize rm and rn to save registers + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn) + mask = (rm < M)[:, None] & (rn < N)[None, :] + # handles write-back with reduction-splitting + if SPLIT_K == 1: + tl.store(C, acc, mask=mask) + else: + tl.atomic_add(C, acc, mask=mask) + + +class _matmul(torch.autograd.Function): + kernel = _kernel + + _locks = {} + + @staticmethod + def _call(a, b, dot_out_dtype, allow_tf32, fp8_fast_accum): + device = a.device + # handle non-contiguous inputs if necessary + if a.stride(0) > 1 and a.stride(1) > 1: + a = a.contiguous() + if b.stride(0) > 1 and b.stride(1) > 1: + b = b.contiguous() + # checks constraints + assert a.shape[1] == b.shape[0], "incompatible dimensions" + M, K = a.shape + _, N = b.shape + # allocates output + if a.dtype in [tl.float8e4nv, tl.float8e4b15, tl.float8e5] or\ + b.dtype in [tl.float8e4nv, tl.float8e4b15, tl.float8e5]: + c_dtype = torch.float16 + elif a.dtype in [torch.int8] or b.dtype in [torch.int8]: + c_dtype = torch.int32 + else: + c_dtype = get_higher_dtype(a.dtype, b.dtype) + c = torch.empty((M, N), device=device, dtype=c_dtype) + if dot_out_dtype is None: + if c_dtype in [torch.float16, torch.float32, torch.bfloat16]: + dot_out_dtype = tl.float32 + else: + dot_out_dtype = tl.int32 + else: + assert isinstance(dot_out_dtype, torch.dtype), "dot_out_dtype must be a torch.dtype" + if dot_out_dtype == torch.float16: + dot_out_dtype = tl.float16 + elif dot_out_dtype in [torch.float32, torch.bfloat16]: + dot_out_dtype = tl.float32 + else: + dot_out_dtype = tl.int32 + ab_dtype = True + if a.dtype in [tl.float8e4nv, tl.float8e5] and b.dtype in [tl.float8e4nv, tl.float8e5]: + ab_dtype = False + if a.dtype in [torch.int8] and b.dtype in [torch.int8]: + ab_dtype = False + # launch kernel + grid = lambda META: (cdiv(M, META['BLOCK_M']) 
* cdiv(N, META['BLOCK_N']), META['SPLIT_K']) + _kernel[grid]( + a, b, c, M, N, K, # + a.stride(0), a.stride(1), # + b.stride(0), b.stride(1), # + c.stride(0), c.stride(1), # + dot_out_dtype=dot_out_dtype, # + allow_tf32=allow_tf32, # + fp8_fast_accum=fp8_fast_accum, # + GROUP_M=8, AB_DTYPE=ab_dtype) + return c + + @staticmethod + def forward(ctx, a, b, dot_out_dtype=None, allow_tf32=True, fp8_fast_accum=True): + return _matmul._call(a, b, dot_out_dtype=dot_out_dtype, allow_tf32=allow_tf32, fp8_fast_accum=fp8_fast_accum) + + +matmul = _matmul.apply diff --git a/venv/lib/python3.10/site-packages/triton/ops/matmul_perf_model.py b/venv/lib/python3.10/site-packages/triton/ops/matmul_perf_model.py new file mode 100644 index 0000000000000000000000000000000000000000..19e93268ec0b95dd61621c2e92dae8d99b4b32fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/ops/matmul_perf_model.py @@ -0,0 +1,162 @@ +import heapq + +import torch + +from .. import cdiv +from .._C.libtriton.triton import runtime +from ..runtime import driver +from ..testing import (get_dram_gbps, get_max_simd_tflops, get_max_tensorcore_tflops, nvsmi) + + +def get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype): + ''' return compute throughput in TOPS ''' + total_warps = num_ctas * min(num_warps, 4) + num_subcores = driver.utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs + cur_sm_clock = nvsmi(['clocks.max.sm'])[0] + tflops = min(num_subcores, total_warps) / num_subcores * get_max_tensorcore_tflops( + dtype, cur_sm_clock, backend, device) + return tflops + + +def get_simd_tflops(backend, device, num_ctas, num_warps, dtype): + ''' return compute throughput in TOPS ''' + total_warps = num_ctas * min(num_warps, 4) + num_subcores = driver.utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs + cur_sm_clock = nvsmi(['clocks.max.sm'])[0] + tflops = min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, cur_sm_clock, backend, device) + return tflops + + +def get_tflops(backend, device, num_ctas, num_warps, dtype): + capability = torch.cuda.get_device_capability(device) + if capability[0] < 8 and dtype == torch.float32: + return get_simd_tflops(backend, device, num_ctas, num_warps, dtype) + return get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype) + + +def estimate_matmul_time( + # backend, device, + num_warps, num_stages, # + A, B, C, # + M, N, K, # + BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, # + debug=False, **kwargs # +): + ''' return estimated running time in ms + = max(compute, loading) + store ''' + backend = runtime.backend.CUDA + device = torch.cuda.current_device() + dtype = A.dtype + dtsize = A.element_size() + + num_cta_m = cdiv(M, BLOCK_M) + num_cta_n = cdiv(N, BLOCK_N) + num_cta_k = SPLIT_K + num_ctas = num_cta_m * num_cta_n * num_cta_k + + # If the input is smaller than the block size + M, N = max(M, BLOCK_M), max(N, BLOCK_N) + + # time to compute + total_ops = 2 * M * N * K / (1024 * 1024 * 1024) # GOPS + tput = get_tflops(backend, device, num_ctas, num_warps, dtype) + compute_ms = total_ops / tput + + # time to load data + num_sm = driver.utils.get_device_properties(device)["multiprocessor_count"] + active_cta_ratio = min(1, num_ctas / num_sm) + active_cta_ratio_bw1 = min(1, num_ctas / 32) # 32 active ctas are enough to saturate + active_cta_ratio_bw2 = max(min(1, (num_ctas - 32) / (108 - 32)), 0) # 32-108, remaining 5% + dram_bw = get_dram_gbps(backend, device) * (active_cta_ratio_bw1 * 0.95 + 
active_cta_ratio_bw2 * 0.05) # in GB/s + l2_bw = dram_bw * 4 # rough estimate (should be 4.7 for A100?) + # assume 80% of (following) loads are in L2 cache + load_a_dram = M * K * dtsize * (1 + 0.2 * (num_cta_n - 1)) + load_a_l2 = M * K * dtsize * 0.8 * (num_cta_n - 1) + load_b_dram = N * K * dtsize * (1 + 0.2 * (num_cta_m - 1)) + load_b_l2 = N * K * dtsize * 0.8 * (num_cta_m - 1) + # total + total_dram = (load_a_dram + load_b_dram) / (1024 * 1024) # MB + total_l2 = (load_a_l2 + load_b_l2) / (1024 * 1024) + # loading time in ms + load_ms = total_dram / dram_bw + total_l2 / l2_bw + + # estimate storing time + store_bw = dram_bw * 0.6 # conservative estimate of effective store bandwidth + store_c_dram = M * N * dtsize * SPLIT_K / (1024 * 1024) # MB + if SPLIT_K == 1: + store_ms = store_c_dram / store_bw + else: + reduce_bw = store_bw + store_ms = store_c_dram / reduce_bw + # c.zero_() + zero_ms = M * N * 2 / (1024 * 1024) / store_bw + store_ms += zero_ms + + total_time_ms = max(compute_ms, load_ms) + store_ms + if debug: + print(f'Total time: {total_time_ms}ms, compute time: {compute_ms}ms, ' + f'loading time: {load_ms}ms, store time: {store_ms}ms, ' + f'Active CTAs: {active_cta_ratio*100}%') + return total_time_ms + + + def early_config_prune(configs, named_args): + device = torch.cuda.current_device() + capability = torch.cuda.get_device_capability() + # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages + dtsize = named_args['A'].element_size() + dtype = named_args['A'].dtype + + # 1. make sure we have enough smem + pruned_configs = [] + for config in configs: + kw = config.kwargs + BLOCK_M, BLOCK_N, BLOCK_K, num_stages = \ + kw['BLOCK_M'], kw['BLOCK_N'], kw['BLOCK_K'], config.num_stages + + max_shared_memory = driver.utils.get_device_properties(device)["max_shared_mem"] + required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize + if required_shared_memory <= max_shared_memory: + pruned_configs.append(config) + configs = pruned_configs + + # Some dtypes do not allow atomic_add + if dtype not in [torch.float16, torch.float32]: + configs = [config for config in configs if config.kwargs['SPLIT_K'] == 1] + + # group configs by (BLOCK_M,_N,_K, SPLIT_K, num_warps) + configs_map = {} + for config in configs: + kw = config.kwargs + BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages = \ + kw['BLOCK_M'], kw['BLOCK_N'], kw['BLOCK_K'], kw['SPLIT_K'], config.num_warps, config.num_stages + + key = (BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps) + if key in configs_map: + configs_map[key].append((config, num_stages)) + else: + configs_map[key] = [(config, num_stages)] + + pruned_configs = [] + for k, v in configs_map.items(): + BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps = k + if capability[0] >= 8: + # compute cycles (only works for ampere GPUs) + mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16) + mma_cycles = mmas / min(4, num_warps) * 8 + + ldgsts_latency = 300 # Does this matter?
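+ # Worked example of the stage heuristic below (illustrative numbers, derived + # from the formulas above): with BLOCK_M = BLOCK_N = 128, BLOCK_K = 32 and + # num_warps = 8, mmas = 128 * 128 * 32 / (16 * 8 * 16) = 256 and + # mma_cycles = 256 / 4 * 8 = 512, so optimal_num_stages = 300 / 512 ~ 0.6; + # every candidate stage count (2-6) then lies above the optimum, so the + # nsmallest(2, ...) call below keeps the two smallest (2 and 3 stages), while + # stage counts below the optimum would incur the +10 penalty in the key.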
+ optimal_num_stages = ldgsts_latency / mma_cycles + + # nearest stages, prefer large #stages + nearest = heapq.nsmallest( + 2, v, key=lambda x: 10 + abs(x[1] - optimal_num_stages) + if (x[1] - optimal_num_stages) < 0 else x[1] - optimal_num_stages) + + for n in nearest: + pruned_configs.append(n[0]) + else: # Volta & Turing only support num_stages <= 2 + random_config = v[0][0] + random_config.num_stages = 2 + pruned_configs.append(random_config) + return pruned_configs diff --git a/venv/lib/python3.10/site-packages/triton/testing.py b/venv/lib/python3.10/site-packages/triton/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..848db7c1b508bbe93636b10e88d88e23cbc35d2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/testing.py @@ -0,0 +1,493 @@ +import functools +import os +import subprocess +import sys +from contextlib import contextmanager +from typing import Any, Dict, List + +from . import language as tl +from ._C.libtriton.triton import runtime + + +def nvsmi(attrs): + attrs = ','.join(attrs) + cmd = ['nvidia-smi', '-i', '0', '--query-gpu=' + attrs, '--format=csv,noheader,nounits'] + out = subprocess.check_output(cmd) + ret = out.decode(sys.stdout.encoding).split(',') + ret = [int(x) for x in ret] + return ret + + +def do_bench_cudagraph(fn, rep=20, grad_to_none=None): + import torch + """ + Benchmark the runtime of the provided function. + + :param fn: Function to benchmark + :type fn: Callable + :param rep: Repetition time (in ms) + :type rep: int + :param grad_to_none: Reset the gradient of the provided tensor to None + :type grad_to_none: torch.tensor, optional + """ + if torch.cuda.current_stream() == torch.cuda.default_stream(): + raise RuntimeError("Cannot capture graph in default stream. Please use a side stream in benchmark code.") + # warmup + fn() + # step 1 - we estimate the amount of time the kernel call takes + # NOTE: this estimate isn't super accurate because the GPU isn't warmed up at this point + # but it is probably good enough + if grad_to_none is not None: + for x in grad_to_none: + x.detach_() + x.requires_grad_(True) + x.grad = None + g = torch.cuda.CUDAGraph() + with torch.cuda.graph(g): + fn() + torch.cuda.synchronize() + start_event = torch.cuda.Event(enable_timing=True) + end_event = torch.cuda.Event(enable_timing=True) + start_event.record() + g.replay() + end_event.record() + torch.cuda.synchronize() + estimate_ms = start_event.elapsed_time(end_event) + n_repeat = max(1, int(rep / estimate_ms)) + # step 2 - construct a cuda graph with `n_repeat` unrolled function calls to minimize + # host overhead + g = torch.cuda.CUDAGraph() + with torch.cuda.graph(g): + for i in range(n_repeat): + if grad_to_none is not None: + for x in grad_to_none: + x.grad = None + fn() + torch.cuda.synchronize() + # measure time and return + ret = [] + n_retries = 10 + for i in range(n_retries): + start_event = torch.cuda.Event(enable_timing=True) + end_event = torch.cuda.Event(enable_timing=True) + start_event.record() + g.replay() + end_event.record() + torch.cuda.synchronize() + ret += [start_event.elapsed_time(end_event) / n_repeat] + return torch.mean(torch.tensor(ret)).item() + + +def do_bench(fn, warmup=25, rep=100, grad_to_none=None, quantiles=None, fast_flush=True, return_mode="mean"): + assert return_mode in ["min", "max", "mean", "median"] + import torch + """ + Benchmark the runtime of the provided function. By default, return the mean runtime of :code:`fn`; if + :code:`quantiles` is given, return those performance percentiles instead (e.g. [0.5, 0.2, 0.8] for the + median and the 20-th and 80-th percentiles).
+ + :param fn: Function to benchmark + :type fn: Callable + :param warmup: Warmup time (in ms) + :type warmup: int + :param rep: Repetition time (in ms) + :type rep: int + :param grad_to_none: Reset the gradient of the provided tensor to None + :type grad_to_none: torch.tensor, optional + :param quantiles: Performance percentiles to return instead of the :code:`return_mode` aggregate. + :type quantiles: list[float] + :param fast_flush: Use a faster kernel to flush the L2 cache between measurements + :type fast_flush: bool + """ + + fn() + torch.cuda.synchronize() + + # We maintain a buffer of 256 MB that we clear + # before each kernel call to make sure that the L2 + # doesn't contain any input data before the run + if fast_flush: + cache = torch.empty(int(256e6 // 4), dtype=torch.int, device='cuda') + else: + cache = torch.empty(int(256e6), dtype=torch.int8, device='cuda') + + # Estimate the runtime of the function + start_event = torch.cuda.Event(enable_timing=True) + end_event = torch.cuda.Event(enable_timing=True) + start_event.record() + for _ in range(5): + cache.zero_() + fn() + end_event.record() + torch.cuda.synchronize() + estimate_ms = start_event.elapsed_time(end_event) / 5 + + # compute number of warmup and repeat + n_warmup = max(1, int(warmup / estimate_ms)) + n_repeat = max(1, int(rep / estimate_ms)) + start_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)] + end_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)] + # Warm-up + for _ in range(n_warmup): + fn() + # Benchmark + for i in range(n_repeat): + # we don't want `fn` to accumulate gradient values + # if it contains a backward pass. So we clear the + # provided gradients + if grad_to_none is not None: + for x in grad_to_none: + x.grad = None + # we clear the L2 cache before each run + cache.zero_() + # record time of `fn` + start_event[i].record() + fn() + end_event[i].record() + # Record clocks + torch.cuda.synchronize() + times = torch.tensor([s.elapsed_time(e) for s, e in zip(start_event, end_event)], dtype=torch.float) + if quantiles is not None: + ret = torch.quantile(times, torch.tensor(quantiles, dtype=torch.float)).tolist() + if len(ret) == 1: + ret = ret[0] + return ret + return getattr(torch, return_mode)(times).item() + + +def assert_close(x, y, atol=None, rtol=None, err_msg=''): + import numpy as np + import torch + + # canonicalize arguments to be tensors + if not isinstance(x, torch.Tensor): + x = torch.tensor(x) + if not isinstance(y, torch.Tensor): + y = torch.tensor(y) + # absolute tolerance + if atol is None: + atol = 1e-2 + atol = atol(x.dtype) if callable(atol) else atol + # relative tolerance hook + if rtol is None: + rtol = 0. + rtol = rtol(x.dtype) if callable(rtol) else rtol + # we use numpy instead of pytorch + # as it seems more memory efficient + # pytorch tends to oom on large tensors + if isinstance(x, torch.Tensor): + if x.dtype == torch.bfloat16: + x = x.float() + x = x.cpu().detach().numpy() + if isinstance(y, torch.Tensor): + if y.dtype == torch.bfloat16: + y = y.float() + y = y.cpu().detach().numpy() + # we handle the size==1 case separately as we can + # provide a better error message there + if x.size > 1 or y.size > 1: + np.testing.assert_allclose(x, y, atol=atol, rtol=rtol, equal_nan=True) + return + if not np.allclose(x, y, atol=atol, rtol=rtol): + raise AssertionError(f'{err_msg} {x} is not close to {y} (atol={atol}, rtol={rtol})') + + +class Benchmark: + """ + This class is used by the :code:`perf_report` function to generate line plots with a concise API.
+ """ + + def __init__( + self, + x_names: List[str], + x_vals: List[Any], + line_arg: str, + line_vals: List[Any], + line_names: List[str], + plot_name: str, + args: Dict[str, Any], + xlabel: str = '', + ylabel: str = '', + x_log: bool = False, + y_log: bool = False, + color=None, + styles=None, + ): + """ + Constructor. + x_vals can be a list of scalars or a list of tuples/lists. If x_vals is a list + of scalars and there are multiple x_names, all arguments will have the same value. + If x_vals is a list of tuples/lists, each element should have the same length as + x_names. + + :param x_names: Name of the arguments that should appear on the x axis of the plot. + :type x_names: List[str] + :param x_vals: List of values to use for the arguments in :code:`x_names`. + :type x_vals: List[Any] + :param line_arg: Argument name for which different values correspond to different lines in the plot. + :type line_arg: str + :param line_vals: List of values to use for the arguments in :code:`line_arg`. + :type line_vals: List[Any] + :param line_names: Label names for the different lines. + :type line_names: List[str] + :param plot_name: Name of the plot. + :type plot_name: str + :param args: Dictionary of keyword arguments to remain fixed throughout the benchmark. + :type args: Dict[str, Any] + :param xlabel: Label for the x axis of the plot. + :type xlabel: str, optional + :param ylabel: Label for the y axis of the plot. + :type ylabel: str, optional + :param x_log: Whether the x axis should be log scale. + :type x_log: bool, optional + :param y_log: Whether the y axis should be log scale. + :type y_log: bool, optional + """ + self.x_names = x_names + self.x_vals = x_vals + self.x_log = x_log + self.line_arg = line_arg + self.line_vals = line_vals + self.line_names = line_names + self.y_log = y_log + self.styles = styles + # plot info + self.xlabel = xlabel + self.ylabel = ylabel + self.plot_name = plot_name + self.args = args + + +class Mark: + + def __init__(self, fn, benchmarks): + self.fn = fn + self.benchmarks = benchmarks + + def _run(self, bench: Benchmark, save_path: str, show_plots: bool, print_data: bool, diff_col=False, **kwrags): + import os + + import matplotlib.pyplot as plt + import pandas as pd + y_mean = bench.line_names + y_min = [f'{x}-min' for x in bench.line_names] + y_max = [f'{x}-max' for x in bench.line_names] + x_names = list(bench.x_names) + df = pd.DataFrame(columns=x_names + y_mean + y_min + y_max) + for x in bench.x_vals: + # x can be a single value or a sequence of values. + if not isinstance(x, (list, tuple)): + x = [x for _ in x_names] + + if len(x) != len(x_names): + raise ValueError(f"Expected {len(x_names)} values, got {x}") + x_args = dict(zip(x_names, x)) + + row_mean, row_min, row_max = [], [], [] + for y in bench.line_vals: + ret = self.fn(**x_args, **{bench.line_arg: y}, **bench.args, **kwrags) + try: + y_mean, y_min, y_max = ret + except TypeError: + y_mean, y_min, y_max = ret, None, None + row_mean += [y_mean] + row_min += [y_min] + row_max += [y_max] + df.loc[len(df)] = list(x) + row_mean + row_min + row_max + + if bench.plot_name: + plt.figure() + ax = plt.subplot() + # Plot first x value on x axis if there are multiple. 
+ first_x = x_names[0] + for i, y in enumerate(bench.line_names): + y_min, y_max = df[y + '-min'], df[y + '-max'] + col = bench.styles[i][0] if bench.styles else None + sty = bench.styles[i][1] if bench.styles else None + ax.plot(df[first_x], df[y], label=y, color=col, ls=sty) + if not y_min.isnull().all() and not y_max.isnull().all(): + y_min = y_min.astype(float) + y_max = y_max.astype(float) + ax.fill_between(df[first_x], y_min, y_max, alpha=0.15, color=col) + ax.legend() + ax.set_xlabel(bench.xlabel or first_x) + ax.set_ylabel(bench.ylabel) + # ax.set_title(bench.plot_name) + ax.set_xscale("log" if bench.x_log else "linear") + ax.set_yscale("log" if bench.y_log else "linear") + if show_plots: + plt.show() + if save_path: + plt.savefig(os.path.join(save_path, f"{bench.plot_name}.png")) + df = df[x_names + bench.line_names] + if diff_col and df.shape[1] == 2: + col0, col1 = df.columns.tolist() + df['Diff'] = df[col1] - df[col0] + + if print_data: + print(bench.plot_name + ':') + print(df) + if save_path: + df.to_csv(os.path.join(save_path, f"{bench.plot_name}.csv"), float_format='%.1f', index=False) + return df + + def run(self, show_plots=False, print_data=False, save_path='', return_df=False, **kwargs): + has_single_bench = isinstance(self.benchmarks, Benchmark) + benchmarks = [self.benchmarks] if has_single_bench else self.benchmarks + result_dfs = [] + if save_path: + html = open(os.path.join(save_path, "results.html"), "w") + html.write("<html><body>\n") + for bench in benchmarks: + result_dfs.append(self._run(bench, save_path, show_plots, print_data, **kwargs)) + if save_path: + html.write(f"<image src=\"{bench.plot_name}.png\"/>\n") + if save_path: + html.write("</body></html>\n") + if return_df: + if has_single_bench: + return result_dfs[0] + else: + return result_dfs + return None + + + def perf_report(benchmarks): + """ + Mark a function for benchmarking. The benchmark can then be executed by using the :code:`.run` method on the return value. + + :param benchmarks: Benchmarking configurations.
:type benchmarks: List of :class:`Benchmark` + """ + wrapper = lambda fn: Mark(fn, benchmarks) + return wrapper + + + def get_dram_gbps(backend=None, device=None): + ''' return DRAM bandwidth in GB/s ''' + import torch + + from .runtime import driver + if not backend: + backend = runtime.backend.CUDA + if not device: + device = torch.cuda.current_device() + mem_clock_khz = driver.utils.get_device_properties(device)["mem_clock_rate"] # in kHz + bus_width = driver.utils.get_device_properties(device)["mem_bus_width"] + bw_gbps = mem_clock_khz * bus_width * 2 / 1e6 / 8 # In GB/s + return bw_gbps + + + def get_max_tensorcore_tflops(dtype, clock_rate, backend=None, device=None): + import torch + + from .runtime import driver + if not backend: + backend = runtime.backend.CUDA + if not device: + device = torch.cuda.current_device() + + num_subcores = driver.utils.get_device_properties(device)["multiprocessor_count"] * 4 + capability = torch.cuda.get_device_capability(device) + if capability[0] < 8: + assert dtype == torch.float16 + ops_per_sub_core = 256 # 2 4x4x4 Tensor Cores + else: + if dtype in [torch.float32, torch.int32]: + ops_per_sub_core = 256 + elif dtype in [torch.float16, torch.bfloat16, torch.int16]: + ops_per_sub_core = 512 + elif dtype in [torch.int8, tl.float8e4nv, tl.float8e4b15, tl.float8e5]: + ops_per_sub_core = 1024 + else: + raise RuntimeError("dtype not supported") + tflops = num_subcores * clock_rate * ops_per_sub_core * 1e-9 + return tflops + + + # create decorator that wraps test function into + # a cuda-memcheck system call + + + def cuda_memcheck(**target_kwargs): + + def decorator(test_fn): + + @functools.wraps(test_fn) + def wrapper(*args, **kwargs): + import psutil + ppid_name = psutil.Process(os.getppid()).name() + run_cuda_memcheck = target_kwargs.items() <= kwargs.items() + if run_cuda_memcheck and ppid_name != "cuda-memcheck": + # get path of current file + path = os.path.realpath(test_fn.__globals__["__file__"]) + env = {"PATH": os.environ["PATH"], "PYTORCH_NO_CUDA_MEMORY_CACHING": "1"} + assert 'request' in kwargs, "memcheck'ed test must have a (possibly unused) `request` fixture" + test_id = kwargs['request'].node.callspec.id + cmd = f"{path}::{test_fn.__name__}[{test_id}]" + out = subprocess.run(["cuda-memcheck", "pytest", "-vs", cmd], capture_output=True, env=env) + assert out.returncode == 0, "cuda-memcheck returned an error: bounds checking failed" + assert "ERROR SUMMARY: 0 errors" in str(out.stdout) + else: + test_fn(*args, **kwargs) + + return wrapper + + return decorator + + + @contextmanager + def set_gpu_clock(ref_sm_clock=1350, ref_mem_clock=1215): + try: + subprocess.check_output(["nvidia-smi", "-i", "0", "-pm", "1"]) + subprocess.check_output([ + "nvidia-smi", + "-i", + "0", + f"--lock-gpu-clocks={ref_sm_clock},{ref_sm_clock}", + ]) + subprocess.check_output([ + "nvidia-smi", + "-i", + "0", + f"--lock-memory-clocks={ref_mem_clock},{ref_mem_clock}", + ]) + cur_sm_clock = nvsmi(["clocks.current.sm"])[0] + cur_mem_clock = nvsmi(["clocks.current.memory"])[0] + assert abs(cur_sm_clock - ref_sm_clock) < 10, f"GPU SMs must run at {ref_sm_clock} MHz" + assert abs(cur_mem_clock - ref_mem_clock) < 10, f"GPU memory must run at {ref_mem_clock} MHz" + tflops = 1e-6 * 2 * 108 * 4 * 256 * ref_sm_clock # assumes A100: 108 SMs x 4 tensor cores x 2*256 FP16 FLOPs/cycle + gbps = 640 * 2 * ref_mem_clock * 1e-3 # assumes A100: 5120-bit (640-byte) DDR memory bus + yield tflops, gbps + finally: + subprocess.check_output(["nvidia-smi", "-i", "0", "-pm", "0"]) + subprocess.check_output(["nvidia-smi", "-i", "0", "-rgc"]) + subprocess.check_output(["nvidia-smi", "-i", "0", "-rmc"]) + + + def
get_max_simd_tflops(dtype, clock_rate, backend=None, device=None): + import torch + + from .runtime import driver + if not backend: + backend = runtime.backend.CUDA + if not device: + device = torch.cuda.current_device() + + num_subcores = driver.utils.get_device_properties(device)["multiprocessor_count"] * 4 + capability = torch.cuda.get_device_capability() + if capability[0] < 8: + if dtype == torch.float32: + ops_per_sub_core = 32 # 2*16 + elif dtype == torch.float16: + ops_per_sub_core = 64 + else: + raise RuntimeError("dtype not supported") + else: + if dtype == torch.float32: + ops_per_sub_core = 32 + elif dtype in [torch.float16, torch.bfloat16]: + ops_per_sub_core = 64 + else: + raise RuntimeError("dtype not supported") + tflops = num_subcores * clock_rate * ops_per_sub_core * 1e-9 + return tflops
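For reference, a minimal sketch of how the pieces added above compose; the benchmark name, shapes, and save_path are illustrative, and it assumes a CUDA GPU with this Triton checkout importable (matplotlib and pandas are needed for the report):

import torch

from triton.ops.matmul import matmul
from triton.testing import Benchmark, do_bench, perf_report


@perf_report(
    Benchmark(
        x_names=['M', 'N', 'K'],
        x_vals=[512, 1024, 2048],  # scalar x_vals: M = N = K at each point
        line_arg='provider',
        line_vals=['triton', 'torch'],
        line_names=['Triton', 'Torch'],
        plot_name='matmul-runtime',
        args={},
        ylabel='ms',
    ))
def bench_matmul(M, N, K, provider):
    a = torch.randn((M, K), device='cuda', dtype=torch.float16)
    b = torch.randn((K, N), device='cuda', dtype=torch.float16)
    fn = (lambda: matmul(a, b)) if provider == 'triton' else (lambda: torch.matmul(a, b))
    # quantiles=[0.5, 0.2, 0.8] makes do_bench return (median, 20th, 80th
    # percentile), which Mark._run unpacks into the mean/min/max columns
    return do_bench(fn, quantiles=[0.5, 0.2, 0.8])


bench_matmul.run(print_data=True, save_path='.')  # writes matmul-runtime.png/.csv and results.html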