diff --git a/ckpts/universal/global_step20/zero/19.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/19.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..bccfe12bb48f7bc4f89ad3ca83663528a8316a91 --- /dev/null +++ b/ckpts/universal/global_step20/zero/19.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86b43eb6f99ab68ac98e876c37415f40533f0d361d1d39894c0adee066ad03c2 +size 16778396 diff --git a/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a0ad52cfc46bc0b2c4d98a16521a8de39500bdc2 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715682649 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/output.log b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..1ae111c4544f943c3a5c10aa4cd6cfe2b5033c46 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/output.log @@ -0,0 +1,42 @@ + +2024-05-14:10:30:50,317 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:10:30:54,905 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:10:30:54,907 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:10:30:54,908 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step120'} +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/data/cronscript/ckpts//hf_ckpt//global_step120'. Use `repo_type` argument if needed. 
+The above exception was the direct cause of the following exception:
+Traceback (most recent call last):
+  File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+    return _run_code(code, main_globals, None,
+  File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+    exec(code, run_globals)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+    cli_evaluate()
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+    results = evaluator.simple_evaluate(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+    return fn(*args, **kwargs)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+    lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+    return cls(**args, **args2)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+    self._get_config(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+    self._config = transformers.AutoConfig.from_pretrained(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained
+    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict
+    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict
+    resolved_config_file = cached_file(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 462, in cached_file
+    raise EnvironmentError(
+OSError: Incorrect path_or_model_id: '/data/cronscript/ckpts//hf_ckpt//global_step120'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 
+zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..bafe2e715e96eb3c382fe22e111f6bf32fc0b76b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T10:30:50.186000", + "startedAt": "2024-05-14T10:30:49.751162", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3397.410986842105, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.003, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 76.9223403930664 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..d40ed5b71282404024745e237578291ebce77e9a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-14 10:30:49,763 INFO StreamThr :7286 [internal.py:wandb_internal():85] W&B internal server running at pid: 7286, started at: 2024-05-14 10:30:49.763308 +2024-05-14 10:30:49,765 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: status +2024-05-14 10:30:49,768 INFO WriterThread:7286 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/run-pkifitpz.wandb +2024-05-14 10:30:49,768 DEBUG SenderThread:7286 [sender.py:send():378] send: header +2024-05-14 10:30:49,784 DEBUG SenderThread:7286 [sender.py:send():378] send: run +2024-05-14 10:30:50,046 INFO SenderThread:7286 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files +2024-05-14 10:30:50,047 INFO SenderThread:7286 [sender.py:_start_run_threads():1123] run started: pkifitpz with start time 1715682649.763179 +2024-05-14 10:30:50,054 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 10:30:50,055 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: check_version +2024-05-14 10:30:50,136 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 10:30:50,138 DEBUG HandlerThread:7286 [system_info.py:__init__():26] System info init +2024-05-14 10:30:50,138 DEBUG HandlerThread:7286 [system_info.py:__init__():41] System info init done +2024-05-14 10:30:50,138 INFO HandlerThread:7286 [system_monitor.py:start():194] Starting system monitor +2024-05-14 10:30:50,138 INFO SystemMonitor:7286 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 10:30:50,138 INFO HandlerThread:7286 [system_monitor.py:probe():214] Collecting system info +2024-05-14 10:30:50,138 INFO SystemMonitor:7286 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 10:30:50,138 INFO SystemMonitor:7286 [interfaces.py:start():188] Started disk monitoring +2024-05-14 10:30:50,139 INFO SystemMonitor:7286 [interfaces.py:start():188] Started memory monitoring +2024-05-14 10:30:50,139 INFO SystemMonitor:7286 [interfaces.py:start():188] Started network monitoring +2024-05-14 10:30:50,185 DEBUG HandlerThread:7286 [system_info.py:probe():150] Probing system +2024-05-14 10:30:50,194 DEBUG HandlerThread:7286 [system_info.py:_probe_git():135] 
Probing git +2024-05-14 10:30:50,214 ERROR HandlerThread:7286 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 10:30:50,214 DEBUG HandlerThread:7286 [system_info.py:_probe_git():143] Probing git done +2024-05-14 10:30:50,214 DEBUG HandlerThread:7286 [system_info.py:probe():198] Probing system done +2024-05-14 10:30:50,214 DEBUG HandlerThread:7286 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T10:30:50.186000', 'startedAt': '2024-05-14T10:30:49.751162', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3397.410986842105, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.003, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 
3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 
3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 76.9223403930664}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 10:30:50,214 INFO HandlerThread:7286 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 10:30:50,214 INFO HandlerThread:7286 [system_monitor.py:probe():227] Publishing system info +2024-05-14 10:30:50,215 INFO HandlerThread:7286 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 10:30:50,219 DEBUG SenderThread:7286 [sender.py:send():378] send: files +2024-05-14 10:30:50,219 INFO SenderThread:7286 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 10:30:50,313 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 10:30:50,314 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:30:50,314 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: python_packages +2024-05-14 10:30:50,315 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:30:50,464 DEBUG SenderThread:7286 [sender.py:send():378] send: telemetry +2024-05-14 10:30:50,742 INFO wandb-upload_0:7286 [upload_job.py:push():130] Uploaded file /tmp/tmp7ia6v275wandb/8nppt0ki-wandb-metadata.json +2024-05-14 10:30:51,048 INFO Thread-12 :7286 [dir_watcher.py:_on_file_created():271] file/dir created: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/requirements.txt +2024-05-14 10:30:51,048 INFO Thread-12 :7286 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/output.log +2024-05-14 10:30:51,049 INFO Thread-12 :7286 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-metadata.json +2024-05-14 10:30:53,048 INFO Thread-12 :7286 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/output.log +2024-05-14 10:30:54,906 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:56,016 DEBUG SenderThread:7286 [sender.py:send():378] send: exit +2024-05-14 10:30:56,016 INFO SenderThread:7286 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 10:30:56,016 INFO SenderThread:7286 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 10:30:56,018 INFO SenderThread:7286 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:30:56,018 INFO SenderThread:7286 [sender.py:send_exit():593] send defer +2024-05-14 10:30:56,018 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:56,018 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 10:30:56,018 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:56,018 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 10:30:56,018 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 1 +2024-05-14 10:30:56,018 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:56,018 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 10:30:56,018 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:56,018 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 10:30:56,018 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 2 +2024-05-14 10:30:56,018 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:56,018 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 10:30:56,018 INFO HandlerThread:7286 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 10:30:56,019 DEBUG SystemMonitor:7286 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 10:30:56,019 INFO HandlerThread:7286 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 10:30:56,019 DEBUG SystemMonitor:7286 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 10:30:56,019 INFO HandlerThread:7286 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 10:30:56,020 DEBUG SystemMonitor:7286 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 10:30:56,020 INFO HandlerThread:7286 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 10:30:56,021 INFO HandlerThread:7286 [interfaces.py:finish():200] Joined network monitor +2024-05-14 10:30:56,021 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:56,021 INFO SenderThread:7286 
[sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 10:30:56,021 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 3 +2024-05-14 10:30:56,021 DEBUG SenderThread:7286 [sender.py:send():378] send: stats +2024-05-14 10:30:56,021 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:56,022 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 10:30:56,022 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:56,022 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 10:30:56,022 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 4 +2024-05-14 10:30:56,022 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:56,022 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 10:30:56,022 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:56,022 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 10:30:56,022 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 5 +2024-05-14 10:30:56,022 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:56,022 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 10:30:56,023 DEBUG SenderThread:7286 [sender.py:send():378] send: summary +2024-05-14 10:30:56,023 INFO SenderThread:7286 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:30:56,023 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:56,023 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 10:30:56,023 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 6 +2024-05-14 10:30:56,024 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:56,024 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 10:30:56,024 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:56,024 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 10:30:56,026 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:30:56,059 INFO Thread-12 :7286 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-summary.json +2024-05-14 10:30:56,092 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 7 +2024-05-14 10:30:56,092 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:56,092 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 10:30:56,092 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:56,092 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 10:30:57,016 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:57,060 INFO Thread-12 :7286 [dir_watcher.py:_on_file_modified():288] file/dir modified: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/config.yaml +2024-05-14 10:30:57,060 INFO Thread-12 :7286 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/output.log +2024-05-14 10:30:58,484 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 8 +2024-05-14 10:30:58,484 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:58,485 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:58,485 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 10:30:58,485 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:58,485 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 10:30:58,485 INFO SenderThread:7286 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 10:30:58,485 INFO SenderThread:7286 [job_builder.py:_get_source_type():576] no source found +2024-05-14 10:30:58,485 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 9 +2024-05-14 10:30:58,485 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:58,485 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 10:30:58,486 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:58,486 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 10:30:58,486 INFO SenderThread:7286 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 10:30:59,017 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:30:59,061 INFO SenderThread:7286 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/output.log +2024-05-14 10:30:59,061 INFO SenderThread:7286 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files +2024-05-14 10:30:59,061 INFO SenderThread:7286 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-metadata.json wandb-metadata.json +2024-05-14 10:30:59,061 INFO SenderThread:7286 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/requirements.txt requirements.txt +2024-05-14 10:30:59,062 INFO SenderThread:7286 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-summary.json wandb-summary.json +2024-05-14 10:30:59,062 INFO SenderThread:7286 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/config.yaml config.yaml +2024-05-14 10:30:59,062 INFO SenderThread:7286 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/output.log output.log +2024-05-14 10:30:59,062 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 10 +2024-05-14 10:30:59,062 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:30:59,064 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 
10:30:59,065 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 10:30:59,068 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:59,068 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 10:30:59,068 INFO SenderThread:7286 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:30:59,322 INFO wandb-upload_0:7286 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/wandb-summary.json +2024-05-14 10:30:59,466 INFO wandb-upload_1:7286 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/requirements.txt +2024-05-14 10:30:59,564 INFO wandb-upload_3:7286 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/output.log +2024-05-14 10:30:59,569 INFO wandb-upload_2:7286 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/files/config.yaml +2024-05-14 10:30:59,769 INFO Thread-11 (_thread_body):7286 [sender.py:transition_state():613] send defer: 11 +2024-05-14 10:30:59,770 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:59,770 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 10:30:59,770 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:59,770 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 10:30:59,770 INFO SenderThread:7286 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 10:30:59,771 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 12 +2024-05-14 10:30:59,771 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:30:59,771 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 10:30:59,771 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:30:59,771 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 10:30:59,771 INFO SenderThread:7286 [file_stream.py:finish():601] file stream finish called +2024-05-14 10:31:00,010 INFO SenderThread:7286 [file_stream.py:finish():605] file stream finish is done +2024-05-14 10:31:00,010 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 13 +2024-05-14 10:31:00,010 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,010 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 10:31:00,011 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,011 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 10:31:00,011 INFO SenderThread:7286 [sender.py:transition_state():613] send defer: 14 +2024-05-14 10:31:00,011 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:00,011 INFO HandlerThread:7286 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 10:31:00,011 DEBUG SenderThread:7286 [sender.py:send():378] send: final +2024-05-14 10:31:00,011 DEBUG SenderThread:7286 [sender.py:send():378] send: footer +2024-05-14 10:31:00,011 DEBUG 
SenderThread:7286 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:00,011 INFO SenderThread:7286 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 10:31:00,012 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:00,012 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:00,012 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:00,012 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:00,012 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 10:31:00,012 DEBUG SenderThread:7286 [sender.py:send_request():405] send_request: server_info +2024-05-14 10:31:00,014 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 10:31:00,014 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 10:31:00,014 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 10:31:00,088 INFO MainThread:7286 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 10:31:00,088 INFO MainThread:7286 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 10:31:00,088 INFO MainThread:7286 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 10:31:00,089 DEBUG HandlerThread:7286 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 10:31:00,089 INFO HandlerThread:7286 [handler.py:finish():882] shutting down handler +2024-05-14 10:31:01,012 INFO WriterThread:7286 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/run-pkifitpz.wandb +2024-05-14 10:31:01,088 INFO SenderThread:7286 [sender.py:finish():1545] shutting down sender +2024-05-14 10:31:01,088 INFO SenderThread:7286 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:31:01,088 INFO SenderThread:7286 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..1311f42041168b8e60f97b546d11b8833ee30284 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_setup.py:_flush():76] Configure stats pid to 6970 +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 10:30:49,760 WARNING MainThread:6970 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: 
{'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/logs/debug.log +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/logs/debug-internal.log +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_init.py:init():560] calling init triggers +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_init.py:init():610] starting backend +2024-05-14 10:30:49,760 INFO MainThread:6970 [wandb_init.py:init():614] setting up manager +2024-05-14 10:30:49,762 INFO MainThread:6970 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 10:30:49,763 INFO MainThread:6970 [wandb_init.py:init():622] backend started and connected +2024-05-14 10:30:49,767 INFO MainThread:6970 [wandb_init.py:init():711] updated telemetry +2024-05-14 10:30:49,783 INFO MainThread:6970 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 10:30:50,054 INFO MainThread:6970 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 10:30:50,131 INFO MainThread:6970 [wandb_run.py:_on_init():2405] got version response +2024-05-14 10:30:50,131 INFO MainThread:6970 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 10:30:50,314 INFO MainThread:6970 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 10:30:50,314 INFO MainThread:6970 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 10:30:50,314 INFO MainThread:6970 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 10:30:50,314 INFO MainThread:6970 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 10:30:50,315 INFO MainThread:6970 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 10:31:01,089 WARNING MsgRouterThr:6970 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/run-pkifitpz.wandb b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/run-pkifitpz.wandb new file mode 100644 index 0000000000000000000000000000000000000000..e24b8ba4713526ea69181cdbaeeeb52b00a8b104 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103049-pkifitpz/run-pkifitpz.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..57e40fdeb557f8826a74919741b8f08236b8c7ad --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715682677 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/output.log b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..ae5654310221f7d4c46e7cf665813f3c8fb292d2 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/output.log @@ -0,0 +1,42 @@ + +2024-05-14:10:31:17,671 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:10:31:22,124 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:10:31:22,126 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:10:31:22,126 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step20'} +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/data/cronscript/ckpts//hf_ckpt//global_step20'. Use `repo_type` argument if needed. 
+The above exception was the direct cause of the following exception:
+Traceback (most recent call last):
+  File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+    return _run_code(code, main_globals, None,
+  File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+    exec(code, run_globals)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+    cli_evaluate()
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+    results = evaluator.simple_evaluate(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+    return fn(*args, **kwargs)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+    lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+    return cls(**args, **args2)
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+    self._get_config(
+  File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+    self._config = transformers.AutoConfig.from_pretrained(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained
+    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict
+    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict
+    resolved_config_file = cached_file(
+  File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 462, in cached_file
+    raise EnvironmentError(
+OSError: Incorrect path_or_model_id: '/data/cronscript/ckpts//hf_ckpt//global_step20'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 
+zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..65f3b15f5ffeefc52cd6dc8235fddd558e9cc2b0 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T10:31:17.539352", + "startedAt": "2024-05-14T10:31:17.152189", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3392.0137499999996, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.991, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, 
+ { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3296.363, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 76.92284774780273 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..32789e007cc6d28bc7a698046d3b686bda070c22 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-05-14 10:31:17,163 INFO StreamThr :9988 [internal.py:wandb_internal():85] W&B internal server running at pid: 9988, started at: 2024-05-14 10:31:17.162598 +2024-05-14 10:31:17,165 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: status +2024-05-14 10:31:17,166 INFO WriterThread:9988 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/run-axf5cla8.wandb +2024-05-14 10:31:17,167 DEBUG SenderThread:9988 [sender.py:send():378] send: header +2024-05-14 10:31:17,176 DEBUG SenderThread:9988 [sender.py:send():378] send: run +2024-05-14 10:31:17,401 INFO SenderThread:9988 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files +2024-05-14 10:31:17,401 INFO SenderThread:9988 [sender.py:_start_run_threads():1123] run started: axf5cla8 with start time 1715682677.162398 +2024-05-14 10:31:17,407 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 10:31:17,407 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: check_version +2024-05-14 10:31:17,506 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 10:31:17,507 DEBUG HandlerThread:9988 [system_info.py:__init__():26] System info init +2024-05-14 10:31:17,507 DEBUG HandlerThread:9988 [system_info.py:__init__():41] System info init done +2024-05-14 10:31:17,507 INFO HandlerThread:9988 [system_monitor.py:start():194] Starting system monitor +2024-05-14 10:31:17,508 INFO SystemMonitor:9988 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 10:31:17,508 INFO HandlerThread:9988 [system_monitor.py:probe():214] Collecting system info +2024-05-14 10:31:17,508 INFO SystemMonitor:9988 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 10:31:17,508 INFO SystemMonitor:9988 [interfaces.py:start():188] Started disk monitoring +2024-05-14 10:31:17,509 INFO SystemMonitor:9988 [interfaces.py:start():188] Started memory monitoring +2024-05-14 10:31:17,509 INFO SystemMonitor:9988 [interfaces.py:start():188] Started network monitoring +2024-05-14 10:31:17,539 DEBUG HandlerThread:9988 [system_info.py:probe():150] Probing system +2024-05-14 10:31:17,547 DEBUG HandlerThread:9988 [system_info.py:_probe_git():135] 
Probing git +2024-05-14 10:31:17,567 ERROR HandlerThread:9988 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 10:31:17,567 DEBUG HandlerThread:9988 [system_info.py:_probe_git():143] Probing git done +2024-05-14 10:31:17,567 DEBUG HandlerThread:9988 [system_info.py:probe():198] Probing system done +2024-05-14 10:31:17,567 DEBUG HandlerThread:9988 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T10:31:17.539352', 'startedAt': '2024-05-14T10:31:17.152189', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3392.0137499999996, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.997, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.991, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3296.363, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 
3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 76.92284774780273}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 10:31:17,567 INFO HandlerThread:9988 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 10:31:17,567 INFO HandlerThread:9988 [system_monitor.py:probe():227] Publishing system info +2024-05-14 10:31:17,569 INFO HandlerThread:9988 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 10:31:17,572 DEBUG SenderThread:9988 [sender.py:send():378] send: files +2024-05-14 10:31:17,572 INFO SenderThread:9988 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 10:31:17,668 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 10:31:17,668 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: python_packages +2024-05-14 10:31:17,669 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:31:17,669 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:31:17,815 DEBUG SenderThread:9988 [sender.py:send():378] send: telemetry +2024-05-14 10:31:18,088 INFO wandb-upload_0:9988 [upload_job.py:push():130] Uploaded file /tmp/tmpu91_fkxgwandb/0upx8f5z-wandb-metadata.json +2024-05-14 10:31:18,402 INFO Thread-12 :9988 [dir_watcher.py:_on_file_created():271] file/dir created: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/output.log +2024-05-14 10:31:18,402 INFO Thread-12 :9988 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/requirements.txt +2024-05-14 10:31:18,402 INFO Thread-12 :9988 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-metadata.json +2024-05-14 10:31:20,402 INFO Thread-12 :9988 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/output.log +2024-05-14 10:31:23,127 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:31:23,217 DEBUG SenderThread:9988 [sender.py:send():378] send: exit +2024-05-14 10:31:23,217 INFO SenderThread:9988 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 10:31:23,217 INFO SenderThread:9988 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 10:31:23,218 INFO SenderThread:9988 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:31:23,218 INFO SenderThread:9988 [sender.py:send_exit():593] send defer +2024-05-14 10:31:23,218 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,219 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 10:31:23,219 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,219 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 10:31:23,219 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 1 +2024-05-14 10:31:23,219 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,219 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 10:31:23,219 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,219 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 10:31:23,219 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 2 +2024-05-14 10:31:23,219 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,219 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 10:31:23,219 INFO HandlerThread:9988 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 10:31:23,219 DEBUG SystemMonitor:9988 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 10:31:23,220 DEBUG SystemMonitor:9988 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 10:31:23,220 INFO HandlerThread:9988 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 10:31:23,220 DEBUG SystemMonitor:9988 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 10:31:23,220 INFO HandlerThread:9988 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 10:31:23,221 INFO HandlerThread:9988 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 10:31:23,221 INFO HandlerThread:9988 [interfaces.py:finish():200] Joined network monitor +2024-05-14 10:31:23,222 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,222 INFO SenderThread:9988 
[sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 10:31:23,222 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 3 +2024-05-14 10:31:23,222 DEBUG SenderThread:9988 [sender.py:send():378] send: stats +2024-05-14 10:31:23,222 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,222 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 10:31:23,223 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,223 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 10:31:23,223 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 4 +2024-05-14 10:31:23,223 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,223 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 10:31:23,223 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,223 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 10:31:23,223 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 5 +2024-05-14 10:31:23,223 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,223 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 10:31:23,223 DEBUG SenderThread:9988 [sender.py:send():378] send: summary +2024-05-14 10:31:23,224 INFO SenderThread:9988 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:31:23,224 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,224 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 10:31:23,224 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 6 +2024-05-14 10:31:23,224 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,224 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 10:31:23,224 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,224 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 10:31:23,227 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:31:23,404 INFO Thread-12 :9988 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-summary.json +2024-05-14 10:31:23,592 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 7 +2024-05-14 10:31:23,592 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,592 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 10:31:23,592 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,592 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 10:31:23,835 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 8 +2024-05-14 10:31:23,835 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,835 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle 
defer: 8 +2024-05-14 10:31:23,836 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,836 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 10:31:23,836 INFO SenderThread:9988 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 10:31:23,836 INFO SenderThread:9988 [job_builder.py:_get_source_type():576] no source found +2024-05-14 10:31:23,836 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 9 +2024-05-14 10:31:23,836 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,836 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 10:31:23,836 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,836 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 10:31:23,836 INFO SenderThread:9988 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 10:31:24,217 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:24,405 INFO SenderThread:9988 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/output.log +2024-05-14 10:31:24,405 INFO SenderThread:9988 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/config.yaml +2024-05-14 10:31:24,405 INFO SenderThread:9988 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files +2024-05-14 10:31:24,406 INFO SenderThread:9988 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-metadata.json wandb-metadata.json +2024-05-14 10:31:24,406 INFO SenderThread:9988 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/config.yaml config.yaml +2024-05-14 10:31:24,406 INFO SenderThread:9988 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-summary.json wandb-summary.json +2024-05-14 10:31:24,406 INFO SenderThread:9988 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/output.log output.log +2024-05-14 10:31:24,408 INFO SenderThread:9988 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/requirements.txt requirements.txt +2024-05-14 10:31:24,409 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 10 +2024-05-14 10:31:24,409 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:24,412 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:24,412 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 10:31:24,412 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:24,413 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 10:31:24,413 INFO SenderThread:9988 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:31:24,647 INFO wandb-upload_1:9988 [upload_job.py:push():130] Uploaded file 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/config.yaml +2024-05-14 10:31:24,829 INFO wandb-upload_0:9988 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/wandb-summary.json +2024-05-14 10:31:24,877 INFO wandb-upload_3:9988 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/requirements.txt +2024-05-14 10:31:24,889 INFO wandb-upload_2:9988 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/files/output.log +2024-05-14 10:31:25,090 INFO Thread-11 (_thread_body):9988 [sender.py:transition_state():613] send defer: 11 +2024-05-14 10:31:25,090 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:25,090 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 10:31:25,091 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:25,091 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 10:31:25,091 INFO SenderThread:9988 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 10:31:25,091 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 12 +2024-05-14 10:31:25,091 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:25,091 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 10:31:25,091 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:25,091 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 10:31:25,091 INFO SenderThread:9988 [file_stream.py:finish():601] file stream finish called +2024-05-14 10:31:25,217 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:25,552 INFO SenderThread:9988 [file_stream.py:finish():605] file stream finish is done +2024-05-14 10:31:25,552 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 13 +2024-05-14 10:31:25,552 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:25,552 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:25,552 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 10:31:25,552 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:25,553 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 10:31:25,553 INFO SenderThread:9988 [sender.py:transition_state():613] send defer: 14 +2024-05-14 10:31:25,553 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:25,553 DEBUG SenderThread:9988 [sender.py:send():378] send: final +2024-05-14 10:31:25,553 INFO HandlerThread:9988 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 10:31:25,553 DEBUG SenderThread:9988 [sender.py:send():378] send: footer +2024-05-14 10:31:25,553 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:25,553 INFO SenderThread:9988 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 10:31:25,553 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 
10:31:25,553 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:25,554 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:25,554 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 10:31:25,554 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:25,554 DEBUG SenderThread:9988 [sender.py:send_request():405] send_request: server_info +2024-05-14 10:31:25,555 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 10:31:25,555 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 10:31:25,556 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 10:31:25,608 INFO MainThread:9988 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 10:31:25,608 INFO MainThread:9988 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 10:31:25,608 INFO MainThread:9988 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 10:31:25,609 DEBUG HandlerThread:9988 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 10:31:25,609 INFO HandlerThread:9988 [handler.py:finish():882] shutting down handler +2024-05-14 10:31:26,554 INFO WriterThread:9988 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/run-axf5cla8.wandb +2024-05-14 10:31:26,608 INFO SenderThread:9988 [sender.py:finish():1545] shutting down sender +2024-05-14 10:31:26,608 INFO SenderThread:9988 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:31:26,608 INFO SenderThread:9988 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..954b9c6afca558ff8e1399669f8f9dd1ebfe2980 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/logs/debug.log @@ -0,0 +1,28 @@ +2024-05-14 10:31:17,159 INFO MainThread:8779 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 10:31:17,159 INFO MainThread:8779 [wandb_setup.py:_flush():76] Configure stats pid to 8779 +2024-05-14 10:31:17,159 INFO MainThread:8779 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 10:31:17,159 INFO MainThread:8779 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 10:31:17,159 INFO MainThread:8779 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 10:31:17,159 INFO MainThread:8779 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 10:31:17,159 WARNING MainThread:8779 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 10:31:17,159 INFO MainThread:8779 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 10:31:17,159 INFO MainThread:8779 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 10:31:17,159 INFO MainThread:8779 [wandb_init.py:_log_setup():520] Logging user logs to 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/logs/debug.log +2024-05-14 10:31:17,159 INFO MainThread:8779 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/logs/debug-internal.log +2024-05-14 10:31:17,160 INFO MainThread:8779 [wandb_init.py:init():560] calling init triggers +2024-05-14 10:31:17,160 INFO MainThread:8779 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 10:31:17,160 INFO MainThread:8779 [wandb_init.py:init():610] starting backend +2024-05-14 10:31:17,160 INFO MainThread:8779 [wandb_init.py:init():614] setting up manager +2024-05-14 10:31:17,161 INFO MainThread:8779 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 10:31:17,162 INFO MainThread:8779 [wandb_init.py:init():622] backend started and connected +2024-05-14 10:31:17,165 INFO MainThread:8779 [wandb_init.py:init():711] updated telemetry +2024-05-14 10:31:17,176 INFO MainThread:8779 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 10:31:17,406 INFO MainThread:8779 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 10:31:17,501 INFO MainThread:8779 [wandb_run.py:_on_init():2405] got version response +2024-05-14 10:31:17,501 INFO MainThread:8779 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 10:31:17,669 INFO MainThread:8779 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 10:31:17,669 INFO MainThread:8779 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 10:31:17,669 INFO MainThread:8779 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 10:31:17,669 INFO MainThread:8779 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 10:31:17,670 INFO MainThread:8779 [wandb_init.py:init():838] run started, returning control to user process diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/run-axf5cla8.wandb b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/run-axf5cla8.wandb new file mode 100644 index 0000000000000000000000000000000000000000..1d9833583b8dae7545bb0a81986e52acf6d4a02a Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103117-axf5cla8/run-axf5cla8.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..57e40fdeb557f8826a74919741b8f08236b8c7ad --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715682677 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/output.log b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..fb886ba0efc7e59fec2a3c4b2c1b6dc6966b7793 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/output.log @@ -0,0 +1,42 @@ + +2024-05-14:10:31:17,594 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:10:31:22,022 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:10:31:22,024 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:10:31:22,024 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step20'} +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/data/cronscript/ckpts//hf_ckpt//global_step20'. Use `repo_type` argument if needed. 
+The above exception was the direct cause of the following exception: +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 462, in cached_file + raise EnvironmentError( +OSError: Incorrect path_or_model_id: '/data/cronscript/ckpts//hf_ckpt//global_step20'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 
+zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..6b16df9085442f8f1501da7b9dc0d60d3e593374 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T10:31:17.463159", + "startedAt": "2024-05-14T10:31:17.037121", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3393.778907894737, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3293.042, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3293.042, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3261.965, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.459, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 76.9227523803711 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..819ae609606ca6f72966ebb7a3d2fac60990499b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-05-14 10:31:17,048 INFO StreamThr :10009 [internal.py:wandb_internal():85] W&B internal server running at pid: 10009, started at: 2024-05-14 10:31:17.047572 +2024-05-14 10:31:17,050 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: status +2024-05-14 10:31:17,051 INFO WriterThread:10009 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/run-chuadj5b.wandb +2024-05-14 10:31:17,052 DEBUG SenderThread:10009 [sender.py:send():378] send: header +2024-05-14 10:31:17,062 DEBUG SenderThread:10009 [sender.py:send():378] send: run +2024-05-14 10:31:17,299 INFO SenderThread:10009 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files +2024-05-14 10:31:17,300 INFO SenderThread:10009 [sender.py:_start_run_threads():1123] run started: chuadj5b with start time 1715682677.047457 +2024-05-14 10:31:17,306 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 10:31:17,306 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: check_version +2024-05-14 10:31:17,391 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 10:31:17,393 DEBUG HandlerThread:10009 [system_info.py:__init__():26] System info init +2024-05-14 10:31:17,393 DEBUG HandlerThread:10009 [system_info.py:__init__():41] System info init done +2024-05-14 10:31:17,393 INFO HandlerThread:10009 [system_monitor.py:start():194] Starting system monitor +2024-05-14 10:31:17,393 INFO SystemMonitor:10009 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 10:31:17,393 INFO HandlerThread:10009 [system_monitor.py:probe():214] Collecting system info +2024-05-14 10:31:17,394 INFO SystemMonitor:10009 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 10:31:17,396 INFO SystemMonitor:10009 [interfaces.py:start():188] Started disk monitoring +2024-05-14 10:31:17,396 INFO SystemMonitor:10009 [interfaces.py:start():188] Started memory monitoring +2024-05-14 10:31:17,397 INFO SystemMonitor:10009 [interfaces.py:start():188] Started network monitoring +2024-05-14 10:31:17,463 DEBUG HandlerThread:10009 [system_info.py:probe():150] Probing system +2024-05-14 10:31:17,471 DEBUG HandlerThread:10009 
[system_info.py:_probe_git():135] Probing git +2024-05-14 10:31:17,490 ERROR HandlerThread:10009 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 10:31:17,490 DEBUG HandlerThread:10009 [system_info.py:_probe_git():143] Probing git done +2024-05-14 10:31:17,490 DEBUG HandlerThread:10009 [system_info.py:probe():198] Probing system done +2024-05-14 10:31:17,490 DEBUG HandlerThread:10009 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T10:31:17.463159', 'startedAt': '2024-05-14T10:31:17.037121', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3393.778907894737, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3293.042, 'min': 800.0, 'max': 3400.0}, {'current': 3293.042, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3261.965, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 
800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.459, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 76.9227523803711}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 10:31:17,490 INFO HandlerThread:10009 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 10:31:17,490 INFO HandlerThread:10009 [system_monitor.py:probe():227] Publishing system info +2024-05-14 10:31:17,492 INFO HandlerThread:10009 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 10:31:17,495 DEBUG SenderThread:10009 [sender.py:send():378] send: files +2024-05-14 10:31:17,495 INFO SenderThread:10009 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 10:31:17,591 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 10:31:17,591 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 10:31:17,592 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: python_packages +2024-05-14 10:31:17,593 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: stop_status +2024-05-14 10:31:17,745 DEBUG SenderThread:10009 [sender.py:send():378] send: telemetry +2024-05-14 10:31:18,038 INFO wandb-upload_0:10009 [upload_job.py:push():130] Uploaded file /tmp/tmprdkk0lwvwandb/x5wwed0c-wandb-metadata.json +2024-05-14 10:31:18,301 INFO Thread-12 :10009 [dir_watcher.py:_on_file_created():271] file/dir created: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/requirements.txt +2024-05-14 10:31:18,301 INFO Thread-12 :10009 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/output.log +2024-05-14 10:31:18,301 INFO Thread-12 :10009 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-metadata.json +2024-05-14 10:31:20,312 INFO Thread-12 :10009 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/output.log +2024-05-14 10:31:23,025 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:31:23,148 DEBUG SenderThread:10009 [sender.py:send():378] send: exit +2024-05-14 10:31:23,148 INFO SenderThread:10009 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 10:31:23,148 INFO SenderThread:10009 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 10:31:23,149 INFO SenderThread:10009 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:31:23,150 INFO SenderThread:10009 [sender.py:send_exit():593] send defer +2024-05-14 10:31:23,150 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,150 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 10:31:23,150 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,150 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 10:31:23,150 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 1 +2024-05-14 10:31:23,150 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,150 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 10:31:23,150 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,150 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 10:31:23,150 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 2 +2024-05-14 10:31:23,151 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,151 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 10:31:23,151 INFO HandlerThread:10009 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 10:31:23,151 INFO HandlerThread:10009 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 10:31:23,151 INFO HandlerThread:10009 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 10:31:23,151 INFO HandlerThread:10009 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 10:31:23,151 INFO HandlerThread:10009 [interfaces.py:finish():200] Joined network monitor +2024-05-14 10:31:23,151 DEBUG SystemMonitor:10009 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 10:31:23,152 DEBUG SystemMonitor:10009 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 10:31:23,152 DEBUG SystemMonitor:10009 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 10:31:23,153 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 
10:31:23,153 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 10:31:23,153 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 3 +2024-05-14 10:31:23,153 DEBUG SenderThread:10009 [sender.py:send():378] send: stats +2024-05-14 10:31:23,154 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,154 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 10:31:23,154 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,154 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 10:31:23,154 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 4 +2024-05-14 10:31:23,154 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,154 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 10:31:23,154 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,154 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 10:31:23,154 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 5 +2024-05-14 10:31:23,155 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,155 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 10:31:23,155 DEBUG SenderThread:10009 [sender.py:send():378] send: summary +2024-05-14 10:31:23,155 INFO SenderThread:10009 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 10:31:23,156 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,156 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-14 10:31:23,156 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 6 +2024-05-14 10:31:23,156 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,156 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 10:31:23,156 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,156 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 10:31:23,158 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 10:31:23,314 INFO Thread-12 :10009 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-summary.json +2024-05-14 10:31:23,406 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 7 +2024-05-14 10:31:23,406 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,406 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 10:31:23,406 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,406 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 10:31:23,764 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 8 +2024-05-14 10:31:23,765 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,765 INFO 
HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 10:31:23,765 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,765 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 10:31:23,765 INFO SenderThread:10009 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 10:31:23,765 INFO SenderThread:10009 [job_builder.py:_get_source_type():576] no source found +2024-05-14 10:31:23,766 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 9 +2024-05-14 10:31:23,766 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:23,766 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 10:31:23,766 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:23,766 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 10:31:23,766 INFO SenderThread:10009 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 10:31:24,148 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:24,314 INFO SenderThread:10009 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/output.log +2024-05-14 10:31:24,315 INFO SenderThread:10009 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/config.yaml +2024-05-14 10:31:24,315 INFO SenderThread:10009 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files +2024-05-14 10:31:24,315 INFO SenderThread:10009 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-metadata.json wandb-metadata.json +2024-05-14 10:31:24,315 INFO SenderThread:10009 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/output.log output.log +2024-05-14 10:31:24,315 INFO SenderThread:10009 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-summary.json wandb-summary.json +2024-05-14 10:31:24,315 INFO SenderThread:10009 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/requirements.txt requirements.txt +2024-05-14 10:31:24,315 INFO SenderThread:10009 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/config.yaml config.yaml +2024-05-14 10:31:24,317 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 10 +2024-05-14 10:31:24,317 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:24,320 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:24,321 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 10:31:24,322 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:24,322 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 10:31:24,322 INFO SenderThread:10009 [file_pusher.py:finish():169] shutting down file pusher 
+2024-05-14 10:31:24,559 INFO wandb-upload_1:10009 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/output.log +2024-05-14 10:31:24,728 INFO wandb-upload_0:10009 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/wandb-summary.json +2024-05-14 10:31:24,797 INFO wandb-upload_3:10009 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/config.yaml +2024-05-14 10:31:24,801 INFO wandb-upload_2:10009 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/files/requirements.txt +2024-05-14 10:31:25,002 INFO Thread-11 (_thread_body):10009 [sender.py:transition_state():613] send defer: 11 +2024-05-14 10:31:25,002 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:25,002 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 10:31:25,002 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:25,003 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 10:31:25,003 INFO SenderThread:10009 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 10:31:25,003 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 12 +2024-05-14 10:31:25,003 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:25,003 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 10:31:25,003 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:25,003 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 10:31:25,003 INFO SenderThread:10009 [file_stream.py:finish():601] file stream finish called +2024-05-14 10:31:25,149 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:25,568 INFO SenderThread:10009 [file_stream.py:finish():605] file stream finish is done +2024-05-14 10:31:25,568 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 13 +2024-05-14 10:31:25,568 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:25,569 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:25,569 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 10:31:25,569 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:25,569 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 10:31:25,569 INFO SenderThread:10009 [sender.py:transition_state():613] send defer: 14 +2024-05-14 10:31:25,569 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: defer +2024-05-14 10:31:25,569 INFO HandlerThread:10009 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 10:31:25,569 DEBUG SenderThread:10009 [sender.py:send():378] send: final +2024-05-14 10:31:25,569 DEBUG SenderThread:10009 [sender.py:send():378] send: footer +2024-05-14 10:31:25,569 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: defer +2024-05-14 10:31:25,569 INFO SenderThread:10009 [sender.py:send_request_defer():609] handle sender defer: 14 
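Editor's note: the teardown recorded above walks through numbered defer stages (0 through 14), each requested by the handler thread and acknowledged by the sender thread before the next stage begins. The sketch below illustrates that staged-shutdown pattern only; the class and method names are invented for illustration and are not wandb's actual implementation.

# Hypothetical sketch of the staged-shutdown pattern seen in the log above.
# Stage numbers mirror the "send defer: N" / "handle sender defer: N" pairs.
class StagedShutdown:
    def __init__(self, handler, sender, num_stages=15):
        self.handler = handler    # acknowledges each stage ("handle defer: N")
        self.sender = sender      # flushes data for each stage ("handle sender defer: N")
        self.num_stages = num_stages

    def run(self):
        for stage in range(self.num_stages):
            self.handler.acknowledge(stage)  # e.g. stop monitors, build summary, ...
            self.sender.flush(stage)         # only then advance to the next stage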
+2024-05-14 10:31:25,570 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:25,570 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:25,570 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 10:31:25,570 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 10:31:25,571 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 10:31:25,571 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 10:31:25,571 DEBUG SenderThread:10009 [sender.py:send_request():405] send_request: server_info +2024-05-14 10:31:25,571 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 10:31:25,572 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 10:31:25,624 INFO MainThread:10009 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 10:31:25,625 INFO MainThread:10009 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 10:31:25,625 INFO MainThread:10009 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 10:31:25,625 DEBUG HandlerThread:10009 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 10:31:25,625 INFO HandlerThread:10009 [handler.py:finish():882] shutting down handler +2024-05-14 10:31:26,571 INFO WriterThread:10009 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/run-chuadj5b.wandb +2024-05-14 10:31:26,624 INFO SenderThread:10009 [sender.py:finish():1545] shutting down sender +2024-05-14 10:31:26,624 INFO SenderThread:10009 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 10:31:26,624 INFO SenderThread:10009 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..e42066cc4991a518619cb2993b573c50c4ed80ad --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/logs/debug.log @@ -0,0 +1,28 @@ +2024-05-14 10:31:17,044 INFO MainThread:8783 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 10:31:17,044 INFO MainThread:8783 [wandb_setup.py:_flush():76] Configure stats pid to 8783 +2024-05-14 10:31:17,044 INFO MainThread:8783 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 10:31:17,044 INFO MainThread:8783 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 10:31:17,044 INFO MainThread:8783 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 10:31:17,044 INFO MainThread:8783 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 10:31:17,044 WARNING MainThread:8783 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 10:31:17,044 INFO MainThread:8783 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 10:31:17,044 INFO MainThread:8783 [wandb_setup.py:_flush():76] Applying login settings: {} 
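Editor's note: the init sequence in this debug.log is driven by the harness's --wandb_args project=bharatgpt flag recorded in the run metadata. In plain wandb terms it corresponds roughly to the calls below; this is an illustration of the lifecycle, not lm-evaluation-harness's exact wiring.

# Rough equivalent of what --wandb_args project=bharatgpt asks the harness to do.
import wandb

run = wandb.init(project="bharatgpt")  # spawns the backend process seen in debug-internal.log
# ... evaluation runs; results are logged against `run` ...
run.finish()                           # triggers the staged teardown logged above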
+2024-05-14 10:31:17,044 INFO MainThread:8783 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/logs/debug.log +2024-05-14 10:31:17,045 INFO MainThread:8783 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/logs/debug-internal.log +2024-05-14 10:31:17,045 INFO MainThread:8783 [wandb_init.py:init():560] calling init triggers +2024-05-14 10:31:17,045 INFO MainThread:8783 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 10:31:17,045 INFO MainThread:8783 [wandb_init.py:init():610] starting backend +2024-05-14 10:31:17,045 INFO MainThread:8783 [wandb_init.py:init():614] setting up manager +2024-05-14 10:31:17,046 INFO MainThread:8783 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 10:31:17,047 INFO MainThread:8783 [wandb_init.py:init():622] backend started and connected +2024-05-14 10:31:17,050 INFO MainThread:8783 [wandb_init.py:init():711] updated telemetry +2024-05-14 10:31:17,061 INFO MainThread:8783 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 10:31:17,305 INFO MainThread:8783 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 10:31:17,387 INFO MainThread:8783 [wandb_run.py:_on_init():2405] got version response +2024-05-14 10:31:17,387 INFO MainThread:8783 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 10:31:17,592 INFO MainThread:8783 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 10:31:17,592 INFO MainThread:8783 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 10:31:17,592 INFO MainThread:8783 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 10:31:17,592 INFO MainThread:8783 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-14 10:31:17,593 INFO MainThread:8783 [wandb_init.py:init():838] run started, returning control to user process diff --git a/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/run-chuadj5b.wandb b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/run-chuadj5b.wandb new file mode 100644 index 0000000000000000000000000000000000000000..dcc811606665f18a2a2ee7200920f6628ad46f3a Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_103117-chuadj5b/run-chuadj5b.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-8luddtza/files/output.log b/lm-evaluation-harness/wandb/run-20240514_114140-8luddtza/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..fd036eee26562502ef55659a7298dab7e140b18a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114140-8luddtza/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:11:41:40,930 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:11:41:46,726 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:11:41:46,728 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:11:41:46,728 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. 
Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:11:41:58,080 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:11:41:58,080 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +[2024-05-14 11:41:57,671] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:11:41:59,561 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:11:41:59,561 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:11:41:59,580 INFO [task.py:395] Building contexts for indiccopa-hi on rank 4... +100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 111848.11it/s] +2024-05-14:11:42:01,857 INFO [evaluator.py:379] Running loglikelihood requests +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +Passed argument batch_size = auto:1. 
Detecting largest batch size +Determined largest batch size: 64 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_114140-8luddtza/run-8luddtza.wandb b/lm-evaluation-harness/wandb/run-20240514_114140-8luddtza/run-8luddtza.wandb new file mode 100644 index 0000000000000000000000000000000000000000..f1fd2ba76841b8eacebb50f80c991a2b55273496 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_114140-8luddtza/run-8luddtza.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240514_114312-tau13xz6/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_114312-tau13xz6/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..299eb55ae9fb758d9b2420d75d18ee2816d408fb --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_114312-tau13xz6/files/config.yaml @@ -0,0 +1,32 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715686992 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_114312-tau13xz6/run-tau13xz6.wandb b/lm-evaluation-harness/wandb/run-20240514_114312-tau13xz6/run-tau13xz6.wandb new file mode 100644 index 0000000000000000000000000000000000000000..6f9cffbe99aa33b59c0f6bc8be7d3b8686359b21 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_114312-tau13xz6/run-tau13xz6.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/config.yaml b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..25579a2672964cab8fb42cfd1dc11880b206d7d9 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.0 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716403466 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.0 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/output.log b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..658421a5f9aa437039b9835037395b8109f490c2 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/output.log @@ -0,0 +1,34 @@ + +2024-05-22:18:44:27,725 INFO [__main__.py:251] Verbosity set to INFO +2024-05-22:18:44:36,139 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-22:18:44:36,140 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-22:18:44:36,140 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000'} +2024-05-22:18:44:38,432 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File 
"/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3573b3bbfd5b190fed4ccaed4ac2846002aec22 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.0 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..b1e8fbdb52f07f19202d89287bc1a765185220aa --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-22T18:44:27.522605", + "startedAt": "2024-05-22T18:44:26.934452", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2325.8164625, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3389.444, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3389.444, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3234.398, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 3385.951, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.6398582458496 + } + }, + "memory": { + "total": 1007.4379997253418 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..bee5d62e086269ee6a96533f7e3596a539256a9e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 10}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..898c993e9545f748bc9827b5342585e88a10185d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-22 18:44:26,958 INFO StreamThr :1569 [internal.py:wandb_internal():85] W&B internal server running at pid: 1569, started at: 2024-05-22 18:44:26.955010 +2024-05-22 18:44:26,961 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: status +2024-05-22 18:44:26,962 INFO WriterThread:1569 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/run-x9m2mvtt.wandb +2024-05-22 18:44:26,964 DEBUG SenderThread:1569 [sender.py:send():378] send: header +2024-05-22 18:44:26,967 DEBUG SenderThread:1569 [sender.py:send():378] send: run +2024-05-22 18:44:27,281 INFO SenderThread:1569 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files +2024-05-22 18:44:27,281 INFO SenderThread:1569 [sender.py:_start_run_threads():1123] run started: x9m2mvtt with start time 1716403466.954859 +2024-05-22 18:44:27,285 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: check_version +2024-05-22 18:44:27,285 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: check_version +2024-05-22 18:44:27,461 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: run_start +2024-05-22 18:44:27,463 DEBUG HandlerThread:1569 [system_info.py:__init__():26] System info init +2024-05-22 18:44:27,463 DEBUG HandlerThread:1569 [system_info.py:__init__():41] System info init done +2024-05-22 18:44:27,463 INFO HandlerThread:1569 [system_monitor.py:start():194] Starting system monitor +2024-05-22 18:44:27,464 INFO SystemMonitor:1569 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-22 18:44:27,464 INFO HandlerThread:1569 [system_monitor.py:probe():214] Collecting system info +2024-05-22 18:44:27,493 INFO SystemMonitor:1569 [interfaces.py:start():188] 
Started cpu monitoring +2024-05-22 18:44:27,494 INFO SystemMonitor:1569 [interfaces.py:start():188] Started disk monitoring +2024-05-22 18:44:27,494 INFO SystemMonitor:1569 [interfaces.py:start():188] Started memory monitoring +2024-05-22 18:44:27,495 INFO SystemMonitor:1569 [interfaces.py:start():188] Started network monitoring +2024-05-22 18:44:27,522 DEBUG HandlerThread:1569 [system_info.py:probe():150] Probing system +2024-05-22 18:44:27,526 DEBUG HandlerThread:1569 [system_info.py:_probe_git():135] Probing git +2024-05-22 18:44:27,535 ERROR HandlerThread:1569 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-22 18:44:27,535 DEBUG HandlerThread:1569 [system_info.py:_probe_git():143] Probing git done +2024-05-22 18:44:27,535 DEBUG HandlerThread:1569 [system_info.py:probe():198] Probing system done +2024-05-22 18:44:27,535 DEBUG HandlerThread:1569 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T18:44:27.522605', 'startedAt': '2024-05-22T18:44:26.934452', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2325.8164625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3389.444, 'min': 800.0, 'max': 3400.0}, {'current': 3389.444, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3234.398, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3385.951, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.6398582458496}}, 'memory': {'total': 1007.4379997253418}} +2024-05-22 18:44:27,536 INFO HandlerThread:1569 [system_monitor.py:probe():224] Finished collecting system info +2024-05-22 18:44:27,536 INFO HandlerThread:1569 [system_monitor.py:probe():227] Publishing system info +2024-05-22 18:44:27,539 INFO HandlerThread:1569 [system_monitor.py:probe():229] Finished publishing system info 
+2024-05-22 18:44:27,544 DEBUG SenderThread:1569 [sender.py:send():378] send: files +2024-05-22 18:44:27,544 INFO SenderThread:1569 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-22 18:44:27,718 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: python_packages +2024-05-22 18:44:27,718 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: python_packages +2024-05-22 18:44:27,719 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: stop_status +2024-05-22 18:44:27,721 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: stop_status +2024-05-22 18:44:27,812 DEBUG SenderThread:1569 [sender.py:send():378] send: telemetry +2024-05-22 18:44:28,224 INFO wandb-upload_0:1569 [upload_job.py:push():130] Uploaded file /tmp/tmpnu_he00mwandb/7zp1za0g-wandb-metadata.json +2024-05-22 18:44:28,284 INFO Thread-12 :1569 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-metadata.json +2024-05-22 18:44:28,284 INFO Thread-12 :1569 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/output.log +2024-05-22 18:44:28,284 INFO Thread-12 :1569 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/requirements.txt +2024-05-22 18:44:30,283 INFO Thread-12 :1569 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/output.log +2024-05-22 18:44:32,817 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:44:38,141 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:44:38,290 INFO Thread-12 :1569 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/output.log +2024-05-22 18:44:38,439 DEBUG SenderThread:1569 [sender.py:send():378] send: exit +2024-05-22 18:44:38,439 INFO SenderThread:1569 [sender.py:send_exit():585] handling exit code: 1 +2024-05-22 18:44:38,439 INFO SenderThread:1569 [sender.py:send_exit():587] handling runtime: 10 +2024-05-22 18:44:38,440 INFO SenderThread:1569 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:44:38,441 INFO SenderThread:1569 [sender.py:send_exit():593] send defer +2024-05-22 18:44:38,441 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:38,441 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-22 18:44:38,441 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:38,441 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-22 18:44:38,441 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 1 +2024-05-22 18:44:38,441 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:38,441 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-22 18:44:38,441 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:38,441 INFO SenderThread:1569 
[sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-22 18:44:38,441 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 2 +2024-05-22 18:44:38,441 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:38,442 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-22 18:44:38,442 INFO HandlerThread:1569 [system_monitor.py:finish():203] Stopping system monitor +2024-05-22 18:44:38,442 DEBUG SystemMonitor:1569 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-22 18:44:38,442 DEBUG SystemMonitor:1569 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-22 18:44:38,442 DEBUG SystemMonitor:1569 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-22 18:44:38,442 INFO HandlerThread:1569 [interfaces.py:finish():200] Joined cpu monitor +2024-05-22 18:44:38,445 INFO HandlerThread:1569 [interfaces.py:finish():200] Joined disk monitor +2024-05-22 18:44:38,445 INFO HandlerThread:1569 [interfaces.py:finish():200] Joined memory monitor +2024-05-22 18:44:38,445 INFO HandlerThread:1569 [interfaces.py:finish():200] Joined network monitor +2024-05-22 18:44:38,446 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:38,446 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-22 18:44:38,446 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 3 +2024-05-22 18:44:38,446 DEBUG SenderThread:1569 [sender.py:send():378] send: stats +2024-05-22 18:44:38,447 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:38,447 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-22 18:44:38,447 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:38,447 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-22 18:44:38,447 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 4 +2024-05-22 18:44:38,447 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:38,447 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-22 18:44:38,448 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:38,448 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-22 18:44:38,448 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 5 +2024-05-22 18:44:38,448 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:38,448 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-22 18:44:38,448 DEBUG SenderThread:1569 [sender.py:send():378] send: summary +2024-05-22 18:44:38,449 INFO SenderThread:1569 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:44:38,449 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:38,449 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-22 18:44:38,449 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 6 +2024-05-22 18:44:38,449 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:38,449 INFO HandlerThread:1569 
[handler.py:handle_request_defer():184] handle defer: 6 +2024-05-22 18:44:38,449 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:38,449 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-22 18:44:38,454 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:44:38,541 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 7 +2024-05-22 18:44:38,541 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:38,541 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-22 18:44:38,541 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:38,541 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-22 18:44:39,291 INFO Thread-12 :1569 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/config.yaml +2024-05-22 18:44:39,292 INFO Thread-12 :1569 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-summary.json +2024-05-22 18:44:39,439 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:44:39,838 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 8 +2024-05-22 18:44:39,838 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:44:39,838 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:39,838 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-22 18:44:39,838 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:39,838 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-22 18:44:39,838 INFO SenderThread:1569 [job_builder.py:build():432] Attempting to build job artifact +2024-05-22 18:44:39,839 INFO SenderThread:1569 [job_builder.py:_get_source_type():576] no source found +2024-05-22 18:44:39,839 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 9 +2024-05-22 18:44:39,839 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:39,839 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-22 18:44:39,839 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:39,839 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-22 18:44:39,839 INFO SenderThread:1569 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-22 18:44:40,293 INFO SenderThread:1569 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/output.log +2024-05-22 18:44:40,294 INFO SenderThread:1569 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files +2024-05-22 18:44:40,294 INFO SenderThread:1569 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-metadata.json wandb-metadata.json +2024-05-22 18:44:40,294 INFO 
SenderThread:1569 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-summary.json wandb-summary.json +2024-05-22 18:44:40,294 INFO SenderThread:1569 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/config.yaml config.yaml +2024-05-22 18:44:40,296 INFO SenderThread:1569 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/requirements.txt requirements.txt +2024-05-22 18:44:40,297 INFO SenderThread:1569 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/output.log output.log +2024-05-22 18:44:40,297 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 10 +2024-05-22 18:44:40,297 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:40,297 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-22 18:44:40,297 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:40,297 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-22 18:44:40,297 INFO SenderThread:1569 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:44:40,439 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:44:40,440 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:44:40,612 INFO wandb-upload_0:1569 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/wandb-summary.json +2024-05-22 18:44:40,867 INFO wandb-upload_1:1569 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/config.yaml +2024-05-22 18:44:40,900 INFO wandb-upload_2:1569 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/requirements.txt +2024-05-22 18:44:40,908 INFO wandb-upload_3:1569 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/files/output.log +2024-05-22 18:44:41,108 INFO Thread-11 (_thread_body):1569 [sender.py:transition_state():613] send defer: 11 +2024-05-22 18:44:41,109 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:41,109 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-22 18:44:41,109 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:41,109 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-22 18:44:41,109 INFO SenderThread:1569 [file_pusher.py:join():175] waiting for file pusher +2024-05-22 18:44:41,109 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 12 +2024-05-22 18:44:41,109 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:41,109 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-22 18:44:41,109 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:41,109 INFO 
SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-22 18:44:41,109 INFO SenderThread:1569 [file_stream.py:finish():601] file stream finish called +2024-05-22 18:44:41,172 INFO SenderThread:1569 [file_stream.py:finish():605] file stream finish is done +2024-05-22 18:44:41,172 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 13 +2024-05-22 18:44:41,172 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:41,172 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-22 18:44:41,172 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:41,172 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-22 18:44:41,172 INFO SenderThread:1569 [sender.py:transition_state():613] send defer: 14 +2024-05-22 18:44:41,172 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:44:41,172 INFO HandlerThread:1569 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-22 18:44:41,173 DEBUG SenderThread:1569 [sender.py:send():378] send: final +2024-05-22 18:44:41,173 DEBUG SenderThread:1569 [sender.py:send():378] send: footer +2024-05-22 18:44:41,173 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: defer +2024-05-22 18:44:41,173 INFO SenderThread:1569 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-22 18:44:41,173 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:44:41,173 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:44:41,173 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: server_info +2024-05-22 18:44:41,174 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: get_summary +2024-05-22 18:44:41,174 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-22 18:44:41,174 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-22 18:44:41,174 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:44:41,174 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:44:41,174 DEBUG SenderThread:1569 [sender.py:send_request():405] send_request: server_info +2024-05-22 18:44:41,227 INFO MainThread:1569 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-22 18:44:41,227 INFO MainThread:1569 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-22 18:44:41,227 INFO MainThread:1569 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-22 18:44:41,227 DEBUG HandlerThread:1569 [handler.py:handle_request():158] handle_request: shutdown +2024-05-22 18:44:41,227 INFO HandlerThread:1569 [handler.py:finish():882] shutting down handler +2024-05-22 18:44:42,174 INFO WriterThread:1569 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/run-x9m2mvtt.wandb +2024-05-22 18:44:42,227 INFO SenderThread:1569 [sender.py:finish():1545] shutting down sender +2024-05-22 18:44:42,227 INFO SenderThread:1569 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:44:42,227 INFO SenderThread:1569 [file_pusher.py:join():175] waiting for file pusher diff --git 
a/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/logs/debug.log b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..c7f48a2228400141c6860dce22c7b249e20f2bb9 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-22 18:44:26,948 INFO MainThread:1414 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-22 18:44:26,948 INFO MainThread:1414 [wandb_setup.py:_flush():76] Configure stats pid to 1414 +2024-05-22 18:44:26,948 INFO MainThread:1414 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-22 18:44:26,948 INFO MainThread:1414 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-22 18:44:26,948 INFO MainThread:1414 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-22 18:44:26,948 INFO MainThread:1414 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-22 18:44:26,948 WARNING MainThread:1414 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-22 18:44:26,948 INFO MainThread:1414 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-22 18:44:26,948 INFO MainThread:1414 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-22 18:44:26,949 INFO MainThread:1414 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/logs/debug.log +2024-05-22 18:44:26,949 INFO MainThread:1414 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/logs/debug-internal.log +2024-05-22 18:44:26,949 INFO MainThread:1414 [wandb_init.py:init():560] calling init triggers +2024-05-22 18:44:26,949 INFO MainThread:1414 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-22 18:44:26,949 INFO MainThread:1414 [wandb_init.py:init():610] starting backend +2024-05-22 18:44:26,949 INFO MainThread:1414 [wandb_init.py:init():614] setting up manager +2024-05-22 18:44:26,953 INFO MainThread:1414 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-22 18:44:26,954 INFO MainThread:1414 [wandb_init.py:init():622] backend started and connected +2024-05-22 18:44:26,958 INFO MainThread:1414 [wandb_init.py:init():711] updated telemetry +2024-05-22 18:44:26,967 INFO MainThread:1414 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-22 18:44:27,285 INFO MainThread:1414 [wandb_run.py:_on_init():2396] communicating current version +2024-05-22 18:44:27,455 INFO MainThread:1414 [wandb_run.py:_on_init():2405] got version response +2024-05-22 18:44:27,455 INFO MainThread:1414 [wandb_init.py:init():795] starting run threads in backend +2024-05-22 18:44:27,719 INFO MainThread:1414 [wandb_run.py:_console_start():2374] atexit reg +2024-05-22 18:44:27,719 INFO MainThread:1414 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-22 18:44:27,720 INFO MainThread:1414 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-22 18:44:27,720 INFO MainThread:1414 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-22 18:44:27,723 INFO MainThread:1414 [wandb_init.py:init():838] run started, returning control to user process +2024-05-22 18:44:42,228 WARNING MsgRouterThr:1414 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/run-x9m2mvtt.wandb b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/run-x9m2mvtt.wandb new file mode 100644 index 0000000000000000000000000000000000000000..186edb2d4fc3b902ac13a6d58ea596173768ad02 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240522_184426-x9m2mvtt/run-x9m2mvtt.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_040815-tp6gmuhp/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_040815-tp6gmuhp/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..88181d04cb90f3bd8f00a85cc517ce4f45bd5aed --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_040815-tp6gmuhp/files/requirements.txt @@ -0,0 +1,156 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +megatron-lm==1.1.5 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 
+tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_040815-tp6gmuhp/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_040815-tp6gmuhp/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..6e5348024f7bd7fa2ca086002786a4e901dfc8f4 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_040815-tp6gmuhp/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T04:08:16.110227", + "startedAt": "2024-05-23T04:08:15.580708", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.355975, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, 
+ "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3368.704, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.002, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 
+ }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 209.5428123474121 + } + }, + "memory": { + "total": 1007.4379463195801 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_040815-tp6gmuhp/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_040815-tp6gmuhp/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..018b4068e81a342faa2a3b0691dee6965106bcc3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_040815-tp6gmuhp/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 12}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f8eaa9ab255a539a7a250ce0b43dffb03deec61 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716447824 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/output.log b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..3b1963c4f4dc2234a7c3cdcc2b4ea8c2f89d148b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:07:03:44,676 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:07:03:54,065 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:07:03:54,067 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:07:03:54,067 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': 
'/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'} +2024-05-23:07:03:56,364 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8150356038c46ec25f623f6e945d6dcb66a2e717 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..4efe6c1a51f057525f2d44ca2e07a05d461d2b5f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T07:03:44.470568", + "startedAt": "2024-05-23T07:03:43.984994", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2339.9390875, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.60309219360352 + } + }, + "memory": { + "total": 1007.4379501342773 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..8f5624b50f4d774a70f6c8292c6b7927cf2178b6 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/logs/debug-internal.log @@ -0,0 +1,181 @@ +2024-05-23 07:03:44,005 INFO StreamThr :825 [internal.py:wandb_internal():85] W&B internal server running at pid: 825, started at: 2024-05-23 07:03:44.004719 +2024-05-23 07:03:44,009 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: status +2024-05-23 07:03:44,010 INFO WriterThread:825 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/run-3t5hp2cf.wandb +2024-05-23 07:03:44,013 DEBUG SenderThread:825 [sender.py:send():378] send: header +2024-05-23 07:03:44,018 DEBUG SenderThread:825 [sender.py:send():378] send: run +2024-05-23 07:03:44,269 INFO SenderThread:825 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files +2024-05-23 07:03:44,269 INFO SenderThread:825 [sender.py:_start_run_threads():1123] run started: 3t5hp2cf with start time 1716447824.005669 +2024-05-23 07:03:44,273 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 07:03:44,273 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: check_version +2024-05-23 07:03:44,392 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 07:03:44,394 DEBUG HandlerThread:825 [system_info.py:__init__():26] System info init +2024-05-23 07:03:44,394 DEBUG HandlerThread:825 [system_info.py:__init__():41] System info init done +2024-05-23 07:03:44,394 INFO HandlerThread:825 [system_monitor.py:start():194] Starting system monitor +2024-05-23 07:03:44,394 INFO SystemMonitor:825 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 07:03:44,394 INFO HandlerThread:825 [system_monitor.py:probe():214] Collecting system info +2024-05-23 07:03:44,402 INFO SystemMonitor:825 [interfaces.py:start():188] Started cpu monitoring 
+2024-05-23 07:03:44,407 INFO SystemMonitor:825 [interfaces.py:start():188] Started disk monitoring +2024-05-23 07:03:44,408 INFO SystemMonitor:825 [interfaces.py:start():188] Started memory monitoring +2024-05-23 07:03:44,408 INFO SystemMonitor:825 [interfaces.py:start():188] Started network monitoring +2024-05-23 07:03:44,470 DEBUG HandlerThread:825 [system_info.py:probe():150] Probing system +2024-05-23 07:03:44,473 DEBUG HandlerThread:825 [system_info.py:_probe_git():135] Probing git +2024-05-23 07:03:44,483 ERROR HandlerThread:825 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 07:03:44,483 DEBUG HandlerThread:825 [system_info.py:_probe_git():143] Probing git done +2024-05-23 07:03:44,483 DEBUG HandlerThread:825 [system_info.py:probe():198] Probing system done +2024-05-23 07:03:44,483 DEBUG HandlerThread:825 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T07:03:44.470568', 'startedAt': '2024-05-23T07:03:43.984994', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2339.9390875, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.60309219360352}}, 'memory': {'total': 1007.4379501342773}} +2024-05-23 07:03:44,483 INFO HandlerThread:825 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 07:03:44,483 INFO HandlerThread:825 [system_monitor.py:probe():227] Publishing system info +2024-05-23 07:03:44,487 INFO HandlerThread:825 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 07:03:44,492 DEBUG SenderThread:825 
[sender.py:send():378] send: files +2024-05-23 07:03:44,492 INFO SenderThread:825 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 07:03:44,670 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 07:03:44,671 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: python_packages +2024-05-23 07:03:44,673 DEBUG SenderThread:825 [sender.py:send():378] send: telemetry +2024-05-23 07:03:44,713 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 07:03:44,714 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: stop_status +2024-05-23 07:03:45,119 INFO wandb-upload_0:825 [upload_job.py:push():130] Uploaded file /tmp/tmphpjqrb5kwandb/90sz5db7-wandb-metadata.json +2024-05-23 07:03:45,271 INFO Thread-12 :825 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-metadata.json +2024-05-23 07:03:45,271 INFO Thread-12 :825 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/requirements.txt +2024-05-23 07:03:45,271 INFO Thread-12 :825 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/output.log +2024-05-23 07:03:47,270 INFO Thread-12 :825 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/output.log +2024-05-23 07:03:49,870 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:03:55,068 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:03:55,277 INFO Thread-12 :825 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/output.log +2024-05-23 07:03:56,380 DEBUG SenderThread:825 [sender.py:send():378] send: exit +2024-05-23 07:03:56,381 INFO SenderThread:825 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 07:03:56,381 INFO SenderThread:825 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 07:03:56,382 INFO SenderThread:825 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 07:03:56,382 INFO SenderThread:825 [sender.py:send_exit():593] send defer +2024-05-23 07:03:56,383 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:56,383 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 07:03:56,383 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:56,383 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 07:03:56,383 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 1 +2024-05-23 07:03:56,383 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:56,383 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 07:03:56,383 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:56,383 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 
07:03:56,383 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 2 +2024-05-23 07:03:56,383 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:56,383 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 07:03:56,383 INFO HandlerThread:825 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 07:03:56,384 INFO HandlerThread:825 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 07:03:56,384 INFO HandlerThread:825 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 07:03:56,384 INFO HandlerThread:825 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 07:03:56,384 INFO HandlerThread:825 [interfaces.py:finish():200] Joined network monitor +2024-05-23 07:03:56,384 DEBUG SystemMonitor:825 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 07:03:56,389 DEBUG SystemMonitor:825 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 07:03:56,390 DEBUG SystemMonitor:825 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 07:03:56,392 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:56,393 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 07:03:56,393 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 3 +2024-05-23 07:03:56,393 DEBUG SenderThread:825 [sender.py:send():378] send: stats +2024-05-23 07:03:56,394 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:56,394 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 07:03:56,394 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:56,394 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 07:03:56,394 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 4 +2024-05-23 07:03:56,394 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:56,394 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 07:03:56,394 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:56,394 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 07:03:56,394 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 5 +2024-05-23 07:03:56,394 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:56,394 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 07:03:56,395 DEBUG SenderThread:825 [sender.py:send():378] send: summary +2024-05-23 07:03:56,395 INFO SenderThread:825 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 07:03:56,396 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:56,396 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 07:03:56,396 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 6 +2024-05-23 07:03:56,396 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:56,396 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 07:03:56,396 DEBUG SenderThread:825 
[sender.py:send_request():405] send_request: defer +2024-05-23 07:03:56,396 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 07:03:56,400 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 07:03:56,465 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 7 +2024-05-23 07:03:56,465 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:56,465 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 07:03:56,465 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:56,466 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 07:03:56,898 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 8 +2024-05-23 07:03:56,898 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:56,898 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 07:03:56,898 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:56,898 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 07:03:56,899 INFO SenderThread:825 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 07:03:56,899 INFO SenderThread:825 [job_builder.py:_get_source_type():576] no source found +2024-05-23 07:03:56,899 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 9 +2024-05-23 07:03:56,899 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:56,899 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 07:03:56,899 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:56,899 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 07:03:56,899 INFO SenderThread:825 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 07:03:57,279 INFO SenderThread:825 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/output.log +2024-05-23 07:03:57,279 INFO SenderThread:825 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/config.yaml +2024-05-23 07:03:57,279 INFO SenderThread:825 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-summary.json +2024-05-23 07:03:57,279 INFO SenderThread:825 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files +2024-05-23 07:03:57,280 INFO SenderThread:825 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/output.log output.log +2024-05-23 07:03:57,280 INFO SenderThread:825 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/config.yaml config.yaml +2024-05-23 07:03:57,282 INFO SenderThread:825 [dir_watcher.py:finish():402] scan save: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-summary.json wandb-summary.json +2024-05-23 07:03:57,284 INFO SenderThread:825 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-metadata.json wandb-metadata.json +2024-05-23 07:03:57,284 INFO SenderThread:825 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/requirements.txt requirements.txt +2024-05-23 07:03:57,284 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 10 +2024-05-23 07:03:57,284 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:57,284 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 07:03:57,285 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:57,285 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 07:03:57,285 INFO SenderThread:825 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 07:03:57,381 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:03:57,381 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:03:57,512 INFO wandb-upload_0:825 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/output.log +2024-05-23 07:03:57,839 INFO wandb-upload_1:825 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/config.yaml +2024-05-23 07:03:57,877 INFO wandb-upload_2:825 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/wandb-summary.json +2024-05-23 07:03:57,883 INFO wandb-upload_3:825 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/files/requirements.txt +2024-05-23 07:03:58,083 INFO Thread-11 (_thread_body):825 [sender.py:transition_state():613] send defer: 11 +2024-05-23 07:03:58,083 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:58,083 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 07:03:58,083 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:58,083 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 07:03:58,083 INFO SenderThread:825 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 07:03:58,084 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 12 +2024-05-23 07:03:58,084 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:58,084 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 07:03:58,084 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:58,084 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 07:03:58,084 INFO SenderThread:825 [file_stream.py:finish():601] file stream finish called +2024-05-23 07:03:58,260 INFO SenderThread:825 [file_stream.py:finish():605] file stream finish is done 
+2024-05-23 07:03:58,260 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 13 +2024-05-23 07:03:58,260 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:58,260 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 07:03:58,260 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:58,260 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 07:03:58,260 INFO SenderThread:825 [sender.py:transition_state():613] send defer: 14 +2024-05-23 07:03:58,260 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: defer +2024-05-23 07:03:58,260 INFO HandlerThread:825 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 07:03:58,261 DEBUG SenderThread:825 [sender.py:send():378] send: final +2024-05-23 07:03:58,261 DEBUG SenderThread:825 [sender.py:send():378] send: footer +2024-05-23 07:03:58,261 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: defer +2024-05-23 07:03:58,261 INFO SenderThread:825 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 07:03:58,261 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:03:58,261 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:03:58,262 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 07:03:58,262 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 07:03:58,262 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 07:03:58,262 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 07:03:58,262 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 07:03:58,262 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 07:03:58,263 DEBUG SenderThread:825 [sender.py:send_request():405] send_request: server_info +2024-05-23 07:03:58,325 INFO MainThread:825 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 07:03:58,325 INFO MainThread:825 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 07:03:58,325 INFO MainThread:825 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 07:03:58,325 DEBUG HandlerThread:825 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 07:03:58,326 INFO HandlerThread:825 [handler.py:finish():882] shutting down handler +2024-05-23 07:03:59,262 INFO WriterThread:825 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/run-3t5hp2cf.wandb +2024-05-23 07:03:59,325 INFO SenderThread:825 [sender.py:finish():1545] shutting down sender +2024-05-23 07:03:59,325 INFO SenderThread:825 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 07:03:59,325 INFO SenderThread:825 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..40dc0b8bc8238538a988744011e2ad11d0c6336f --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 07:03:43,999 INFO MainThread:669 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 07:03:43,999 INFO MainThread:669 [wandb_setup.py:_flush():76] Configure stats pid to 669 +2024-05-23 07:03:43,999 INFO MainThread:669 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 07:03:43,999 INFO MainThread:669 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 07:03:43,999 INFO MainThread:669 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 07:03:43,999 INFO MainThread:669 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 07:03:43,999 WARNING MainThread:669 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 07:03:43,999 INFO MainThread:669 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 07:03:43,999 INFO MainThread:669 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 07:03:43,999 INFO MainThread:669 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/logs/debug.log +2024-05-23 07:03:44,000 INFO MainThread:669 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/logs/debug-internal.log +2024-05-23 07:03:44,000 INFO MainThread:669 [wandb_init.py:init():560] calling init triggers +2024-05-23 07:03:44,000 INFO MainThread:669 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 07:03:44,000 INFO MainThread:669 [wandb_init.py:init():610] starting backend +2024-05-23 07:03:44,000 INFO MainThread:669 [wandb_init.py:init():614] setting up manager +2024-05-23 07:03:44,004 INFO MainThread:669 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 07:03:44,005 INFO MainThread:669 [wandb_init.py:init():622] backend started and connected +2024-05-23 07:03:44,009 INFO MainThread:669 [wandb_init.py:init():711] updated telemetry +2024-05-23 07:03:44,018 INFO MainThread:669 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 07:03:44,273 INFO MainThread:669 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 07:03:44,386 INFO MainThread:669 [wandb_run.py:_on_init():2405] got version response +2024-05-23 07:03:44,386 INFO MainThread:669 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 07:03:44,672 INFO MainThread:669 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 07:03:44,672 INFO MainThread:669 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 07:03:44,672 INFO MainThread:669 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 07:03:44,672 INFO MainThread:669 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-23 07:03:44,674 INFO MainThread:669 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 07:03:59,326 WARNING MsgRouterThr:669 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/run-3t5hp2cf.wandb b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/run-3t5hp2cf.wandb new file mode 100644 index 0000000000000000000000000000000000000000..8eb0e215ff644e7f6bb3a3282dbf6790d12f9807 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_070343-3t5hp2cf/run-3t5hp2cf.wandb differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2e143f59fad6e49dfbeb64032ab92c9e3d8ca58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3c7dfc33440208de0dc2af6b2a8a2d7cbd709d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_container.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_container.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eaaa30eb04c2ee4afaf891c0ac482ed1fb0624cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_container.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_function.cpython-310.pyc b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f18bdc42ce66cb38b20c01591bb9ee46ebe68af Binary files /dev/null and b/venv/lib/python3.10/site-packages/dataproperty/__pycache__/_function.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/language/__init__.py b/venv/lib/python3.10/site-packages/triton/language/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2b5cfac6290430ffb267084f79e82718dd71123 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/language/__init__.py @@ -0,0 +1,218 @@ +"""isort:skip_file""" +# Import order is significant here. + +from . import math +from . 
import extra +from .standard import ( + argmax, + argmin, + cdiv, + cumprod, + cumsum, + max, + maximum, + min, + minimum, + sigmoid, + softmax, + sort, + sum, + ravel, + swizzle2d, + xor_sum, + zeros, + zeros_like, +) +from .core import ( + TRITON_MAX_TENSOR_NUMEL, + abs, + advance, + arange, + associative_scan, + atomic_add, + atomic_and, + atomic_cas, + atomic_max, + atomic_min, + atomic_or, + atomic_xchg, + atomic_xor, + bfloat16, + block_type, + broadcast, + broadcast_to, + cat, + constexpr, + cos, + debug_barrier, + device_assert, + device_print, + dot, + dtype, + exp, + expand_dims, + full, + fdiv, + float16, + float32, + float64, + float8e4b15, + float8e4b15x4, + float8e4nv, + float8e5, + function_type, + inline_asm_elementwise, + int1, + int16, + int32, + int64, + int8, + load, + log, + make_block_ptr, + max_constancy, + max_contiguous, + multiple_of, + num_programs, + pi32_t, + pointer_type, + program_id, + reduce, + reshape, + sin, + sqrt, + static_assert, + static_print, + store, + static_range, + tensor, + trans, + # triton, + uint16, + uint32, + uint64, + uint8, + umulhi, + view, + void, + where, +) +from .random import ( + pair_uniform_to_normal, + philox, + philox_impl, + rand, + rand4x, + randint, + randint4x, + randn, + randn4x, + uint_to_uniform_float, +) + +__all__ = [ + "TRITON_MAX_TENSOR_NUMEL", + "abs", + "advance", + "arange", + "argmin", + "argmax", + "associative_scan", + "atomic_add", + "atomic_and", + "atomic_cas", + "atomic_max", + "atomic_min", + "atomic_or", + "atomic_xchg", + "atomic_xor", + "bfloat16", + "block_type", + "broadcast", + "broadcast_to", + "builtin", + "cat", + "cdiv", + "constexpr", + "cos", + "cumprod", + "cumsum", + "debug_barrier", + "device_assert", + "device_print", + "dot", + "dtype", + "exp", + "expand_dims", + "extra", + "fdiv", + "float16", + "float32", + "float64", + "float8e4b15", + "float8e4b15x4", + "float8e4nv", + "float8e5", + "full", + "function_type", + "inline_asm_elementwise", + "int1", + "int16", + "int32", + "int64", + "int8", + "ir", + "math", + "load", + "log", + "make_block_ptr", + "max", + "max_constancy", + "max_contiguous", + "maximum", + "min", + "minimum", + "multiple_of", + "num_programs", + "pair_uniform_to_normal", + "philox", + "philox_impl", + "pi32_t", + "pointer_type", + "program_id", + "rand", + "rand4x", + "randint", + "randint4x", + "randn", + "randn4x", + "ravel", + "reduce", + "reshape", + "sigmoid", + "sin", + "softmax", + "sort", + "sqrt", + "static_range", + "static_assert", + "static_print", + "store", + "sum", + "swizzle2d", + "tensor", + "trans", + "triton", + "uint16", + "uint32", + "uint_to_uniform_float", + "uint64", + "uint8", + "umulhi", + "view", + "void", + "where", + "xor_sum", + "zeros", + "zeros_like", +] diff --git a/venv/lib/python3.10/site-packages/triton/language/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/language/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c854de44f33990e1c65d6d9fad72dca85939251e Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/language/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/language/__pycache__/core.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/language/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af79794b9a52fec3ae931156b4219173c22267f1 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/triton/language/__pycache__/core.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/language/__pycache__/math.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/language/__pycache__/math.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14f9f49c70e5b08c957ca37b01bce406cfa5ad10 Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/language/__pycache__/math.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/language/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/language/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..539cbf83d83fc04fafe42b64bfc1ed2a6fe809ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/language/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/language/__pycache__/semantic.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/language/__pycache__/semantic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff5ef9b76d6ee8bd9ae4023b4a22fb9dcc6b3a17 Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/language/__pycache__/semantic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/language/__pycache__/standard.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/language/__pycache__/standard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55b5d7eaa920a57763044b1be10f11c2e739596e Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/language/__pycache__/standard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/language/core.py b/venv/lib/python3.10/site-packages/triton/language/core.py new file mode 100644 index 0000000000000000000000000000000000000000..a60a9b7bc83263e5cda4f02f6ba0cfbfbf429540 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/language/core.py @@ -0,0 +1,1883 @@ +from __future__ import annotations + +from contextlib import contextmanager +from enum import Enum +from functools import partial, wraps +from typing import Callable, List, Sequence, TypeVar + +from .._C.libtriton.triton import ir +from . import semantic + +T = TypeVar('T') + +TRITON_MAX_TENSOR_NUMEL = 1048576 + +TRITON_BUILTIN = "__triton_builtin__" + + +def builtin(fn: T) -> T: + """Mark a function as a builtin.""" + assert callable(fn) + + @wraps(fn) + def wrapper(*args, **kwargs): + if "_builder" not in kwargs or kwargs["_builder"] is None: + raise ValueError("Did you forget to add @triton.jit ? 
" + "(`_builder` argument must be provided outside of JIT functions.)") + return fn(*args, **kwargs) + + setattr(wrapper, TRITON_BUILTIN, True) + + return wrapper + + +def is_builtin(fn) -> bool: + """Is this a registered triton builtin function?""" + return getattr(fn, TRITON_BUILTIN, False) + + +def _to_tensor(x, builder): + if isinstance(x, bool): + return tensor(builder.get_int1(x), int1) + # Note: compile-time const integers are represented by unsigned values + elif isinstance(x, int): + if -2**31 <= x < 2**31: + return tensor(builder.get_int32(x), int32) + elif 2**31 <= x < 2**32: + return tensor(builder.get_uint32(x), uint32) + elif -2**63 <= x < 2**63: + return tensor(builder.get_int64(x), int64) + elif 2**63 <= x < 2**64: + return tensor(builder.get_uint64(x), uint64) + else: + raise RuntimeError(f'Nonrepresentable integer {x}.') + elif isinstance(x, float): + min_float32 = 2**-126 + max_float32 = (2 - 2**-23) * 2**127 + abs_x = __builtins__['abs'](x) + if abs_x == float("inf") or\ + abs_x == 0.0 or \ + x != x or \ + min_float32 <= abs_x <= max_float32: + return tensor(builder.get_fp32(x), float32) + else: + return tensor(builder.get_fp64(x), float64) + + elif isinstance(x, constexpr): + return _to_tensor(x.value, builder) + elif isinstance(x, tensor): + return x + assert False, f"cannot convert {x} of type {type(x)} to tensor" + + +class dtype: + SINT_TYPES = ['int8', 'int16', 'int32', 'int64'] + UINT_TYPES = ['int1', 'uint8', 'uint16', 'uint32', 'uint64'] + FP_TYPES = ['fp8e4b15', 'fp8e4b15x4', 'fp8e4nv', 'fp8e5', 'fp16', 'bf16', 'fp32', 'fp64'] + STANDARD_FP_TYPES = ['fp16', 'bf16', 'fp32', 'fp64'] + OTHER_TYPES = ['void'] + + class SIGNEDNESS(Enum): + SIGNED = 0 + UNSIGNED = 1 + + def __init__(self, name): + self.name = name + assert name in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES, name + if name in dtype.SINT_TYPES: + self.int_signedness = dtype.SIGNEDNESS.SIGNED + self.int_bitwidth = int(name.split('int')[-1]) + self.primitive_bitwidth = self.int_bitwidth + elif name in dtype.UINT_TYPES: + self.int_signedness = dtype.SIGNEDNESS.UNSIGNED + self.int_bitwidth = int(name.split('int')[-1]) + self.primitive_bitwidth = self.int_bitwidth + elif name in dtype.FP_TYPES: + if name == 'fp8e4b15': + self.fp_mantissa_width = 3 + self.primitive_bitwidth = 8 + self.exponent_bias = 15 + elif name == 'fp8e4b15x4': + self.fp_mantissa_width = 3 + self.primitive_bitwidth = 8 + self.exponent_bias = 15 + elif name == 'fp8e4nv': + self.fp_mantissa_width = 3 + self.primitive_bitwidth = 8 + self.exponent_bias = 7 + elif name == 'fp8e5': + self.fp_mantissa_width = 2 + self.primitive_bitwidth = 8 + self.exponent_bias = 15 + elif name == 'fp16': + self.fp_mantissa_width = 10 + self.primitive_bitwidth = 16 + self.exponent_bias = 15 + elif name == 'bf16': + self.fp_mantissa_width = 7 + self.primitive_bitwidth = 16 + self.exponent_bias = 127 + elif name == 'fp32': + self.fp_mantissa_width = 23 + self.primitive_bitwidth = 32 + self.exponent_bias = 127 + elif name == 'fp64': + self.fp_mantissa_width = 53 + self.primitive_bitwidth = 64 + self.exponent_bias = 1023 + else: + raise RuntimeError(f'Unsupported floating-point type {name}') + elif name == 'void': + self.primitive_bitwidth = 0 + + def is_fp8(self): + return 'fp8' in self.name + + def is_fp8e4nv(self): + return self.name == 'fp8e4nv' + + def is_fp8e4b15(self): + return self.name == 'fp8e4b15' + + def is_fp8e4b15x4(self): + return self.name == 'fp8e4b15x4' + + def is_fp8e5(self): + return self.name == 'fp8e5' + + def 
is_fp16(self): + return self.name == 'fp16' + + def is_bf16(self): + return self.name == 'bf16' + + def is_fp32(self): + return self.name == 'fp32' + + def is_fp64(self): + return self.name == 'fp64' + + def is_int1(self): + return self.name == 'int1' + + def is_int8(self): + return self.name == 'int8' + + def is_int16(self): + return self.name == 'int16' + + def is_int32(self): + return self.name == 'int32' + + def is_int64(self): + return self.name == 'int64' + + def is_uint8(self): + return self.name == 'uint8' + + def is_uint16(self): + return self.name == 'uint16' + + def is_uint32(self): + return self.name == 'uint32' + + def is_uint64(self): + return self.name == 'uint64' + + def is_floating(self): + return self.name in dtype.FP_TYPES + + def is_standard_floating(self): + return self.name in dtype.STANDARD_FP_TYPES + + def is_int_signed(self): + return self.name in dtype.SINT_TYPES + + def is_int_unsigned(self): + return self.name in dtype.UINT_TYPES + + def is_int(self): + return self.name in dtype.SINT_TYPES + dtype.UINT_TYPES + + def is_bool(self): + return self.is_int1() + + @staticmethod + def is_dtype(type_str): + return type_str in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES + + @staticmethod + def is_void(): + raise RuntimeError("Not implemented") + + @staticmethod + def is_block(): + return False + + @staticmethod + def is_ptr(): + return False + + def __eq__(self, other: dtype): + if not isinstance(other, dtype): + return False + return self.name == other.name + + def __ne__(self, other: dtype): + return not self.__eq__(other) + + def __hash__(self): + return hash((self.name, )) + + @property + def scalar(self): + return self + + def to_ir(self, builder: ir.builder) -> ir.type: + if self.name == 'void': + return builder.get_void_ty() + elif self.name == 'int1': + return builder.get_int1_ty() + elif self.name in ('int8', 'uint8'): + return builder.get_int8_ty() + elif self.name in ('int16', 'uint16'): + return builder.get_int16_ty() + elif self.name in ('int32', 'uint32'): + return builder.get_int32_ty() + elif self.name in ('int64', 'uint64'): + return builder.get_int64_ty() + elif self.name == 'fp8e5': + return builder.get_fp8e5_ty() + elif self.name == 'fp8e4nv': + return builder.get_fp8e4nv_ty() + elif self.name == 'fp8e4b15': + return builder.get_fp8e4b15_ty() + elif self.name == 'fp8e4b15x4': + return builder.get_fp8e4b15x4_ty() + elif self.name == 'fp16': + return builder.get_half_ty() + elif self.name == 'bf16': + return builder.get_bf16_ty() + elif self.name == 'fp32': + return builder.get_float_ty() + elif self.name == 'fp64': + return builder.get_double_ty() + raise ValueError(f'fail to convert {self} to ir type') + + def __str__(self): + return self.name + + @property + def cache_key_part(self) -> str: + """See cache_key_part() in triton.cc.""" + return self.name + + def __repr__(self): + return f'triton.language.{str(self)}' + + +class pointer_type(dtype): + + def __init__(self, element_ty: dtype, address_space: int = 1): + if not isinstance(element_ty, dtype): + raise TypeError('element_ty is a {type(element_ty).__name__}.') + self.element_ty = element_ty + self.address_space = address_space + + self.name = self.__str__() + + def to_ir(self, builder: ir.builder) -> ir.pointer_type: + return builder.get_ptr_ty(self.element_ty.to_ir(builder), 1) + + def __str__(self): + return f'pointer<{self.element_ty}>' + + def __repr__(self): + return self.__str__() + + def is_ptr(self): + return True + + def __eq__(self, other: pointer_type) -> 
bool: + if not isinstance(other, pointer_type): + return False + return self.element_ty == other.element_ty and self.address_space == other.address_space + + def __ne__(self, other: pointer_type) -> bool: + return not self.__eq__(other) + + @property + def scalar(self): + return self + + +class block_type(dtype): + + def __init__(self, element_ty: dtype, shape: List): + self.element_ty = element_ty + + # Note that block_type's shape is a list of int + # while tensor's shape is a list of constexpr. + + # shape can be empty ([]) when an input is a 0D tensor. + if not shape: + raise TypeError('0d block_type is forbidden') + if isinstance(shape[0], constexpr): + shape = [s.value for s in shape] + + self.shape = shape + self.numel = 1 + for s in self.shape: + self.numel *= s + if self.numel > TRITON_MAX_TENSOR_NUMEL: + raise ValueError(f"numel ({self.numel}) exceeds triton maximum tensor numel ({TRITON_MAX_TENSOR_NUMEL})") + + self.name = self.__str__() + + def to_ir(self, builder: ir.builder) -> ir.block_type: + return builder.get_block_ty(self.element_ty.to_ir(builder), self.shape) + + def __str__(self): + return f'<{self.shape}, {self.element_ty}>' + + def __repr__(self): + return self.__str__() + + def is_block(self): + return True + + def get_block_shapes(self) -> List[int]: + return self.shape + + def __eq__(self, other: block_type) -> bool: + if not isinstance(other, block_type): + return False + return self.element_ty == other.element_ty and self.shape == other.shape + + def __ne__(self, other: block_type) -> bool: + return not self.__eq__(other) + + @property + def scalar(self): + return self.element_ty + + +class function_type(dtype): + + def __init__(self, ret_types: List[dtype], param_types: List[dtype]) -> None: + self.ret_types = ret_types + self.param_types = param_types + + def __str__(self): + return f'fn ({self.param_types}) -> {self.ret_types}' + + def to_ir(self, builder: ir.builder): + ir_param_types = [ty.to_ir(builder) for ty in self.param_types] + ret_types = [ret_type.to_ir(builder) for ret_type in self.ret_types] + return builder.get_function_ty(ir_param_types, ret_types) + + +# scalar types +void = dtype('void') +int1 = dtype('int1') +int8 = dtype('int8') +int16 = dtype('int16') +int32 = dtype('int32') +int64 = dtype('int64') +uint8 = dtype('uint8') +uint16 = dtype('uint16') +uint32 = dtype('uint32') +uint64 = dtype('uint64') +float8e5 = dtype('fp8e5') +float8e4nv = dtype('fp8e4nv') +float8e4b15 = dtype('fp8e4b15') +float8e4b15x4 = dtype('fp8e4b15x4') +float16 = dtype('fp16') +bfloat16 = dtype('bf16') +float32 = dtype('fp32') +float64 = dtype('fp64') +# pointer types +pi32_t = pointer_type(int32) + +# ----------------------- +# constexpr +# ----------------------- + + +class constexpr: + """ + This class is used to store a value that is known at compile-time. 
+ """ + + def __init__(self, value): + if isinstance(value, constexpr): + self.value = value.value + else: + self.value = value + + def __repr__(self) -> str: + return f"constexpr[{self.value}]" + + def __index__(self): + return self.value + + def __add__(self, other): + return constexpr(self.value + other.value) + + def __radd__(self, other): + return constexpr(other.value + self.value) + + def __sub__(self, other): + return constexpr(self.value - other.value) + + def __rsub__(self, other): + return constexpr(other.value - self.value) + + def __mul__(self, other): + return constexpr(self.value * other.value) + + def __mod__(self, other): + return constexpr(self.value % other.value) + + def __rmul__(self, other): + return constexpr(other.value * self.value) + + def __truediv__(self, other): + return constexpr(self.value / other.value) + + def __rtruediv__(self, other): + return constexpr(other.value / self.value) + + def __floordiv__(self, other): + return constexpr(self.value // other.value) + + def __rfloordiv__(self, other): + return constexpr(other.value // self.value) + + def __gt__(self, other): + return constexpr(self.value > other.value) + + def __rgt__(self, other): + return constexpr(other.value > self.value) + + def __ge__(self, other): + return constexpr(self.value >= other.value) + + def __rge__(self, other): + return constexpr(other.value >= self.value) + + def __lt__(self, other): + return constexpr(self.value < other.value) + + def __rlt__(self, other): + return constexpr(other.value < self.value) + + def __le__(self, other): + return constexpr(self.value <= other.value) + + def __rle__(self, other): + return constexpr(other.value <= self.value) + + def __eq__(self, other): + return constexpr(self.value == other.value) + + def __ne__(self, other): + return constexpr(self.value != other.value) + + def __bool__(self): + return bool(self.value) + + def __neg__(self): + return constexpr(-self.value) + + def __and__(self, other): + return constexpr(self.value & other.value) + + def logical_and(self, other): + return constexpr(self.value and other.value) + + def __or__(self, other): + return constexpr(self.value | other.value) + + def __xor__(self, other): + return constexpr(self.value ^ other.value) + + def logical_or(self, other): + return constexpr(self.value or other.value) + + def __pos__(self): + return constexpr(+self.value) + + def __invert__(self): + return constexpr(~self.value) + + def __pow__(self, other): + return constexpr(self.value**other.value) + + def __rshift__(self, other): + return constexpr(self.value >> other.value) + + def __lshift__(self, other): + return constexpr(self.value << other.value) + + def __not__(self): + return constexpr(not self.value) + + def __call__(self, *args, **kwds): + return self.value(*args, **kwds) + + +class tensor: + + def __init__(self, handle, type: dtype): + # IR handle + self.handle = handle + # Block shape + self.shape = type.shape if type.is_block() else () + self.numel = 1 + for s in self.shape: + self.numel *= s + self.numel = constexpr(self.numel) + self.type = type # Tensor type (can be block_type) + # Following the practice in pytorch, dtype is scalar type + self.dtype = type.scalar + self.shape = [constexpr(s) for s in self.shape] + + def __str__(self) -> str: + # ex. 
"float32[16, 32]" + return str(self.dtype) + '[' + ', '.join(str(s) for s in self.shape) + ']' + + @builtin + def __add__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.add(self, other, _builder) + + @builtin + def __radd__(self, other, _builder=None): + return self.__add__(other, _builder=_builder) + + @builtin + def __sub__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.sub(self, other, _builder) + + @builtin + def __rsub__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.sub(other, self, _builder) + + @builtin + def __mul__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.mul(self, other, _builder) + + @builtin + def __rmul__(self, other, _builder=None): + return self.__mul__(other, _builder=_builder) + + @builtin + def __truediv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.truediv(self, other, _builder) + + @builtin + def __rtruediv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.truediv(other, self, _builder) + + @builtin + def __floordiv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.floordiv(self, other, _builder) + + @builtin + def __rfloordiv__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.floordiv(other, self, _builder) + + @builtin + def __mod__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.mod(self, other, _builder) + + @builtin + def __rmod__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.mod(other, self, _builder) + + # unary operators + @builtin + def __neg__(self, _builder=None): + return semantic.minus(self, _builder) + + @builtin + def __invert__(self, _builder=None): + return semantic.invert(self, _builder) + + # bitwise operators + + @builtin + def __and__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.and_(self, other, _builder) + + @builtin + def __rand__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.and_(other, self, _builder) + + @builtin + def __or__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.or_(self, other, _builder) + + @builtin + def __ror__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.or_(other, self, _builder) + + @builtin + def __xor__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.xor_(self, other, _builder) + + @builtin + def __rxor__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.xor_(other, self, _builder) + + @builtin + def __lshift__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.shl(self, other, _builder) + + @builtin + def __rlshift__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.shl(other, self, _builder) + + @builtin + def __rshift__(self, other, _builder=None): + other = _to_tensor(other, _builder) + if self.dtype.is_int_signed(): + return semantic.ashr(self, other, _builder) + else: + return semantic.lshr(self, other, _builder) + + @builtin + def __rrshift__(self, other, _builder=None): + other = _to_tensor(other, _builder) + if self.dtype.is_int_signed(): + return semantic.ashr(other, self, _builder) + else: + return semantic.lshr(other, self, _builder) + + # > 
+ @builtin + def __gt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_than(self, other, _builder) + + @builtin + def __rgt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_than(other, self, _builder) + + # >= + @builtin + def __ge__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_equal(self, other, _builder) + + @builtin + def __rge__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.greater_equal(other, self, _builder) + + # < + @builtin + def __lt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_than(self, other, _builder) + + @builtin + def __rlt__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_than(other, self, _builder) + + # <= + @builtin + def __le__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_equal(self, other, _builder) + + @builtin + def __rle__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.less_equal(other, self, _builder) + + # == + @builtin + def __eq__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.equal(self, other, _builder) + + @builtin + def __req__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.equal(other, self, _builder) + + @builtin + def __ne__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.not_equal(self, other, _builder) + + @builtin + def __rne__(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.not_equal(other, self, _builder) + + @builtin + def logical_and(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.logical_and(self, other, _builder) + + @builtin + def logical_or(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.logical_or(self, other, _builder) + + # note: __not__ isn't actually a magic method in python + # but it's ok because our ASTVisitor handles it + @builtin + def __not__(self, _builder=None): + return semantic.not_(self, _builder) + + @builtin + def __getitem__(self, slices, _builder=None): + if isinstance(slices, (slice, constexpr)): + slices = [slices] + ret = self + for dim, sl in enumerate(slices): + if sl is None or isinstance(sl, constexpr) and sl.value is None: + ret = semantic.expand_dims(ret, dim, _builder) + elif isinstance(sl, slice) and sl.start is None and sl.stop is None and sl.step is None: + pass + else: + assert False, f"unsupported tensor index: {sl}" + return ret + + @property + def T(self): + assert False, "Transposition must be created by the AST Visitor" + + @builtin + def to(self, dtype, bitcast=False, _builder=None): + if isinstance(bitcast, constexpr): + bitcast = bitcast.value + if bitcast: + return semantic.bitcast(self, dtype, _builder) + return semantic.cast(self, dtype, _builder) + + +# ----------------------- +# SPMD Programming Model +# ----------------------- +def _constexpr_to_value(v): + if isinstance(v, constexpr): + return v.value + return v + + +@builtin +def program_id(axis, _builder=None): + """ + Returns the id of the current program instance along the given :code:`axis`. + + :param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2. 
+ :type axis: int + """ + # if axis == -1: + # pid0 = program_id(0, _builder) + # pid1 = program_id(1, _builder) + # pid2 = program_id(2, _builder) + # npg0 = num_programs(0, _builder) + # npg1 = num_programs(0, _builder) + # return pid0 + pid1*npg0 + pid2*npg0*npg1 + axis = _constexpr_to_value(axis) + return semantic.program_id(axis, _builder) + + +@builtin +def num_programs(axis, _builder=None): + """ + Returns the number of program instances launched along the given :code:`axis`. + + :param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2. + :type axis: int + """ + axis = _constexpr_to_value(axis) + return semantic.num_programs(axis, _builder) + + +# ----------------------- +# Block Initialization +# ----------------------- + + +@builtin +def arange(start, end, _builder=None): + """ + Returns contiguous values within the left-closed and right-open interval [:code:`start`, :code:`end`). \ + End - Start must be less than or equal to TRITON_MAX_TENSOR_NUMEL = 131072 + + :param start: Start of the interval. Must be a power of two. + :type start: int32 + :param end: End of the interval. Must be a power of two > start. + :type end: int32 + """ + start = _constexpr_to_value(start) + end = _constexpr_to_value(end) + return semantic.arange(start, end, _builder) + + +def _shape_check_impl(shape): + shape = _constexpr_to_value(shape) + for i, d in enumerate(shape): + if isinstance(d, int): + d = constexpr(d) + if not isinstance(d, constexpr): + raise TypeError(f"Shape element {i} must have type `constexpr`") + if not isinstance(d.value, int): + raise TypeError(f"Shape element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") + return [_constexpr_to_value(x) for x in shape] + + +@builtin +def full(shape, value, dtype, _builder=None): + """ + Returns a tensor filled with the scalar value for the given :code:`shape` and :code:`dtype`. + + :param shape: Shape of the new array, e.g., (8, 16) or (8, ) + :value value: A scalar value to fill the array with + :type shape: tuple of ints + :param dtype: Data-type of the new array, e.g., :code:`tl.float16` + :type dtype: DType + """ + shape = _shape_check_impl(shape) + value = _constexpr_to_value(value) + dtype = _constexpr_to_value(dtype) + return semantic.full(shape, value, dtype, _builder) + + +# ----------------------- +# Shape Manipulation +# ----------------------- + + +@builtin +def broadcast(input, other, _builder=None): + """ + Tries to broadcast the two given blocks to a common compatible shape. + + :param input: The first input tensor. + :type input: Block + :param other: The second input tensor. + :type other: Block + """ + return semantic.broadcast_impl_value(input, other, _builder) + + +@builtin +def broadcast_to(input, shape, _builder=None): + """ + Tries to broadcast the given tensor to a new :code:`shape`. + + :param input: The input tensor. + :type input: Block + :param shape: The desired shape. + :type shape: Tuple[int] + """ + shape = _shape_check_impl(shape) + return semantic.broadcast_impl_shape(input, shape, _builder) + + +@builtin +def trans(input, _builder=None): + """ + Returns a transposed tensor. + + :param input: The input tensor. + :type input: + """ + return semantic.trans(input, _builder) + + +@builtin +def cat(input, other, can_reorder=False, _builder=None): + """ + Concatenate the given blocks + + :param input: The first input tensor. + :type input: + :param other: The second input tensor. + :type other: + :param reorder: Compiler hint. 
If true, the compiler is + allowed to reorder elements while concatenating inputs. Only use if the + order does not matter (e.g., result is only used in reduction ops) + """ + return semantic.cat(input, other, can_reorder, _builder) + + +@builtin +def view(input, shape, _builder=None): + """ + Returns a tensor with the same elements as `input` but a different shape. + The order of the elements may not be preserved. + + :param input: The input tensor. + :type input: + :param shape: The desired shape. + :type shape: Tuple[int] + + """ + shape = _shape_check_impl(shape) + return semantic.view(input, shape, _builder) + + +@builtin +def reshape(input, shape, _builder=None): + """ + Returns a tensor with the same number of elements as input but with the + provided shape. + + :param input: The input tensor. + :type input: + :param shape: The new shape. + :type shape: Tuple[int] + """ + shape = _shape_check_impl(shape) + return semantic.reshape(input, shape, _builder) + + +def _wrap_axis(axis, ndim): + if not (-ndim <= axis < ndim): + raise ValueError(f"invalid axis {axis}. Expected {-ndim} <= axis < {ndim}") + + return axis if axis >= 0 else axis + ndim + + +@builtin +def expand_dims(input, axis, _builder=None): + """ + Expand the shape of a tensor, by inserting new length-1 dimensions. + + Axis indices are with respect to the resulting tensor, so + ``result.shape[axis]`` will be 1 for each axis. + + :param input: The input tensor. + :type input: tl.tensor + :param axis: The indices to add new axes + :type axis: int | Sequence[int] + + """ + axis = _constexpr_to_value(axis) + axes = list(axis) if isinstance(axis, Sequence) else [axis] + new_ndim = len(input.shape) + len(axes) + axes = [_wrap_axis(_constexpr_to_value(d), new_ndim) for d in axes] + + if len(set(axes)) != len(axes): + raise ValueError(f"expand_dims recieved duplicate axes, normalized axes = {axes}") + + ret = input + for a in sorted(axes): + ret = semantic.expand_dims(ret, a, _builder) + return ret + + +# ----------------------- +# Linear Algebra +# ----------------------- + + +@builtin +def dot(input, other, acc=None, allow_tf32=True, max_num_imprecise_acc=None, out_dtype=float32, _builder=None): + """ + Returns the matrix product of two blocks. + + The two blocks must be two-dimensional and have compatible inner dimensions. + + :param input: The first tensor to be multiplied. + :type input: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} + :param other: The second tensor to be multiplied. 
+ :type other: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} + """ + allow_tf32 = _constexpr_to_value(allow_tf32) + out_dtype = _constexpr_to_value(out_dtype) + max_num_imprecise_acc = _constexpr_to_value(max_num_imprecise_acc) + return semantic.dot(input, other, acc, allow_tf32, max_num_imprecise_acc, out_dtype, _builder) + + +# ----------------------- +# Non-Atomic Memory Operations +# ----------------------- + + +@builtin +def load(pointer, mask=None, other=None, boundary_check=tuple(), padding_option="", cache_modifier="", + eviction_policy="", volatile=False, _builder=None): + """ + Return a tensor of data whose values are loaded from memory at location defined by `pointer`: + (1) `pointer` could be a single element pointer, then a scalar will be loaded + + - `mask` and `other` must be scalar too + - `other` is implicitly typecast to `pointer.dtype.element_ty` + - `boundary_check` and `padding_option` must be empty + + (2) `pointer` could be element-wise tensor of pointers, in which case: + + - `mask` and `other` are implicitly broadcast to `pointer.shape` + - `other` is implicitly typecast to `pointer.dtype.element_ty` + - `boundary_check` and `padding_option` must be empty + + (3) `pointer` could be a block pointer defined by `make_block_ptr`, in which case: + + - `mask` and `other` must be None + - `boundary_check` and `padding_option` can be specified to control the behavior of out-of-bound access + + :param pointer: Pointer to the data to be loaded + :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType` + :param mask: if `mask[idx]` is false, do not load the data at address `pointer[idx]` + (must be `None` with block pointers) + :type mask: Block of `triton.int1`, optional + :param other: if `mask[idx]` is false, return `other[idx]` + :type other: Block, optional + :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check + :type boundary_check: tuple of ints, optional + :param padding_option: should be one of {"", "zero", "nan"}, do padding while out of bound + :param cache_modifier: changes cache option in NVIDIA PTX + :type cache_modifier: str, optional + :param eviction_policy: changes eviction policy in NVIDIA PTX + :type eviction_policy: str, optional + :param volatile: changes volatile option in NVIDIA PTX + :type volatile: bool, optional + """ + # `mask` and `other` can be constexpr + if _constexpr_to_value(mask) is not None: + mask = _to_tensor(mask, _builder) + if _constexpr_to_value(other) is not None: + other = _to_tensor(other, _builder) + padding_option = _constexpr_to_value(padding_option) + cache_modifier = _constexpr_to_value(cache_modifier) + eviction_policy = _constexpr_to_value(eviction_policy) + volatile = _constexpr_to_value(volatile) + return semantic.load(pointer, mask, other, boundary_check, padding_option, cache_modifier, eviction_policy, + volatile, _builder) + + +@builtin +def store(pointer, value, mask=None, boundary_check=(), cache_modifier="", eviction_policy="", _builder=None): + """ + Store a tensor of data into memory locations defined by `pointer`: + (1) `pointer` could be a single element pointer, then a scalar will be stored + + - `mask` must be scalar too + - `boundary_check` and `padding_option` must be empty + + (2) `pointer` could be element-wise tensor of pointers, in which case: + + - `mask` is implicitly broadcast to `pointer.shape` + - `boundary_check` must be empty + + (3) or `pointer` could be a block pointer defined by 
`make_block_ptr`, in which case: + + - `mask` must be None + - `boundary_check` can be specified to control the behavior of out-of-bound access + + `value` is implicitly broadcast to `pointer.shape` and typecast to `pointer.dtype.element_ty`. + + :param pointer: The memory location where the elements of `value` are stored + :type pointer: `triton.PointerType`, or block of `dtype=triton.PointerType` + :param value: The tensor of elements to be stored + :type value: Block + :param mask: If `mask[idx]` is false, do not store `value[idx]` at `pointer[idx]` + :type mask: Block of triton.int1, optional + :param boundary_check: tuple of integers, indicating the dimensions which should do the boundary check + :type boundary_check: tuple of ints, optional + :param cache_modifier: changes cache option in NVIDIA PTX + :type cache_modifier: str, optional + :param eviction_policy: changes eviction policy in NVIDIA PTX + :type eviction_policy: str, optional + """ + # `value` can be constexpr + value = _to_tensor(value, _builder) + if _constexpr_to_value(mask) is not None: + mask = _to_tensor(mask, _builder) + cache_modifier = _constexpr_to_value(cache_modifier) + eviction_policy = _constexpr_to_value(eviction_policy) + return semantic.store(pointer, value, mask, boundary_check, cache_modifier, eviction_policy, _builder) + + +@builtin +def make_block_ptr(base: tensor, shape, strides, offsets, block_shape, order, _builder=None): + """ + Returns a pointer to a block in a parent tensor + + :param base: The base pointer to the parent tensor + :param shape: The shape of the parent tensor + :param strides: The strides of the parent tensor + :param offsets: The offsets to the block + :param block_shape: The shape of the block + :param order: The order of the original data format + """ + return semantic.make_block_ptr(base, shape, strides, offsets, block_shape, order, _builder) + + +@builtin +def advance(base: tensor, offsets, _builder=None): + """ + Advance a block pointer + + :param base: the block pointer to advance + :param offsets: the offsets to advance, a tuple by dimension + """ + return semantic.advance(base, offsets, _builder) + + +# ----------------------- +# Atomic Memory Operations +# ----------------------- + + +def _add_atomic_docstr(name: str, has_cmp: bool = False) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = f""" + Performs an atomic {name} at the memory location specified by :code:`pointer`. + + Return the data stored at :code:`pointer` before the atomic operation. 
+ + :param pointer: The memory locations to operate on + :type pointer: Block of dtype=triton.PointerDType""" + if has_cmp: + docstr += """ + :param cmp: The values expected to be found in the atomic object + :type cmp: Block of dtype=pointer.dtype.element_ty""" + docstr += """ + :param val: The values with which to perform the atomic operation + :type val: Block of dtype=pointer.dtype.element_ty + :param sem: Memory semantics to use ("ACQUIRE_RELEASE" (default), + "ACQUIRE", "RELEASE", or "RELAXED") + :type sem: str + :param scope: Scope of threads that observe synchronizing effect of the + atomic operation ("GPU" (default), "CTA", or "SYSTEM") + :type scope: str + """ + func.__doc__ = docstr + return func + + return _decorator + + +@builtin +@_add_atomic_docstr("compare-and-swap", has_cmp=True) +def atomic_cas(pointer, cmp, val, sem=None, scope=None, _builder=None): + cmp = _to_tensor(cmp, _builder) + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_cas(pointer, cmp, val, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("exchange") +def atomic_xchg(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_xchg(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("add") +def atomic_add(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_add(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("max") +def atomic_max(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_max(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("min") +def atomic_min(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_min(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("logical and") +def atomic_and(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_and(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("logical or") +def atomic_or(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_or(pointer, val, mask, sem, scope, _builder) + + +@builtin +@_add_atomic_docstr("logical xor") +def atomic_xor(pointer, val, mask=None, sem=None, scope=None, _builder=None): + val = _to_tensor(val, _builder) + sem = _constexpr_to_value(sem) + scope = _constexpr_to_value(scope) + return semantic.atomic_xor(pointer, val, mask, sem, scope, _builder) + + +# ----------------------- +# Conditioning +# ----------------------- + + +@builtin +def where(condition, x, y, _builder=None): + """ + Returns a tensor of elements from either :code:`x` or :code:`y`, depending on :code:`condition`. + + Note that :code:`x` and :code:`y` are always evaluated regardless of the value of :code:`condition`. 
+ + If you want to avoid unintended memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead. + + The shape of :code:`x` and :code:`y` are both broadcast to the shape of :code:`condition`. + :code:`x` and :code:`y` must have the same data type. + + :param condition: When True (nonzero), yield x, otherwise yield y. + :type condition: Block of triton.bool + :param x: values selected at indices where condition is True. + :param y: values selected at indices where condition is False. + """ + condition = _to_tensor(condition, _builder) + x = _to_tensor(x, _builder) + y = _to_tensor(y, _builder) + return semantic.where(condition, x, y, _builder) + + +# ----------------------- +# Math +# ----------------------- + + +@builtin +def umulhi(x, y, _builder=None): + """ + Returns the most significant 32 bits of the product of x and y. + + :param x: the input tensor + :type x: int32 + :param y: the input tensor + :type y: int32 + """ + x = _to_tensor(x, _builder) + y = _to_tensor(y, _builder) + return semantic.umulhi(x, y, _builder) + + +@builtin +def fdiv(x, y, ieee_rounding=False, _builder=None): + """ + Returns a floating-point resultant tensor of dividing x by y. + + :param x: the input numerator value. + :param y: the input denominator value. + :param ieee_rounding: To follow IEEE-754 floating point number + rounding mechanism + :type ieee_rounding: bool + """ + ieee_rounding = _constexpr_to_value(ieee_rounding) + x = _to_tensor(x, _builder) + y = _to_tensor(y, _builder) + return semantic.fdiv(x, y, ieee_rounding, _builder) + + +def _add_math_1arg_docstr(name: str) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = """ + Computes the element-wise {name} of :code:`x`. + + :param x: the input values + :type x: Block + """ + func.__doc__ = docstr.format(name=name) + return func + + return _decorator + + +@builtin +@_add_math_1arg_docstr("exponential") +def exp(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.exp(x, _builder) + + +@builtin +@_add_math_1arg_docstr("natural logarithm") +def log(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.log(x, _builder) + + +@builtin +@_add_math_1arg_docstr("cosine") +def cos(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.cos(x, _builder) + + +@builtin +@_add_math_1arg_docstr("sine") +def sin(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.sin(x, _builder) + + +@builtin +@_add_math_1arg_docstr("square root") +def sqrt(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.sqrt(x, _builder) + + +@builtin +@_add_math_1arg_docstr("absolute value") +def abs(x, _builder=None): + x = _to_tensor(x, _builder) + return semantic.abs(x, _builder) + + +# ----------------------- +# Reductions +# ----------------------- + + +def _add_reduction_docstr(name: str, return_indices_arg: str = None, tie_break_arg: str = None) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = """ + Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis` + + :param input: the input values + :param axis: the dimension along which the reduction should be done""" + if return_indices_arg is not None: + docstr += f""" + :param {return_indices_arg}: if true, return index corresponding to the {name} value""" + if tie_break_arg is not None: + docstr += f""" + :param {tie_break_arg}: if true, return the left-most indices in case of ties for values that aren't NaN""" + + func.__doc__ = docstr.format(name=name) + return func 
+ + return _decorator + + +@contextmanager +def _insertion_guard(builder): + ip = builder.get_insertion_point() + yield + builder.restore_insertion_point(ip) + + +@builtin +def reduce(input, axis, combine_fn, _builder=None, _generator=None): + """Applies the combine_fn to all elements in :code:`input` tensors along the provided :code:`axis` + + :param input: the input tensor, or tuple of tensors + :param axis: the dimension along which the reduction should be done + :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit) + + """ + if isinstance(input, tensor): + return reduce((input, ), axis, combine_fn, _builder=_builder, _generator=_generator)[0] + + def make_combine_region(reduce_op): + in_scalar_tys = [t.type.scalar for t in input] + prototype = function_type(in_scalar_tys, in_scalar_tys * 2) + + region = reduce_op.get_region(0) + with _insertion_guard(_builder): + param_types = [ty.to_ir(_builder) for ty in prototype.param_types] + block = _builder.create_block_with_parent(region, param_types) + args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)] + results = _generator.call_JitFunction(combine_fn, args, kwargs={}) + if isinstance(results, tensor): + handles = [results.handle] + else: + handles = [r.handle for r in results] + _builder.create_reduce_ret(*handles) + + if axis is not None: + axis = _constexpr_to_value(axis) + return semantic.reduction(input, axis, make_combine_region, _builder) + + +@builtin +def _promote_reduction_input(t, _builder=None): + scalar_ty = t.type.scalar + + # hardware doesn't support FMAX, FMIN, CMP for bfloat16 + if scalar_ty is bfloat16: + return t.to(float32, _builder=_builder) + return t + + +@builtin +def _reduce_with_indices(input, axis, combine_fn, _builder=None, _generator=None): + axis = _constexpr_to_value(axis) + n = input.shape[axis] + index = arange(0, n, _builder=_builder) + + if len(input.shape) > 1: + # Broadcast index across the non-reduced axes + axes_to_expand = [constexpr(d) for d in range(len(input.shape))] + del axes_to_expand[axis] + index = expand_dims(index, axes_to_expand, _builder=_builder) + index = broadcast_to(index, input.shape, _builder=_builder) + + rvalue, rindices = reduce((input, index), axis, combine_fn, _builder=_builder, _generator=_generator) + return rvalue, rindices + + +# ----------------------- +# Scans +# ----------------------- + + +def _add_scan_docstr(name: str, return_indices_arg: str = None, tie_break_arg: str = None) -> Callable[[T], T]: + + def _decorator(func: T) -> T: + docstr = """ + Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis` + + :param input: the input values + :param axis: the dimension along which the scan should be done""" + func.__doc__ = docstr.format(name=name) + return func + + return _decorator + + +@builtin +def associative_scan(input, axis, combine_fn, _builder=None, _generator=None): + """Applies the combine_fn to each elements with a carry in :code:`input` tensors along the provided :code:`axis` and update the carry + + :param input: the input tensor, or tuple of tensors + :param axis: the dimension along which the reduction should be done + :param combine_fn: a function to combine two groups of scalar tensors (must be marked with @triton.jit) + + """ + if isinstance(input, tensor): + return associative_scan((input, ), axis, combine_fn, _builder=_builder, _generator=_generator)[0] + + def make_combine_region(scan_op): + in_scalar_tys = [t.type.scalar for t in input] 
+ prototype = function_type(in_scalar_tys, in_scalar_tys * 2) + + region = scan_op.get_region(0) + with _insertion_guard(_builder): + param_types = [ty.to_ir(_builder) for ty in prototype.param_types] + block = _builder.create_block_with_parent(region, param_types) + args = [tensor(block.arg(i), ty) for i, ty in enumerate(prototype.param_types)] + results = _generator.call_JitFunction(combine_fn, args, kwargs={}) + if isinstance(results, tensor): + handles = [results.handle] + else: + handles = [r.handle for r in results] + _builder.create_scan_ret(*handles) + + axis = _constexpr_to_value(axis) + return semantic.associative_scan(input, axis, make_combine_region, _builder) + + +# ----------------------- +# Compiler Hint Ops +# ----------------------- + + +@builtin +def debug_barrier(_builder=None): + ''' + Insert a barrier to synchronize all threads in a block. + ''' + return semantic.debug_barrier(_builder) + + +@builtin +def multiple_of(input, values, _builder=None): + """ + Let the compiler know that the values in :code:`input` are all multiples of :code:`value`. + """ + if isinstance(values, constexpr): + values = [values] + for i, d in enumerate(values): + if not isinstance(d, constexpr): + raise TypeError(f"values element {i} must have type `constexpr`") + if not isinstance(d.value, int): + raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") + values = [x.value for x in values] + return semantic.multiple_of(input, values) + + +@builtin +def max_contiguous(input, values, _builder=None): + """ + Let the compiler know that the `value` first values in :code:`input` are contiguous. + """ + if isinstance(values, constexpr): + values = [values] + for i, d in enumerate(values): + if not isinstance(d, constexpr): + raise TypeError(f"values element {i} must have type `constexpr`") + if not isinstance(d.value, int): + raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") + values = [x.value for x in values] + return semantic.max_contiguous(input, values) + + +@builtin +def max_constancy(input, values, _builder=None): + """ + Let the compiler know that the `value` first values in :code:`input` are constant. + + e.g. if :code:`values` is [4], then each group of 4 values in :code:`input` should all be equal, + for example [0, 0, 0, 0, 1, 1, 1, 1]. + """ + if isinstance(values, constexpr): + values = [values] + for i, d in enumerate(values): + if not isinstance(d, constexpr): + raise TypeError(f"values element {i} must have type `constexpr`") + if not isinstance(d.value, int): + raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]") + values = [x.value for x in values] + return semantic.max_constancy(input, values) + + +# ----------------------- +# Debugging functions +# ----------------------- + + +@builtin +def static_print(*values, sep: str = " ", end: str = "\n", file=None, flush=False, _builder=None): + ''' + Print the values at compile time. The parameters are the same as the builtin :code:`print`. + + NOTE: Calling the Python builtin :code:`print` is not the same as calling this, it instead maps to :code:`device_print`, + which has special requirements for the arguments. + + .. highlight:: python + .. code-block:: python + + tl.static_print(f"{BLOCK_SIZE=}") + ''' + pass + + +@builtin +def static_assert(cond, msg="", _builder=None): + ''' + Assert the condition at compile time. 
Does not require that the :code:`TRITON_DEBUG` environment variable + is set. + + .. highlight:: python + .. code-block:: python + + tl.static_assert(BLOCK_SIZE == 1024) + ''' + pass + + +@builtin +def device_print(prefix, *args, _builder=None): + ''' + Print the values at runtime from the device. String formatting does not work for runtime values, so you should + provide the values you want to print as arguments. The first value must be a string, all following values must + be scalars or tensors. + + Calling the Python builtin :code:`print` is the same as calling this function, and the requirements for the arguments will match + this function (not the normal requirements for :code:`print`). + + .. highlight:: python + .. code-block:: python + + tl.device_print("pid", pid) + print("pid", pid) + + :param prefix: a prefix to print before the values. This is required to be a string literal. + :param args: the values to print. They can be any tensor or scalar. + ''' + import string + prefix = _constexpr_to_value(prefix) + assert isinstance(prefix, str), f"{prefix} is not string" + b_ascii = True + for ch in prefix: + if ch not in string.printable: + b_ascii = False + break + assert b_ascii, f"{prefix} is not an ascii string" + new_args = [] + for arg in args: + new_args.append(_to_tensor(arg, _builder)) + return semantic.device_print(prefix, new_args, _builder) + + +@builtin +def device_assert(cond, msg="", _builder=None): + ''' + Assert the condition at runtime from the device. Requires that the environment variable :code:`TRITON_DEBUG` + is set to a value besides :code:`0` in order for this to have any effect. + + Using the Python :code:`assert` statement is the same as calling this function, except that the second argument + must be provided and must be a string, e.g. :code:`assert pid == 0, "pid != 0"`. The environment variable must + be set for this :code:`assert` statement to have any effect. + + .. highlight:: python + .. code-block:: python + + tl.device_assert(pid == 0) + assert pid == 0, f"pid != 0" + + :param cond: the condition to assert. This is required to be a boolean tensor. + :param msg: the message to print if the assertion fails. This is required to be a string literal. + ''' + msg = _constexpr_to_value(msg) + import inspect + frame = inspect.currentframe() + module = inspect.getmodule(frame) + # The triton function module doesn't have the name attribute. + # We use this trick to find the caller. + while hasattr(module, "__name__"): + frame = frame.f_back + module = inspect.getmodule(frame) + lineno = 0 + func_name = 'unknown' + file_name = 'unknown' + if frame is not None and frame.f_back is not None: + func_name = frame.f_code.co_name + file_name = frame.f_back.f_code.co_filename + # TODO: The line number currently indicates the line + # where the triton function is called but not where the + # device_assert is called. Need to enhance this. 
+ lineno = frame.f_back.f_lineno + return semantic.device_assert(_to_tensor(cond, _builder), msg, file_name, func_name, lineno, _builder) + + +@builtin +def inline_asm_elementwise(asm: str, constraints: str, args: list, dtype, is_pure: bool, pack: int, _builder=None): + ''' + Execute the inline assembly to a packed of elements of the tensor + :param asm: assembly to be inlined, it has to match the target assembly format + :param constraints: string representing the mapping of operands to register + :param args: the arguments of the operation + :param dtype: the element type of the returned variable + :param is_pure: whether the operation is pure + :param pack: the number of elements to be processed by one instance of inline assembly + :param _builder: the builder + :return: the return value of the function + ''' + asm = _constexpr_to_value(asm) + constraints = _constexpr_to_value(constraints) + pack = _constexpr_to_value(pack) + is_pure = _constexpr_to_value(is_pure) + res_ty = dtype + dispatch_args = [_to_tensor(arg, _builder) for arg in args] + if dispatch_args: + bin_op_type_checking = partial( + semantic.binary_op_type_checking_impl, + builder=_builder, + arithmetic_check=False, + allow_lhs_ptr=True, + allow_rhs_ptr=True, + ) + broadcast_arg = dispatch_args[0] + # Get the broadcast shape over all the arguments + for item in dispatch_args: + _, broadcast_arg = bin_op_type_checking(item, broadcast_arg) + if broadcast_arg.shape: + # Change the shape of each argument based on the broadcast shape + for i, item in enumerate(dispatch_args): + dispatch_args[i], _ = bin_op_type_checking(item, broadcast_arg) + res_ty = block_type(dtype, broadcast_arg.shape) + handles = [t.handle for t in dispatch_args] + call = _builder.create_inline_asm(asm, constraints, handles, res_ty.to_ir(_builder), is_pure, pack) + return tensor(call, res_ty) + + +# ----------------------- +# Iterators +# ----------------------- + + +class static_range: + """ + Iterator that counts upward forever. + + .. highlight:: python + .. code-block:: python + + @triton.jit + def kernel(...): + for i in tl.static_range(10): + ... + :note: This is a special iterator used to implement similar semantics to Python's :code:`range` in the context of + :code:`triton.jit` functions. In addition, it also guides the compiler to unroll the loop aggressively. + :param arg1: the start value. + :param arg2: the end value. + :param step: the step value. 
+ """ + + def __init__(self, arg1, arg2=None, step=None): + assert isinstance(arg1, constexpr) + if step is None: + self.step = constexpr(1) + else: + assert isinstance(step, constexpr) + self.step = step + if arg2 is None: + self.start = constexpr(0) + self.end = arg1 + else: + assert isinstance(arg2, constexpr) + self.start = arg1 + self.end = arg2 + + def __iter__(self): + raise RuntimeError("static_range can only be used in @triton.jit'd functions") + + def __next__(self): + raise RuntimeError("static_range can only be used in @triton.jit'd functions") + + +# ----------------------- +# Extern functions +# ----------------------- + + +def dispatch(func, lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, ret_shape: tuple, + is_pure: bool, _builder=None): + ''' + Dispatch a function to a library + :param func: the function to dispatch + :param lib_name: the name of the library + :param lib_path: the path of the library + :param args: the arguments of the function + :param arg_type_symbol_dict: the type of the arguments + :param ret_shape: the shape of the return value + :param _builder: the builder + :return: the return value of the function + ''' + if len(arg_type_symbol_dict) == 0: + raise ValueError("arg_type_symbol_dict is empty") + + num_args = len(list(arg_type_symbol_dict.keys())[0]) + if len(args) != num_args: + raise ValueError(f"length of input args does not match." + f"Expect {len(args)}, got {num_args}") + + arg_types = [] + arg_list = [] + for arg in args: + if isinstance(arg, tensor): + arg_types.append(arg.dtype) + arg_list.append(arg.handle) + else: + arg_types.append(type(arg)) + arg_list.append(arg) + arg_types = tuple(arg_types) + + if arg_types not in arg_type_symbol_dict: + raise ValueError(f"input arg type does not match." 
+ f"Expect one of {arg_type_symbol_dict.keys()}, got {arg_types}") + else: + symbol = arg_type_symbol_dict[arg_types][0] + ret_type = arg_type_symbol_dict[arg_types][1] + if ret_shape: + ret_type = block_type(ret_type, ret_shape) + return tensor(func(lib_name, lib_path, symbol, arg_list, ret_type.to_ir(_builder), is_pure), ret_type) + + +def extern_elementwise(lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, is_pure: bool, + _builder=None): + ''' + Dispatch an elementwise function to a library + :param lib_name: the name of the library + :param lib_path: the path of the library + :param args: the arguments of the function + :param arg_type_symbol_dict: the type of the arguments + :param is_pure: whether the function is pure + :param _builder: the builder + :return: the return value of the function + ''' + dispatch_args = args.copy() + all_scalar = True + ret_shape = None + arg_types = [] + for i in range(len(dispatch_args)): + dispatch_args[i] = _to_tensor(dispatch_args[i], _builder) + arg_types.append(dispatch_args[i].dtype) + if dispatch_args[i].type.is_block(): + all_scalar = False + if len(arg_types) > 0: + arg_types = tuple(arg_types) + arithmetic_check = True + # If there's a type tuple that is not supported by the library, we will do arithmetic check + if arg_types in arg_type_symbol_dict: + arithmetic_check = False + broadcast_arg = dispatch_args[0] + # Get the broadcast shape over all the arguments + for i, item in enumerate(dispatch_args): + _, broadcast_arg = semantic.binary_op_type_checking_impl(item, broadcast_arg, _builder, + arithmetic_check=arithmetic_check) + # Change the shape of each argument based on the broadcast shape + for i in range(len(dispatch_args)): + dispatch_args[i], _ = semantic.binary_op_type_checking_impl(dispatch_args[i], broadcast_arg, _builder, + arithmetic_check=arithmetic_check) + if not all_scalar: + ret_shape = broadcast_arg.shape + func = getattr(_builder, "create_extern_elementwise") + return dispatch(func, lib_name, lib_path, dispatch_args, arg_type_symbol_dict, ret_shape, is_pure, _builder) + + +def binary_op_type_legalization(lhs, rhs, builder): + ''' + Convert both operands to a single common type + :param lhs: the left operand + :param rhs: the right operand + :param builder: the builder + ''' + return semantic.binary_op_type_checking_impl(lhs, rhs, builder) + + +def extern(fn): + """A decorator for external functions.""" + return builtin(fn) diff --git a/venv/lib/python3.10/site-packages/triton/language/extra/__init__.py b/venv/lib/python3.10/site-packages/triton/language/extra/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2fd0ff3eeee37a0d4f7d44ef36f66b41895ff09f --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/language/extra/__init__.py @@ -0,0 +1,3 @@ +from . 
import cuda + +__all__ = ['cuda'] diff --git a/venv/lib/python3.10/site-packages/triton/language/extra/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/language/extra/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4f885bdc91bfaf25900ff4c8d713530dbdeac1d Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/language/extra/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/language/extra/__pycache__/cuda.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/language/extra/__pycache__/cuda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..126b9181ba747b902070ea4a87d847b7e888dbc6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/language/extra/__pycache__/cuda.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/triton/language/extra/cuda.py b/venv/lib/python3.10/site-packages/triton/language/extra/cuda.py new file mode 100644 index 0000000000000000000000000000000000000000..1cb494d9fa7fea9abafbb91bba149bd0461a3f4f --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/language/extra/cuda.py @@ -0,0 +1,18 @@ +from .. import core + + +@core.extern +def globaltimer(_builder=None): + return core.inline_asm_elementwise("mov.u64 $0, %globaltimer;", "=l", [], dtype=core.int64, is_pure=False, pack=1, + _builder=_builder) + + +@core.extern +def smid(_builder=None): + return core.inline_asm_elementwise("mov.u32 $0, %smid;", "=r", [], dtype=core.int32, is_pure=True, pack=1, + _builder=_builder) + + +@core.builtin +def num_threads(_builder=None): + return core.constexpr(_builder.options.num_warps * 32) diff --git a/venv/lib/python3.10/site-packages/triton/language/math.py b/venv/lib/python3.10/site-packages/triton/language/math.py new file mode 100644 index 0000000000000000000000000000000000000000..1cbad660d780bc10a541d7274890c1c2500e966c --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/language/math.py @@ -0,0 +1,1676 @@ +import functools +import os + +from ..common.build import is_hip +from . 
import core + + +@functools.lru_cache() +def libdevice_path(): + third_party_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "third_party") + if is_hip(): + default = os.path.join(third_party_dir, "hip", "lib", "bitcode", "cuda2gcn.bc") + else: + default = os.path.join(third_party_dir, "cuda", "lib", "libdevice.10.bc") + + return os.getenv("TRITON_LIBDEVICE_PATH", default) + + +@core.extern +def clz(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_clz", core.dtype("int32")), + (core.dtype("int64"), ): ("__nv_clzll", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def popc(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_popc", core.dtype("int32")), + (core.dtype("int64"), ): ("__nv_popcll", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def byte_perm(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("int32"), core.dtype("int32"), core.dtype("int32")): ("__nv_byte_perm", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def min(arg0, arg1, _builder=None): + arg0 = core._to_tensor(arg0, _builder) + arg1 = core._to_tensor(arg1, _builder) + arg0, arg1 = core.binary_op_type_legalization(arg0, arg1, _builder) + dtype = arg0.dtype + if dtype.is_floating(): + return core.tensor(_builder.create_minf(arg0.handle, arg1.handle), arg0.type) + elif dtype.is_int_signed(): + return core.tensor(_builder.create_minsi(arg0.handle, arg1.handle), arg0.type) + elif dtype.is_int_unsigned(): + return core.tensor(_builder.create_minui(arg0.handle, arg1.handle), arg0.dtype) + else: + assert False, f"Unexpected dtype {dtype}" + + +@core.extern +def max(arg0, arg1, _builder=None): + arg0 = core._to_tensor(arg0, _builder) + arg1 = core._to_tensor(arg1, _builder) + arg0, arg1 = core.binary_op_type_legalization(arg0, arg1, _builder) + dtype = arg0.dtype + if dtype.is_floating(): + return core.tensor(_builder.create_maxf(arg0.handle, arg1.handle), arg0.type) + elif dtype.is_int_signed(): + return core.tensor(_builder.create_maxsi(arg0.handle, arg1.handle), arg0.type) + elif dtype.is_int_unsigned(): + return core.tensor(_builder.create_maxui(arg0.handle, arg1.handle), arg0.dtype) + else: + assert False, f"Unexpected dtype {dtype}" + + +@core.extern +def mulhi(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("int32")): ("__nv_mulhi", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32")): ("__nv_umulhi", core.dtype("uint32")), + (core.dtype("int64"), core.dtype("int64")): ("__nv_mul64hi", core.dtype("int64")), + (core.dtype("uint64"), core.dtype("uint64")): ("__nv_umul64hi", core.dtype("uint64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def mul24(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("int32")): ("__nv_mul24", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32")): ("__nv_umul24", core.dtype("uint32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def brev(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_brev", 
core.dtype("int32")), + (core.dtype("int64"), ): ("__nv_brevll", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sad(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("int32"), core.dtype("int32"), core.dtype("uint32")): ("__nv_sad", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32"), core.dtype("uint32")): ("__nv_usad", core.dtype("uint32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def abs(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_abs", core.dtype("int32")), + (core.dtype("int64"), ): ("__nv_llabs", core.dtype("int64")), + (core.dtype("fp32"), ): ("__nv_fabsf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_fabs", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def floor(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_floorf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_floor", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcp64h(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_rcp64h", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rsqrt(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_rsqrtf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_rsqrt", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ceil(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_ceil", core.dtype("fp64")), + (core.dtype("fp32"), ): ("__nv_ceilf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def trunc(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_trunc", core.dtype("fp64")), + (core.dtype("fp32"), ): ("__nv_truncf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def exp2(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_exp2f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_exp2", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def saturatef(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_saturatef", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fma_rn(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rn", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fma_rz(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rz", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), 
core.dtype("fp64")): ("__nv_fma_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fma_rd(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_rd", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fma_ru(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf_ru", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_dividef(arg0, arg1, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fast_fdividef", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def div_rn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rn", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def div_rz(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rz", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def div_rd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_rd", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def div_ru(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdiv_ru", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_ddiv_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcp_rn(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_frcp_rn", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_drcp_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcp_rz(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_frcp_rz", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_drcp_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcp_rd(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_frcp_rd", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_drcp_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcp_ru(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + 
(core.dtype("fp32"), ): ("__nv_frcp_ru", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_drcp_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sqrt_rn(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fsqrt_rn", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_dsqrt_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sqrt_rz(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fsqrt_rz", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_dsqrt_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sqrt_rd(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fsqrt_rd", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_dsqrt_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sqrt_ru(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fsqrt_ru", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_dsqrt_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sqrt(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_sqrtf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_sqrt", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def add_rn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rn", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def add_rz(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rz", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def add_rd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_rd", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def add_ru(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dadd_ru", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fadd_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def mul_rn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_rn", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def mul_rz(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): 
("__nv_dmul_rz", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def mul_rd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dmul_rd", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmul_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def mul_ru(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + arg1, + ], { + ( + core.dtype("fp64"), + core.dtype("fp64"), + ): ("__nv_dmul_ru", core.dtype("fp64")), + ( + core.dtype("fp32"), + core.dtype("fp32"), + ): ("__nv_fmul_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2float_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2float_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2float_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2float_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2float_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2float_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2float_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2float_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2int_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2int_rn", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2int_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2int_rz", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2int_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2int_rd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2int_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2int_ru", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2uint_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2uint_rn", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2uint_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2uint_rz", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2uint_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2uint_rd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + 
+@core.extern +def double2uint_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2uint_ru", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int2double_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_int2double_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint2double_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint2double_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2int_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2int_rn", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2int_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2int_rz", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2int_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2int_rd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2int_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2int_ru", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2uint_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2uint_rn", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2uint_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2uint_rz", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2uint_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2uint_rd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2uint_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2uint_ru", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int2float_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_int2float_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int2float_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_int2float_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int2float_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_int2float_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int2float_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): 
("__nv_int2float_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint2float_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint2float_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint2float_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint2float_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint2float_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint2float_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint2float_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint2float_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def hiloint2double(arg0, arg1, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("int32")): ("__nv_hiloint2double", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2loint(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2loint", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2hiint(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2hiint", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ll_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ll_rn", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ll_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ll_rz", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ll_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ll_rd", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ll_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ll_ru", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ull_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ull_rn", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ull_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ull_rz", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ull_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ull_rd", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float2ull_ru(arg0, _builder=None): + return 
core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float2ull_ru", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ll_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ll_rn", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ll_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ll_rz", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ll_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ll_rd", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ll_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ll_ru", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ull_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ull_rn", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ull_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ull_rz", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ull_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ull_rd", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double2ull_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double2ull_ru", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2float_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2float_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2float_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2float_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2float_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2float_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2float_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2float_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2float_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2float_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2float_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2float_rz", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern 
+def ull2float_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2float_rd", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2float_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2float_ru", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2double_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2double_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2double_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2double_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2double_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2double_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ll2double_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_ll2double_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2double_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2double_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2double_rz(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2double_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2double_rd(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2double_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ull2double_ru(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint64"), ): ("__nv_ull2double_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def int_as_float(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int32"), ): ("__nv_int_as_float", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float_as_int(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float_as_int", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def uint_as_float(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("uint32"), ): ("__nv_uint_as_float", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def float_as_uint(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_float_as_uint", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def longlong_as_double(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("int64"), ): ("__nv_longlong_as_double", 
core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def double_as_longlong(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_double_as_longlong", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_sinf(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_sinf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_cosf(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_cosf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_log2f(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_log2f", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_logf(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_logf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_expf(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_expf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_tanf(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_tanf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_exp10f(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_exp10f", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_log10f(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_fast_log10f", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fast_powf(arg0, arg1, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fast_powf", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def hadd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("int32")): ("__nv_hadd", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32")): ("__nv_uhadd", core.dtype("uint32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rhadd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("int32")): ("__nv_rhadd", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32")): ("__nv_urhadd", core.dtype("uint32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sub_rn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rn", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sub_rz(arg0, arg1, _builder=None): + return 
core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rz", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rz", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sub_rd(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_rd", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_rd", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sub_ru(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fsub_ru", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_dsub_ru", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rsqrt_rn(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_frsqrt_rn", core.dtype("fp32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ffs(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("int32"), ): ("__nv_ffs", core.dtype("int32")), + (core.dtype("int64"), ): ("__nv_ffsll", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rint(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_rintf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_rint", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def llrint(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_llrintf", core.dtype("int64")), + (core.dtype("fp64"), ): ("__nv_llrint", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def nearbyint(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_nearbyintf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_nearbyint", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def isnan(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_isnanf", core.dtype("int32")), + (core.dtype("fp64"), ): ("__nv_isnand", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def signbit(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [ + arg0, + ], { + (core.dtype("fp32"), ): ("__nv_signbitf", core.dtype("int32")), + (core.dtype("fp64"), ): ("__nv_signbitd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def copysign(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_copysignf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_copysign", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def finitef(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_finitef", core.dtype("int32")), 
+ }, is_pure=True, _builder=_builder) + + +@core.extern +def isinf(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_isinff", core.dtype("int32")), + (core.dtype("fp64"), ): ("__nv_isinfd", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def nextafter(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_nextafterf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_nextafter", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sin(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_sinf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_sin", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cos(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_cosf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cos", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sinpi(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_sinpif", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_sinpi", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cospi(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_cospif", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cospi", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def tan(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_tanf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_tan", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def log2(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_log2f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_log2", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def exp(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_expf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_exp", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def exp10(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_exp10f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_exp10", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cosh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_coshf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cosh", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def sinh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_sinhf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_sinh", core.dtype("fp64")), + }, is_pure=True, 
_builder=_builder) + + +@core.extern +def tanh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_tanhf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_tanh", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def atan2(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_atan2f", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_atan2", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def atan(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_atanf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_atan", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def asin(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_asinf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_asin", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def acos(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_acosf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_acos", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def log(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_logf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_log", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def log10(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_log10f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_log10", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def log1p(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_log1pf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_log1p", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def acosh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_acoshf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_acosh", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def asinh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_asinhf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_asinh", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def atanh(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_atanhf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_atanh", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def expm1(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_expm1f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_expm1", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + 
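+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the upstream file): the transcendental
+# wrappers above compose like ordinary Triton operations inside a
+# @triton.jit kernel.  Assuming triton with a CUDA device and that this
+# module is reachable as `tl.math`, a numerically stable softplus could be
+# written as in the commented example below (kept as a comment to avoid
+# import-time side effects; the kernel name is hypothetical).
+#
+#     import torch
+#     import triton
+#     import triton.language as tl
+#
+#     @triton.jit
+#     def softplus_kernel(x_ptr, y_ptr, BLOCK: tl.constexpr):
+#         offs = tl.arange(0, BLOCK)
+#         x = tl.load(x_ptr + offs)
+#         # stable softplus: max(x, 0) + log1p(exp(-|x|))
+#         y = tl.maximum(x, 0.0) + tl.math.log1p(tl.math.exp(-tl.abs(x)))
+#         tl.store(y_ptr + offs, y)
+#
+#     x = torch.randn(1024, device="cuda")
+#     y = torch.empty_like(x)
+#     softplus_kernel[(1,)](x, y, BLOCK=1024)
+# ---------------------------------------------------------------------------
+
+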
+@core.extern +def hypot(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_hypotf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_hypot", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rhypot(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_rhypotf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_rhypot", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def norm3d(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_norm3df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_norm3d", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rnorm3d(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_rnorm3df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_rnorm3d", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def norm4d(arg0, arg1, arg2, arg3, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2, arg3], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): + ("__nv_norm4df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): + ("__nv_norm4d", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rnorm4d(arg0, arg1, arg2, arg3, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2, arg3], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): + ("__nv_rnorm4df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): + ("__nv_rnorm4d", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cbrt(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_cbrtf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cbrt", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def rcbrt(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_rcbrtf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_rcbrt", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def j0(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_j0f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_j0", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def j1(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_j1f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_j1", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def y0(arg0, 
_builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_y0f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_y0", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def y1(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_y1f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_y1", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def yn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("fp32")): ("__nv_ynf", core.dtype("fp32")), + (core.dtype("int32"), core.dtype("fp64")): ("__nv_yn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def jn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("int32"), core.dtype("fp32")): ("__nv_jnf", core.dtype("fp32")), + (core.dtype("int32"), core.dtype("fp64")): ("__nv_jn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cyl_bessel_i0(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_cyl_bessel_i0f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cyl_bessel_i0", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def cyl_bessel_i1(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_cyl_bessel_i1f", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_cyl_bessel_i1", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def erf(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_erff", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_erf", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def erfinv(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_erfinvf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_erfinv", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def erfc(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_erfcf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_erfc", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def erfcx(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_erfcxf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_erfcx", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def erfcinv(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_erfcinvf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_erfcinv", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def normcdfinv(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_normcdfinvf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_normcdfinv", core.dtype("fp64")), + }, 
is_pure=True, _builder=_builder) + + +@core.extern +def normcdf(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_normcdff", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_normcdf", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def lgamma(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_lgammaf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_lgamma", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ldexp(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("int32")): ("__nv_ldexpf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("int32")): ("__nv_ldexp", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def scalbn(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("int32")): ("__nv_scalbnf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("int32")): ("__nv_scalbn", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fmod(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmodf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_fmod", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def remainder(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_remainderf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_remainder", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fma(arg0, arg1, arg2, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1, arg2], { + (core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32")): ("__nv_fmaf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64")): ("__nv_fma", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def pow(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("int32")): ("__nv_powif", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("int32")): ("__nv_powi", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_powf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_pow", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def tgamma(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_tgammaf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_tgamma", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def round(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_roundf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_round", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def llround(arg0, _builder=None): + return core.extern_elementwise( + 
"libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_llroundf", core.dtype("int64")), + (core.dtype("fp64"), ): ("__nv_llround", core.dtype("int64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def fdim(arg0, arg1, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0, arg1], { + (core.dtype("fp32"), core.dtype("fp32")): ("__nv_fdimf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64")): ("__nv_fdim", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def ilogb(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_ilogbf", core.dtype("int32")), + (core.dtype("fp64"), ): ("__nv_ilogb", core.dtype("int32")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def logb(arg0, _builder=None): + return core.extern_elementwise( + "libdevice", libdevice_path(), [arg0], { + (core.dtype("fp32"), ): ("__nv_logbf", core.dtype("fp32")), + (core.dtype("fp64"), ): ("__nv_logb", core.dtype("fp64")), + }, is_pure=True, _builder=_builder) + + +@core.extern +def isfinited(arg0, _builder=None): + return core.extern_elementwise("libdevice", libdevice_path(), [arg0], { + (core.dtype("fp64"), ): ("__nv_isfinited", core.dtype("int32")), + }, is_pure=True, _builder=_builder) diff --git a/venv/lib/python3.10/site-packages/triton/language/random.py b/venv/lib/python3.10/site-packages/triton/language/random.py new file mode 100644 index 0000000000000000000000000000000000000000..cf9d53bfb3e74fb74ebfb7fa5ad29d035471897b --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/language/random.py @@ -0,0 +1,202 @@ +from ..runtime.jit import jit +from . import core as tl +from . import standard + +N_ROUNDS_DEFAULT = 10 # Default number of rounds for philox + +# ------------------- +# randint +# ------------------- + + +@jit +def philox_impl(c0, c1, c2, c3, k0, k1, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Run `n_rounds` rounds of Philox for state (c0, c1, c2, c3) and key (k0, k1). 
+ """ + if c0.dtype == tl.uint32: + PHILOX_KEY_A: tl.constexpr = 0x9E3779B9 + PHILOX_KEY_B: tl.constexpr = 0xBB67AE85 + PHILOX_ROUND_A: tl.constexpr = 0xD2511F53 + PHILOX_ROUND_B: tl.constexpr = 0xCD9E8D57 + else: + tl.static_assert(c0.dtype == tl.uint64, "dtype not supported in philox_impl") + PHILOX_KEY_A: tl.constexpr = 0x9E3779B97F4A7C15 + PHILOX_KEY_B: tl.constexpr = 0xBB67AE8584CAA73B + PHILOX_ROUND_A: tl.constexpr = 0xD2E7470EE14C6C93 + PHILOX_ROUND_B: tl.constexpr = 0xCA5A826395121157 + + for _ in tl.static_range(n_rounds): + # for _ in range(n_rounds): + # update random state + A = PHILOX_ROUND_A + B = PHILOX_ROUND_B + _c0, _c2 = c0, c2 + c0 = tl.umulhi(B, _c2) ^ c1 ^ k0 + c2 = tl.umulhi(A, _c0) ^ c3 ^ k1 + c1 = B * _c2 + c3 = A * _c0 + # raise key + k0 = k0 + PHILOX_KEY_A + k1 = k1 + PHILOX_KEY_B + return c0, c1, c2, c3 + + +@jit +def philox(seed, c0, c1, c2, c3, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + seed = seed.to(tl.uint64) + if tl.constexpr(c0.dtype.primitive_bitwidth) == 32: + int_dtype = tl.uint32 + seed_hi = ((seed >> 32) & 0xffffffff).to(tl.uint32) + seed_lo = (seed & 0xffffffff).to(tl.uint32) + else: + tl.static_assert(tl.constexpr(c0.dtype.primitive_bitwidth) == 64, "bitwidth not supported in philox") + int_dtype = tl.uint64 + seed_hi = 0 + seed_lo = seed + c0 = c0.to(int_dtype, bitcast=True) + c1 = c1.to(int_dtype, bitcast=True) + c2 = c2.to(int_dtype, bitcast=True) + c3 = c3.to(int_dtype, bitcast=True) + return philox_impl(c0, c1, c2, c3, seed_lo, seed_hi, n_rounds) + + +@jit +def randint(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offset` block, returns a single + block of random :code:`int32`. + + If you need multiple streams of random numbers, + using `randint4x` is likely to be faster than calling `randint` 4 times. + + :param seed: The seed for generating random numbers. + :param offset: The offsets to generate random numbers for. + """ + ret, _, _, _ = randint4x(seed, offset, n_rounds) + return ret + + +@jit +def randint4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offset` block, returns four + blocks of random :code:`int32`. + + This is the maximally efficient entry point + to Triton's Philox pseudo-random number generator. + + :param seed: The seed for generating random numbers. + :param offsets: The offsets to generate random numbers for. + """ + # _0 = tl.zeros(offset.shape, offset.dtype) + _0 = offset * 0 + return philox(seed, offset, _0, _0, _0, n_rounds) + + +# ------------------- +# rand +# ------------------- + +# @jit +# def uint32_to_uniform_float(x): +# """ +# Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1). +# """ +# two_to_the_minus_32: tl.constexpr = 2.328306e-10 +# return x * two_to_the_minus_32 + + +@jit +def uint_to_uniform_float(x): + """ + Numerically stable function to convert a random uint into a random float uniformly sampled in [0, 1). 
+ """ + # TODO: fix frontend issues and cleanup + # conditions can be simplified + # scale is ((2**23 - 1) / 2**23) * 2**(N_BITS - 1) + if tl.constexpr(x.dtype == tl.uint32) or tl.constexpr(x.dtype == tl.int32): + # maximum value such that `MAX_INT * scale < 1.0` (with float rounding) + x = x.to(tl.int32, bitcast=True) + scale = 4.6566127342e-10 + else: + tl.static_assert(tl.constexpr(x.dtype == tl.uint64) or tl.constexpr(x.dtype == tl.int64)) + x = x.to(tl.int64, bitcast=True) + scale = 1.0842020432385337e-19 + x = tl.where(x < 0, -x - 1, x) + return x * scale + + +@jit +def rand(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offset` block, + returns a block of random :code:`float32` in :math:`U(0, 1)`. + + :param seed: The seed for generating random numbers. + :param offsets: The offsets to generate random numbers for. + """ + source = randint(seed, offset, n_rounds) + return uint_to_uniform_float(source) + + +@jit +def rand4x(seed, offsets, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offsets` block, + returns 4 blocks of random :code:`float32` in :math:`U(0, 1)`. + + :param seed: The seed for generating random numbers. + :param offsets: The offsets to generate random numbers for. + """ + i1, i2, i3, i4 = randint4x(seed, offsets, n_rounds) + u1 = uint_to_uniform_float(i1) + u2 = uint_to_uniform_float(i2) + u3 = uint_to_uniform_float(i3) + u4 = uint_to_uniform_float(i4) + return u1, u2, u3, u4 + + +# ------------------- +# randn +# ------------------- + + +@jit +def pair_uniform_to_normal(u1, u2): + """Box-Muller transform""" + u1 = standard.maximum(1.0e-7, u1) + th = 6.283185307179586 * u2 + r = tl.sqrt(-2.0 * tl.log(u1)) + return r * tl.cos(th), r * tl.sin(th) + + +@jit +def randn(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offset` block, + returns a block of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`. + + :param seed: The seed for generating random numbers. + :param offsets: The offsets to generate random numbers for. + """ + i1, i2, _, _ = randint4x(seed, offset, n_rounds) + u1 = uint_to_uniform_float(i1) + u2 = uint_to_uniform_float(i2) + n1, _ = pair_uniform_to_normal(u1, u2) + return n1 + + +@jit +def randn4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT): + """ + Given a :code:`seed` scalar and an :code:`offset` block, + returns 4 blocks of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`. + + :param seed: The seed for generating random numbers. + :param offsets: The offsets to generate random numbers for. + """ + u1, u2, u3, u4 = rand4x(seed, offset, n_rounds) + n1, n2 = pair_uniform_to_normal(u1, u2) + n3, n4 = pair_uniform_to_normal(u3, u4) + return n1, n2, n3, n4 diff --git a/venv/lib/python3.10/site-packages/triton/language/semantic.py b/venv/lib/python3.10/site-packages/triton/language/semantic.py new file mode 100644 index 0000000000000000000000000000000000000000..c1ee1036ba6f8097546cfbb9c5076b59183981d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/language/semantic.py @@ -0,0 +1,1549 @@ +from __future__ import annotations # remove after python 3.11 + +from functools import wraps +from typing import List, Optional, Sequence, Tuple, TypeVar + +from .._C.libtriton.triton import ir +from ..common.build import is_hip +from . 
import core as tl + +T = TypeVar('T') + + +class IncompatibleTypeErrorImpl(Exception): + + def __init__(self, type_a, type_b): + self.type_a = type_a + self.type_b = type_b + self.message = "invalid operands of type " + self.type_a.__repr__() + " and " + self.type_b.__repr__() + super(IncompatibleTypeErrorImpl, self).__init__(self.message) + + +# ===----------------------------------------------------------------------===## +# Programming Model +# ===----------------------------------------------------------------------===## + + +def program_id(axis: int, builder: ir.builder) -> tl.tensor: + if axis not in (0, 1, 2): + raise ValueError(f"program_id axis must be 0, 1, or 2 but got {axis}") + return tl.tensor(builder.create_get_program_id(axis), tl.int32) + + +def num_programs(axis: int, builder: ir.builder) -> tl.tensor: + if axis not in (0, 1, 2): + raise ValueError(f"num_programs axis must be 0, 1, or 2 but got {axis}") + return tl.tensor(builder.create_get_num_programs(axis), tl.int32) + + +# ===----------------------------------------------------------------------===// +# Implicit Casting Utilities +# ===----------------------------------------------------------------------===// + + +def integer_promote_impl(a_ty: tl.dtype, b_ty: tl.dtype) -> tl.dtype: + a_rank = a_ty.int_bitwidth + b_rank = b_ty.int_bitwidth + a_sn = a_ty.int_signedness + b_sn = b_ty.int_signedness + # Rules for signedness taken from "Usual arithmetic conversions" on + # https://en.cppreference.com/w/c/language/conversion. + if a_sn == b_sn: + return a_ty if a_rank > b_rank else b_ty + elif a_sn == tl.dtype.SIGNEDNESS.UNSIGNED: + return a_ty if a_rank >= b_rank else b_ty + elif b_sn == tl.dtype.SIGNEDNESS.UNSIGNED: + return b_ty if b_rank >= a_rank else a_ty + assert False + + +def computation_type_impl(a_ty: tl.dtype, b_ty: tl.dtype, div_or_mod: bool) -> tl.dtype: + # 1) if one operand is double, the other is implicitly + # converted to double + if a_ty.is_fp64() or b_ty.is_fp64(): + return tl.float64 + # 2) if one operand is float, the other is implicitly + # converted to float + if a_ty.is_fp32() or b_ty.is_fp32(): + return tl.float32 + # 3 ) if one operand is half, the other is implicitly converted to half + # unless we're doing / or %, which do not exist natively in PTX for fp16. + # Supported PTX op: add, sub, mul, fma, neg, abs, min, max, tanh, ex2, setp + if a_ty.is_fp16() or b_ty.is_fp16(): + if div_or_mod: + return tl.float32 + else: + return tl.float16 + # 4) return bf16 only if both operands are of bf16 + if a_ty.is_bf16() or b_ty.is_bf16(): + if div_or_mod: + return tl.float32 + if a_ty.is_bf16() and b_ty.is_bf16(): + return tl.bfloat16 + return tl.float32 + if not a_ty.is_int() or not b_ty.is_int(): + assert False + # 5 ) both operands are integer and undergo + # integer promotion + if div_or_mod and a_ty.int_signedness != b_ty.int_signedness: + raise ValueError("Cannot use /, #, or % with " + a_ty.__repr__() + " and " + b_ty.__repr__() + + " because they have different signedness;" + "this is unlikely to result in a useful answer. 
Cast them to the same signedness.") + return integer_promote_impl(a_ty, b_ty) + + +# ===----------------------------------------------------------------------===// +# Binary Operators +# ===----------------------------------------------------------------------===// + + +def check_ptr_type_impl(type_a: tl.dtype, type_b: tl.dtype, allow_ptr_a: bool) -> None: + if type_a.is_ptr(): + if not allow_ptr_a: + raise IncompatibleTypeErrorImpl(type_a, type_b) + # T* + U* with T != U + if type_b.is_ptr() and (type_a != type_b): + raise IncompatibleTypeErrorImpl(type_a, type_b) + # T* + float + if type_b.is_floating(): + raise IncompatibleTypeErrorImpl(type_a, type_b) + + +def binary_op_type_checking_impl(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder, allow_lhs_ptr=False, + allow_rhs_ptr=False, arithmetic_check=True, + div_or_mod=False) -> Tuple[tl.tensor, tl.tensor]: + # implicit broadcasting + lhs, rhs = broadcast_impl_value(lhs, rhs, builder) + # implicit typecasting + lhs_sca_ty = lhs.type.scalar + rhs_sca_ty = rhs.type.scalar + check_ptr_type_impl(lhs_sca_ty, rhs_sca_ty, allow_lhs_ptr) + check_ptr_type_impl(rhs_sca_ty, lhs_sca_ty, allow_rhs_ptr) + if arithmetic_check and not lhs_sca_ty.is_ptr() and not rhs_sca_ty.is_ptr(): + ret_sca_ty = computation_type_impl(lhs_sca_ty, rhs_sca_ty, div_or_mod) + lhs = cast(lhs, ret_sca_ty, builder) + rhs = cast(rhs, ret_sca_ty, builder) + return lhs, rhs + + +def add(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder, True, True) + input_scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + if input_scalar_ty.is_ptr() and other_scalar_ty.is_ptr(): + raise ValueError("cannot add pointers together") + + # offset + ptr + # ptr + offset + if other_scalar_ty.is_ptr() and not input_scalar_ty.is_ptr(): + input, other = other, input + input_scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + if input_scalar_ty.is_ptr(): + return tl.tensor(builder.create_addptr(input.handle, other.handle), input.type) + # float + float + elif input_scalar_ty.is_floating(): + return tl.tensor(builder.create_fadd(input.handle, other.handle), input.type) + # int + int + elif input_scalar_ty.is_int(): + return tl.tensor(builder.create_add(input.handle, other.handle), input.type) + assert False + + +def sub(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder, True, False) + scalar_ty = input.type.scalar + # ptr - offset + if scalar_ty.is_ptr(): + return tl.tensor(builder.create_addptr(input.handle, minus(other, builder).handle), input.type) + # float - float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fsub(input.handle, other.handle), input.type) + # int - int + elif scalar_ty.is_int(): + return tl.tensor(builder.create_sub(input.handle, other.handle), input.type) + assert False + + +def mul(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float * float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fmul(input.handle, other.handle), input.type) + # * int + elif scalar_ty.is_int(): + return tl.tensor(builder.create_mul(input.handle, other.handle), input.type) + assert False + + +def truediv(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, 
builder, False, False, True, True) + input_scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + # float / int + if input_scalar_ty.is_floating() and other_scalar_ty.is_int(): + other = cast(other, input_scalar_ty, builder) + # int / float + elif input_scalar_ty.is_int() and other_scalar_ty.is_floating(): + input = cast(input, other_scalar_ty, builder) + # int / int (cast to tl.float32) + elif input_scalar_ty.is_int() and other_scalar_ty.is_int(): + input = cast(input, tl.float32, builder) + other = cast(other, tl.float32, builder) + # float / float (cast to the highest exponent type) + elif input_scalar_ty.is_floating() and other_scalar_ty.is_floating(): + if input_scalar_ty.fp_mantissa_width > other_scalar_ty.fp_mantissa_width: + other = cast(other, input_scalar_ty, builder) + else: + input = cast(input, other_scalar_ty, builder) + # unreachable + else: + assert False + return tl.tensor(builder.create_fdiv(input.handle, other.handle), input.type) + + +def floordiv(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True) + input_scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + if input_scalar_ty.is_int() and other_scalar_ty.is_int(): + ret_ty = integer_promote_impl(input_scalar_ty, other_scalar_ty) + input = cast(input, ret_ty, builder) + other = cast(other, ret_ty, builder) + if ret_ty.is_int_signed(): + return tl.tensor(builder.create_sdiv(input.handle, other.handle), input.type) + else: + return tl.tensor(builder.create_udiv(input.handle, other.handle), input.type) + assert False + + +def fdiv(input: tl.tensor, other: tl.tensor, ieee_rounding: bool, builder: ir.builder) -> tl.tensor: + input_scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + if not input_scalar_ty.is_floating() or not other_scalar_ty.is_floating(): + raise ValueError("both operands of fdiv must have floating scalar type") + input, other = binary_op_type_checking_impl(input, other, builder, False, False, False, True) + ret = builder.create_fdiv(input.handle, other.handle) + return tl.tensor(ret, input.type) + + +def mod(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True) + scalar_ty = input.type.scalar + other_scalar_ty = other.type.scalar + # float % float + if scalar_ty.is_floating(): + # input - input.div(other, rounding_mode="floor") * other + ret = sub(input, mul(floor(fdiv(input, other, False, builder), builder), other, builder), builder) + return ret + # % int + elif scalar_ty.is_int(): + if scalar_ty.int_signedness != other_scalar_ty.int_signedness: + raise ValueError("Cannot mod " + scalar_ty.__repr__() + " by " + other_scalar_ty.__repr__() + " " + "because they have different signedness;" + "this is unlikely to result in a useful answer. 
Cast them to the same signedness.") + if scalar_ty.is_int_signed(): + return tl.tensor(builder.create_srem(input.handle, other.handle), input.type) + else: + return tl.tensor(builder.create_urem(input.handle, other.handle), input.type) + assert False + + +############## +# bitwise ops +############## + + +def bitwise_op_type_checking_impl(input: tl.tensor, other: tl.tensor, + builder: ir.builder) -> Tuple[tl.tensor, tl.tensor]: + input, other = binary_op_type_checking_impl(input, other, builder, False, False, False) + input_sca_ty = input.type.scalar + other_sca_ty = other.type.scalar + if not input_sca_ty.is_int() or not other_sca_ty.is_int(): + raise IncompatibleTypeErrorImpl(input_sca_ty, other_sca_ty) + ret_sca_ty = integer_promote_impl(input_sca_ty, other_sca_ty) + if ret_sca_ty != input_sca_ty: + input = cast(input, ret_sca_ty, builder) + if ret_sca_ty != other_sca_ty: + other = cast(other, ret_sca_ty, builder) + return input, other + + +def and_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_and(input.handle, other.handle), input.type) + + +def or_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_or(input.handle, other.handle), input.type) + + +def xor_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_xor(input.handle, other.handle), input.type) + + +def logical_and(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + if not input.type.is_int1(): + input = bitcast(input, tl.dtype("int1"), builder) + if not other.type.is_int1(): + other = bitcast(other, tl.dtype("int1"), builder) + return and_(input, other, builder) + + +def logical_or(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + if not input.type.is_int1(): + input = bitcast(input, tl.dtype("int1"), builder) + if not other.type.is_int1(): + other = bitcast(other, tl.dtype("int1"), builder) + return or_(input, other, builder) + + +def not_(input: tl.tensor, builder: ir.builder): + if not input.type.is_int1(): + input = bitcast(input, tl.dtype("int1"), builder) + return invert(input, builder) + + +def lshr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_lshr(input.handle, other.handle), input.type) + + +def ashr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_ashr(input.handle, other.handle), input.type) + + +def shl(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = bitwise_op_type_checking_impl(input, other, builder) + return tl.tensor(builder.create_shl(input.handle, other.handle), input.type) + + +# ===----------------------------------------------------------------------===// +# Unary Operators +# ===----------------------------------------------------------------------===// + + +def plus(input: tl.tensor) -> tl.tensor: + return input + + +def minus(input: tl.tensor, builder: ir.builder) -> tl.tensor: + input_sca_ty = input.type.scalar + if input_sca_ty.is_ptr(): + raise ValueError("wrong type argument to unary minus (" + 
input_sca_ty.__repr__() + ")") + _0 = tl.tensor(builder.get_null_value(input_sca_ty.to_ir(builder)), input_sca_ty) + return sub(_0, input, builder) + + +def invert(input: tl.tensor, builder: tl.tensor) -> tl.tensor: + input_sca_ty = input.type.scalar + if input_sca_ty.is_ptr() or input_sca_ty.is_floating(): + raise ValueError("wrong type argument to unary invert (" + input_sca_ty.__repr__() + ")") + _1 = tl.tensor(builder.get_all_ones_value(input_sca_ty.to_ir(builder)), input_sca_ty) + return xor_(input, _1, builder) + + +# ===----------------------------------------------------------------------===// +# Comparison Operators +# ===----------------------------------------------------------------------===// +def _bool_like(v: tl.tensor) -> tl.block_type: + if not v.type.is_block(): + return tl.int1 + shape = v.type.shape + return tl.block_type(tl.int1, shape) + + +def greater_than(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float > float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fcmpOGT(input.handle, other.handle), _bool_like(input)) + # > int + elif scalar_ty.is_int(): + if scalar_ty.is_int_signed(): + return tl.tensor(builder.create_icmpSGT(input.handle, other.handle), _bool_like(input)) + else: + return tl.tensor(builder.create_icmpUGT(input.handle, other.handle), _bool_like(input)) + assert False + + +def greater_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float >= float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fcmpOGE(input.handle, other.handle), _bool_like(input)) + # >= int + elif scalar_ty.is_int(): + if scalar_ty.is_int_signed(): + return tl.tensor(builder.create_icmpSGE(input.handle, other.handle), _bool_like(input)) + else: + return tl.tensor(builder.create_icmpUGE(input.handle, other.handle), _bool_like(input)) + assert False + + +def less_than(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float < float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fcmpOLT(input.handle, other.handle), _bool_like(input)) + # < int + elif scalar_ty.is_int(): + if scalar_ty.is_int_signed(): + return tl.tensor(builder.create_icmpSLT(input.handle, other.handle), _bool_like(input)) + else: + return tl.tensor(builder.create_icmpULT(input.handle, other.handle), _bool_like(input)) + assert False + + +def less_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float < float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fcmpOLE(input.handle, other.handle), _bool_like(input)) + # < int + elif scalar_ty.is_int(): + if scalar_ty.is_int_signed(): + return tl.tensor(builder.create_icmpSLE(input.handle, other.handle), _bool_like(input)) + else: + return tl.tensor(builder.create_icmpULE(input.handle, other.handle), _bool_like(input)) + assert False + + +def equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float == float + if scalar_ty.is_floating(): + return 
tl.tensor(builder.create_fcmpOEQ(input.handle, other.handle), _bool_like(input)) + # == int + elif scalar_ty.is_int(): + return tl.tensor(builder.create_icmpEQ(input.handle, other.handle), _bool_like(input)) + assert False + + +def not_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + input, other = binary_op_type_checking_impl(input, other, builder) + scalar_ty = input.type.scalar + # float == float + if scalar_ty.is_floating(): + return tl.tensor(builder.create_fcmpUNE(input.handle, other.handle), _bool_like(input)) + # == int + elif scalar_ty.is_int(): + return tl.tensor(builder.create_icmpNE(input.handle, other.handle), _bool_like(input)) + assert False + + +# ===----------------------------------------------------------------------===// +# Block Creation +# ===----------------------------------------------------------------------===// + + +def arange(start: int, end: int, builder: ir.builder) -> tl.tensor: + if not isinstance(start, int) or not isinstance(end, int): + raise ValueError("arange's arguments must be of type tl.constexpr") + is_start_int64 = bool(start >> 32) + is_end_int64 = bool(end >> 32) + if is_start_int64 or is_end_int64: + raise ValueError("arange must fit in int32") + if end <= start: + raise ValueError("arange's end argument must be greater than the start argument") + + shape = [end - start] + ret_ty = tl.block_type(tl.int32, shape) + return tl.tensor(builder.create_make_range(start, end), ret_ty) + + +def full(shape: List[int], value, dtype: tl.dtype, builder: ir.builder) -> tl.tensor: + if isinstance(value, tl.tensor): + assert value.numel.value == 1, "only accepts size-1 tensor" + value = cast(value, dtype, builder) + else: + # scalar + if dtype is None: + raise ValueError("dtype must be specified when value is not a tensor") + if value == 0: + value = builder.get_null_value(dtype.to_ir(builder)) + else: + get_value_fn = getattr(builder, f"get_{dtype.name}") + value = get_value_fn(value) + value = tl.tensor(value, dtype) + + return splat(value, shape, builder) + + +# ===----------------------------------------------------------------------===// +# Shape Manipulation +# ===----------------------------------------------------------------------===// + + +def splat(value: tl.tensor, shape: List[int], builder: ir.builder) -> tl.tensor: + assert not value.type.is_block(), "Cannot splat a block tensor" + if len(shape) == 0: + return value + ret_ty = tl.block_type(value.dtype, shape) + return tl.tensor(builder.create_splat(value.handle, shape), ret_ty) + + +def view(input: tl.tensor, dst_shape: List[int], builder: ir.builder) -> tl.tensor: + numel = 1 + for s in dst_shape: + numel *= s + if input.type.numel != numel: + raise ValueError("cannot view block of different shape") + ret_ty = tl.block_type(input.type.scalar, dst_shape) + return tl.tensor(builder.create_reshape(input.handle, dst_shape, True), ret_ty) + + +def reshape(input: tl.tensor, dst_shape: List[int], builder: ir.builder) -> tl.tensor: + ret_ty = tl.block_type(input.type.scalar, dst_shape) + return tl.tensor(builder.create_reshape(input.handle, dst_shape, False), ret_ty) + + +def expand_dims(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor: + dst_shape = [tl._constexpr_to_value(x) for x in input.shape] + dst_shape.insert(axis, 1) + + if not input.type.is_block(): + return splat(input, shape=dst_shape, builder=builder) + + ret_ty = tl.block_type(input.type.scalar, dst_shape) + return tl.tensor(builder.create_expand_dims(input.handle, axis), ret_ty) + + +def 
cat(lhs: tl.tensor, rhs: tl.tensor, can_reorder: bool, builder: ir.builder) -> tl.tensor: + assert can_reorder, "current implementation of `cat` always may reorder elements" + assert len(lhs.shape) == 1 + ret_type = tl.block_type(lhs.type.scalar, [lhs.shape[0] + rhs.shape[0]]) + return tl.tensor(builder.create_cat(lhs.handle, rhs.handle), ret_type) + + +def trans(input: tl.tensor, builder: ir.builder) -> tl.tensor: + if len(input.shape) != 2: + raise ValueError("Only 2D tensors can be transposed") + ret_type = tl.block_type(input.type.scalar, [input.shape[1], input.shape[0]]) + return tl.tensor(builder.create_trans(input.handle), ret_type) + + +def broadcast_impl_shape(input: tl.tensor, shape: List[int], builder: ir.builder) -> tl.tensor: + if not input.type.is_block(): + ret_ty = tl.block_type(input.type, shape) + return tl.tensor(builder.create_splat(input.handle, shape), ret_ty) + src_shape = input.type.get_block_shapes() + if len(src_shape) != len(shape): + raise ValueError(f"Cannot broadcast, rank mismatch: {src_shape}, {shape}") + if shape == src_shape: + return input + for i, item in enumerate(src_shape): + if shape[i] != item and item != 1: + raise ValueError(f"Cannot broadcast, the expanded size of the tensor ({shape[i]})" + f" must match the existing size ({item}) at non-singleton dimension" + f" {i}: {src_shape}, {shape}") + ret_ty = tl.block_type(input.type.scalar, shape) + return tl.tensor(builder.create_broadcast(input.handle, shape), ret_ty) + + +def broadcast_impl_value(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder) -> tl.tensor: + lhs_ty = lhs.type + rhs_ty = rhs.type + + # make_shape_compatible(block, scalar) + if lhs_ty.is_block() and not rhs_ty.is_block(): + rhs_ty = tl.block_type(rhs_ty.scalar, lhs_ty.shape) + rhs = tl.tensor(builder.create_splat(rhs.handle, lhs_ty.get_block_shapes()), rhs_ty) + # make_shape_compatible(scalar, block) + elif not lhs_ty.is_block() and rhs_ty.is_block(): + lhs_ty = tl.block_type(lhs_ty.scalar, rhs_ty.shape) + lhs = tl.tensor(builder.create_splat(lhs.handle, rhs_ty.get_block_shapes()), lhs_ty) + # make_shape_compatible(block, block) + elif lhs_ty.is_block() and rhs_ty.is_block(): + lhs_shape = lhs_ty.get_block_shapes() + rhs_shape = rhs_ty.get_block_shapes() + + if len(lhs_shape) < len(rhs_shape): + # Add new axes to lhs + for dim in range(len(lhs_shape), len(rhs_shape)): + lhs = tl.tensor(builder.create_expand_dims(lhs.handle, 0), + tl.block_type(lhs_ty.scalar, [1] + lhs_shape)) + lhs_ty = lhs.type + lhs_shape = lhs_ty.get_block_shapes() + elif len(rhs_shape) < len(lhs_shape): + # Add new axes to rhs + for dim in range(len(rhs_shape), len(lhs_shape)): + rhs = tl.tensor(builder.create_expand_dims(rhs.handle, 0), + tl.block_type(rhs_ty.scalar, [1] + rhs_shape)) + rhs_ty = rhs.type + rhs_shape = rhs_ty.get_block_shapes() + assert len(rhs_shape) == len(lhs_shape) + + ret_shape = [] + for i, left in enumerate(lhs_shape): + right = rhs_shape[i] + if left == 1: + ret_shape.append(right) + elif right == 1: + ret_shape.append(left) + elif left == right: + ret_shape.append(left) + else: + raise ValueError("Cannot make_shape_compatible: incompatible dimensions " + "at index " + str(i) + ": " + str(left) + " and " + str(right)) + if lhs_shape != ret_shape: + ret_ty = tl.block_type(lhs_ty.scalar, ret_shape) + lhs = tl.tensor(builder.create_broadcast(lhs.handle, ret_shape), ret_ty) + if rhs_shape != ret_shape: + ret_ty = tl.block_type(rhs_ty.scalar, ret_shape) + rhs = tl.tensor(builder.create_broadcast(rhs.handle, ret_shape), ret_ty) + # (scalar, 
scalar) => returns original blocks + return lhs, rhs + + +####### +# cast +####### + + +def bitcast(input: tl.tensor, dst_ty: tl.dtype, builder: ir.builder) -> tl.tensor: + src_ty = input.type + if src_ty.is_block(): + dst_ty = tl.block_type(dst_ty.scalar, input.type.get_block_shapes()) + if src_ty == dst_ty: + return input + src_sca_ty = src_ty.scalar + dst_sca_ty = dst_ty.scalar + if src_sca_ty.is_ptr() or dst_sca_ty.is_ptr(): + return cast(input, dst_ty, builder) + # Bitcast + src_bits = src_sca_ty.primitive_bitwidth + dst_bits = dst_sca_ty.primitive_bitwidth + if src_bits != dst_bits: + raise ValueError("Cannot bitcast data-type of size " + str(src_bits) + " to " + "data-type of size " + str(dst_bits)) + return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty) + + +def cast(input: tl.tensor, dst_ty: tl.dtype, builder: ir.builder) -> tl.tensor: + src_ty = input.type + if isinstance(dst_ty, tl.constexpr): + dst_ty = dst_ty.value + if src_ty.is_block(): + dst_ty = tl.block_type(dst_ty.scalar, input.type.get_block_shapes()) + if src_ty == dst_ty: + return input + + src_sca_ty = src_ty.scalar + dst_sca_ty = dst_ty.scalar + + if (src_sca_ty.is_fp8e4nv() or dst_sca_ty.is_fp8e4nv()): + assert builder.options.allow_fp8e4nv, "fp8e4nv data type is not supported on CUDA arch < 89" + + # Casting with customized floating types involved: fp8 <=> bf16, fp16, fp32, fp64 + if (src_sca_ty.is_fp8() and dst_sca_ty.is_floating()) or \ + (src_sca_ty.is_floating() and dst_sca_ty.is_fp8()): + return tl.tensor(builder.create_fp_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty) + + # bf16 <=> (not fp32) + if (src_sca_ty.is_fp16() and not dst_sca_ty.is_fp32()) or \ + (src_sca_ty.is_bf16() and not dst_sca_ty.is_fp32()): + return cast(cast(input, tl.float32, builder), dst_sca_ty, builder) + + # Standard floating types' casting: truncation + # fp64 => fp32, fp16, bf16 + # fp32 => fp16, bf16 + truncate_fp = src_sca_ty.is_floating() and \ + dst_sca_ty.is_floating() and \ + src_sca_ty.primitive_bitwidth > dst_sca_ty.primitive_bitwidth + if truncate_fp: + return tl.tensor(builder.create_fp_trunc(input.handle, dst_ty.to_ir(builder)), dst_ty) + + # Standard floating types' casting: extension + # fp32 => fp64 + # fp16 => fp32, fp64 + # bf16 => fp32, fp64 + ext_fp = src_sca_ty.is_floating() and \ + dst_sca_ty.is_floating() and \ + src_sca_ty.primitive_bitwidth < dst_sca_ty.primitive_bitwidth + if ext_fp: + return tl.tensor(builder.create_fp_ext(input.handle, dst_ty.to_ir(builder)), dst_ty) + + # Casting between integer types + if src_sca_ty.is_int() and dst_sca_ty.is_int() and \ + (src_sca_ty.int_bitwidth != dst_sca_ty.int_bitwidth or src_sca_ty.int_signedness != dst_sca_ty.int_signedness): + sign_extend = src_sca_ty.is_int_signed() and not src_sca_ty.is_bool() + if dst_sca_ty.is_bool(): + ty = input.dtype.to_ir(builder) + _0 = tl.tensor(builder.get_null_value(ty), input.dtype) + return not_equal(input, _0, builder) + else: + return tl.tensor(builder.create_int_cast(input.handle, dst_ty.to_ir(builder), sign_extend), dst_ty) + + # Casting standard floating types to integer types + if src_sca_ty.is_standard_floating() and dst_sca_ty.is_int(): + if dst_sca_ty.is_bool(): + ty = input.dtype.to_ir(builder) + _0 = tl.tensor(builder.get_null_value(ty), input.dtype) + return not_equal(input, _0, builder) + elif dst_sca_ty.is_int_signed(): + return tl.tensor(builder.create_fp_to_si(input.handle, dst_ty.to_ir(builder)), dst_ty) + else: + return tl.tensor(builder.create_fp_to_ui(input.handle, 
dst_ty.to_ir(builder)), dst_ty) + + # Casting integer types to standard floating types + if src_sca_ty.is_int() and dst_sca_ty.is_standard_floating(): + if src_sca_ty.is_bool() or not src_sca_ty.is_int_signed(): + return tl.tensor(builder.create_ui_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty) + else: + return tl.tensor(builder.create_si_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty) + + # Casting pointer types to integer types + if src_sca_ty.is_ptr() and dst_sca_ty.is_int(): + bitwidth = dst_sca_ty.int_bitwidth + if bitwidth == 64: + return tl.tensor(builder.create_ptr_to_int(input.handle, dst_ty.to_ir(builder)), dst_ty) + if bitwidth == 1: + return not_equal(cast(input, tl.int64, builder), tl.tensor(builder.get_int64(0), tl.int64), builder) + + # Casting integer types to pointer types + if src_sca_ty.is_int() and dst_sca_ty.is_ptr(): + return tl.tensor(builder.create_int_to_ptr(input.handle, dst_ty.to_ir(builder)), dst_ty) + + # Casting pointer types to pointer types + if src_sca_ty.is_ptr() and dst_sca_ty.is_ptr(): + return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty) + + assert False, f'cannot cast {input} to {dst_ty}' + + +# ===----------------------------------------------------------------------===// +# Memory Operators +# ===----------------------------------------------------------------------===// + + +def _str_to_load_cache_modifier(cache_modifier): + cache = ir.CACHE_MODIFIER.NONE # default + if cache_modifier: + if cache_modifier == ".ca": + cache = ir.CACHE_MODIFIER.CA + elif cache_modifier == ".cg": + cache = ir.CACHE_MODIFIER.CG + else: + raise ValueError(f"Cache modifier {cache_modifier} not supported") + return cache + + +def _str_to_store_cache_modifier(cache_modifier): + cache = ir.CACHE_MODIFIER.NONE # default + if cache_modifier: + if cache_modifier == ".wb": + cache = ir.CACHE_MODIFIER.WB + elif cache_modifier == ".cg": + cache = ir.CACHE_MODIFIER.CG + elif cache_modifier == ".cs": + cache = ir.CACHE_MODIFIER.CS + elif cache_modifier == ".wt": + cache = ir.CACHE_MODIFIER.WT + else: + raise ValueError(f"Cache modifier {cache_modifier} not supported") + return cache + + +def _str_to_eviction_policy(eviction_policy): + eviction = ir.EVICTION_POLICY.NORMAL # default + if eviction_policy: + if eviction_policy == "evict_last": + eviction = ir.EVICTION_POLICY.EVICT_LAST + elif eviction_policy == "evict_first": + eviction = ir.EVICTION_POLICY.EVICT_FIRST + else: + raise ValueError(f"Eviction policy {eviction_policy} not supported") + return eviction + + +def _str_to_padding_option(padding_option): + padding = None # default + if padding_option: + if padding_option == "zero": + padding = ir.PADDING_OPTION.PAD_ZERO + elif padding_option == "nan": + padding = ir.PADDING_OPTION.PAD_NAN + else: + raise ValueError(f"Padding option {padding_option} not supported") + return padding + + +def _str_to_sem(sem_option): + sem = ir.MEM_SEMANTIC.ACQUIRE_RELEASE + if sem_option: + if sem_option == "acquire": + sem = ir.MEM_SEMANTIC.ACQUIRE + elif sem_option == "release": + sem = ir.MEM_SEMANTIC.RELEASE + elif sem_option == "acq_rel": + sem = ir.MEM_SEMANTIC.ACQUIRE_RELEASE + elif sem_option == "relaxed": + sem = ir.MEM_SEMANTIC.RELAXED + else: + raise ValueError(f"Memory semantic {sem_option} not supported") + return sem + + +def _str_to_scope(scope_option): + scope = ir.MEM_SYNC_SCOPE.GPU + if scope_option: + if scope_option == "gpu": + scope = ir.MEM_SYNC_SCOPE.GPU + elif scope_option == "cta": + scope = ir.MEM_SYNC_SCOPE.CTA + elif 
scope_option == "sys": + scope = ir.MEM_SYNC_SCOPE.SYSTEM + else: + raise ValueError(f"Memory semantic {scope_option} not supported") + return scope + + +def _canonicalize_boundary_check(boundary_check, block_shape): + if boundary_check: + if not hasattr(boundary_check, "__iter__"): + boundary_check = [boundary_check] + boundary_check = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in boundary_check] + for dim in boundary_check: + assert isinstance(dim, int) and 0 <= dim < len(block_shape) + assert len(boundary_check) > 0 + assert len(boundary_check) == len(set(boundary_check)), "Duplicate dimension in `boundary_check`" + return sorted(boundary_check) + return tuple() + + +def _load_block_pointer(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder): + # Load by a block pointer: `pointer_type>` + # Block pointer can not have `mask` and `other` arguments + if mask or other: + raise ValueError("`mask` and `other` arguments cannot be specified for loading block pointers") + + elt_ty = ptr.type.element_ty.element_ty + assert elt_ty != tl.int1, "`tl.int1` should be rewrited in `tl.make_block_ptr`" + if elt_ty.is_int() and padding == ir.PADDING_OPTION.PAD_NAN: + raise ValueError("Padding option `nan` is not supported for integer block pointers") + + # `dst_ty` is de-referenced type of the pointer type + dst_ty = ptr.type.element_ty + + # Check `boundary_check` argument + boundary_check = _canonicalize_boundary_check(boundary_check, dst_ty.get_block_shapes()) + + # Build IR + return tl.tensor( + builder.create_tensor_pointer_load(ptr.handle, boundary_check, padding, cache, eviction, is_volatile), dst_ty) + + +def _load_legacy(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder): + # Load by a tensor of pointers or a pointer of scalar: `block_type>` or `pointer_type<>` + if not ptr.type.scalar.is_ptr(): + raise ValueError(f"Unsupported ptr type {ptr.type.__repr__()} in `tl.load`") + + # Check `mask`, `other`, `boundary_check`, and `padding` arguments + if not mask and other: + raise ValueError("`other` cannot be provided without `mask`") + if padding or boundary_check: + raise ValueError("`padding_option` or `boundary_check` argument is not supported for loading a tensor of" + "pointers or loading a scalar. 
Because the compiler does not know the boundary; please " + "use block pointers (defined by `make_block_ptr`) instead") + + # For a pointer of scalar, check the type of `mask` and `other` + if not ptr.type.is_block(): + if mask and mask.type.is_block(): + raise ValueError("Mask argument cannot be block type if pointer argument is not a block") + if other and other.type.is_block(): + raise ValueError("Other argument cannot be block type if pointer argument is not a block") + + # Make `mask` and `other` into the same shape as `ptr` + if ptr.type.is_block(): + if mask: + mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder) + if other: + other = broadcast_impl_shape(other, ptr.type.get_block_shapes(), builder) + + # Get `pointer_type` and `elt_ty` + ptr_ty = ptr.type.scalar + elt_ty = ptr_ty.element_ty + + # Treat `pointer_type` as `pointer_type` + if elt_ty == tl.int1: + elt_ty = tl.int8 + ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space) + ptr = cast(ptr, ptr_ty, builder) + + # Cast `other` into `ele_ty` type + if other: + other = cast(other, elt_ty, builder) + + # Create loaded result type `dst_ty` + if ptr.type.is_block(): + shape = ptr.type.get_block_shapes() + dst_ty = tl.block_type(elt_ty, shape) + else: + # Load by de-referencing the pointer of scalar + dst_ty = elt_ty + + # Build IR + if not mask: + return tl.tensor(builder.create_load(ptr.handle, cache, eviction, is_volatile), dst_ty) + else: + return tl.tensor( + builder.create_masked_load(ptr.handle, mask.handle, other.handle if other else None, cache, eviction, + is_volatile), dst_ty) + + +def load(ptr: tl.tensor, mask: Optional[tl.tensor], other: Optional[tl.tensor], boundary_check, padding_option: str, + cache_modifier: str, eviction_policy: str, is_volatile: bool, builder: ir.builder) -> tl.tensor: + # Cache, eviction and padding options + cache = _str_to_load_cache_modifier(cache_modifier) + eviction = _str_to_eviction_policy(eviction_policy) + padding = _str_to_padding_option(padding_option) + + if ptr.type.is_ptr() and ptr.type.element_ty.is_block(): + # Load by a block pointer: `pointer_type>` + return _load_block_pointer(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder) + else: + # Load by a tensor of pointers or a pointer of scalar: `block_type>` or `pointer_type<>` + return _load_legacy(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder) + + +def _store_block_pointer(ptr, val, mask, boundary_check, cache, eviction, builder): + # Store by a block pointer: `pointer_type>` + # Block pointers can not have the `mask` argument + if mask: + raise ValueError("`mask` and `other` arguments cannot be specified for loading block pointers") + + # Check same shape and element type + block_shape = ptr.type.element_ty.get_block_shapes() + if not val.type.is_block(): + val = broadcast_impl_shape(val, block_shape, builder) + assert val.type.is_block(), "Value argument must be block type or a scalar" + assert block_shape == val.type.get_block_shapes( + ), f"Block shape({block_shape}) and value shape({val.type.get_block_shapes()}) mismatch" + assert ptr.type.element_ty.element_ty == val.type.element_ty, f"Block element type({ptr.type.element_ty.element_ty}) and value element type({val.type.element_ty}) mismatch" + + elt_ty = ptr.type.element_ty.element_ty + assert elt_ty != tl.int1, "`tl.int1` should be rewrited in `tl.make_block_ptr`" + + # Check `boundary_check` argument + boundary_check = _canonicalize_boundary_check(boundary_check, block_shape) + + # 
Build IR + return tl.tensor(builder.create_tensor_pointer_store(ptr.handle, val.handle, boundary_check, cache, eviction), + tl.void) + + +def _store_legacy(ptr, val, mask, boundary_check, cache, eviction, builder): + # Store by a tensor of pointers or a pointer of scalar: `block_type>` or `pointer_type<>` + if not ptr.type.scalar.is_ptr(): + raise ValueError(f"Unsupported ptr type {ptr.type.__repr__()} in `tl.store`") + + # Check `boundary_check` argument + if boundary_check: + raise ValueError("`boundary_check` argument is not supported for storing a tensor of pointers or storing a " + "scalar. Because the compiler does not know the boundary; please use block pointers " + "(defined by `make_block_ptr`) instead") + + # For a pointer of scalar, check the type of `val` and `mask` + if not ptr.type.is_block(): + if val.type.is_block(): + raise ValueError("Value argument cannot be block type if pointer argument is not a block") + if mask and mask.type.is_block(): + raise ValueError("Mask argument cannot be block type if pointer argument is not a block") + + # Make `mask` and `val` into the same shape as `ptr` + if ptr.type.is_block(): + val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder) + if mask: + mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder) + + ptr_ty = ptr.type.scalar + elt_ty = ptr_ty.element_ty + + # Treat `pointer_type` as `pointer_type` + if elt_ty == tl.int1: + elt_ty = tl.int8 + ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space) + ptr = cast(ptr, ptr_ty, builder) + + # Cast to target data type + val = cast(val, elt_ty, builder) + + # Build IR + if not mask: + return tl.tensor(builder.create_store(ptr.handle, val.handle, cache, eviction), tl.void) + if not mask.type.scalar.is_bool(): + raise ValueError("Mask must have boolean scalar type") + return tl.tensor(builder.create_masked_store(ptr.handle, val.handle, mask.handle, cache, eviction), tl.void) + + +def store(ptr: tl.tensor, val: tl.tensor, mask: Optional[tl.tensor], boundary_check, cache_modifier: str, + eviction_policy: str, builder: ir.builder) -> tl.tensor: + # Cache and eviction options + cache = _str_to_store_cache_modifier(cache_modifier) + eviction = _str_to_eviction_policy(eviction_policy) + + if ptr.type.is_ptr() and ptr.type.element_ty.is_block(): + # Store by a block pointer: `pointer_type>` + return _store_block_pointer(ptr, val, mask, boundary_check, cache, eviction, builder) + else: + # Store by a tensor of pointers or a pointer of scalar: `block_type>` or `pointer_type<>` + return _store_legacy(ptr, val, mask, boundary_check, cache, eviction, builder) + + +######### +# atomic +######### + + +def atomic_cas(ptr: tl.tensor, cmp: tl.tensor, val: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + element_ty = ptr.type.scalar.element_ty + if element_ty.primitive_bitwidth not in [16, 32, 64]: + raise ValueError("atomic_cas only supports elements with width {16, 32, 64}") + return tl.tensor(builder.create_atomic_cas(ptr.handle, cmp.handle, val.handle, sem, scope), val.type) + + +def atom_red_typechecking_impl(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, op: str, + builder: ir.builder) -> Tuple[tl.tensor, tl.tensor, tl.tensor]: + if not ptr.type.scalar.is_ptr(): + raise ValueError("Pointer argument of store instruction is " + ptr.type.__repr__()) + element_ty = ptr.type.scalar.element_ty + if element_ty is tl.float16 and op != 'add': + raise ValueError("atomic_" + op + " does not support fp16") + 
if element_ty in [tl.int1, tl.int8, tl.int16, tl.bfloat16]: + raise ValueError("atomic_" + op + " does not support " + str(element_ty)) + if ptr.type.is_block(): + if mask: + mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder) + if val: + val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder) + val = cast(val, ptr.type.scalar.element_ty, builder) + if not mask: + mask_ir = builder.get_int1(True) + mask_ty = tl.int1 + if ptr.type.is_block(): + mask_ir = builder.create_splat(mask_ir, ptr.type.get_block_shapes()) + mask_ty = tl.block_type(tl.int1, ptr.type.get_block_shapes()) + mask = tl.tensor(mask_ir, mask_ty) + return ptr, val, mask + + +def atomic_max(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'max', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + sca_ty = val.type.scalar + # direct call to atomic_max for integers + if sca_ty.is_int(): + if sca_ty.is_int_signed(): + return tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, ptr.handle, val.handle, mask.handle, sem, scope), val.type) + else: + return tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX, ptr.handle, val.handle, mask.handle, sem, scope), val.type) + # for float + # return atomic_smax(i_ptr, i_val) if val >= 0 + # return atomic_umin(i_ptr, i_val) if val < 0 + if sca_ty not in {tl.float32, tl.float64}: + raise TypeError(f"atomic_max not supported for dtype {sca_ty}") + + itype = tl.int32 if sca_ty == tl.float32 else tl.float64 + zero = full([], 0.0, sca_ty, builder) + + i_val = bitcast(val, itype, builder) + i_ptr = bitcast(ptr, tl.pointer_type(itype, 1), builder) + pos = greater_equal(val, zero, builder) + neg = less_than(val, zero, builder) + pos_ret = tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, i_ptr.handle, i_val.handle, + and_(mask, pos, builder).handle, sem, scope), i_val.type) + neg_ret = tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, i_ptr.handle, i_val.handle, + and_(mask, neg, builder).handle, sem, scope), i_val.type) + ret = where(pos, pos_ret, neg_ret, builder) + return bitcast(ret, sca_ty, builder) + + +def atomic_min(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'min', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + sca_ty = val.type.scalar + # direct call to atomic_min for integers + if sca_ty.is_int(): + if sca_ty.is_int_signed(): + return tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, ptr.handle, val.handle, mask.handle, sem, scope), val.type) + else: + return tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, ptr.handle, val.handle, mask.handle, sem, scope), val.type) + # for float + # return atomic_smin(i_ptr, i_val) if val >= 0 + # return atomic_umax(i_ptr, i_val) if val < 0 + if sca_ty not in {tl.float32, tl.float64}: + raise TypeError(f"atomic_min not supported for dtype {sca_ty}") + + itype = tl.int32 if sca_ty == tl.float32 else tl.float64 + zero = full([], 0.0, sca_ty, builder) + + i_val = bitcast(val, itype, builder) + i_ptr = bitcast(ptr, tl.pointer_type(itype, 1), builder) + pos = greater_equal(val, zero, builder) + neg = less_than(val, zero, builder) + pos_ret = tl.tensor( + builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, i_ptr.handle, i_val.handle, + and_(mask, pos, builder).handle, sem, scope), i_val.type) + neg_ret = tl.tensor( + 
builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX, i_ptr.handle, i_val.handle, + and_(mask, neg, builder).handle, sem, scope), i_val.type) + ret = where(pos, pos_ret, neg_ret, builder) + return bitcast(ret, sca_ty, builder) + + +def atomic_add(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'add', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + sca_ty = val.type.scalar + op = ir.ATOMIC_OP.FADD if sca_ty.is_floating() else ir.ATOMIC_OP.ADD + return tl.tensor(builder.create_atomic_rmw(op, ptr.handle, val.handle, mask.handle, sem, scope), val.type) + + +def atomic_and(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'and', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.AND, ptr.handle, val.handle, mask.handle, sem, scope), + val.type) + + +def atomic_or(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'or', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.OR, ptr.handle, val.handle, mask.handle, sem, scope), + val.type) + + +def atomic_xor(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xor', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.XOR, ptr.handle, val.handle, mask.handle, sem, scope), + val.type) + + +def atomic_xchg(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, + builder: ir.builder) -> tl.tensor: + ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xchg', builder) + sem = _str_to_sem(sem) + scope = _str_to_scope(scope) + return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.XCHG, ptr.handle, val.handle, mask.handle, sem, scope), + val.type) + + +# ===----------------------------------------------------------------------===// +# Linear Algebra +# ===----------------------------------------------------------------------===// + + +def gpu_has_mfma() -> bool: + if not is_hip(): + return False + return True # mfma supported in ['gfx908', 'gfx90a'] + + +def mfma_supported(M, N, K, allow_tf32, ret_scalar_ty) -> bool: + if not gpu_has_mfma(): + return False + # TODO: Add check for configurations and types. + return True + + +def dot(lhs: tl.tensor, rhs: tl.tensor, acc: tl.tensor, allow_tf32: bool, max_num_imprecise_acc: int, + out_dtype: tl.dtype, builder: ir.builder) -> tl.tensor: + + def assert_dtypes_valid(lhs_dtype, rhs_dtype, options): + if not options.allow_fp8e4nv: + assert not lhs_dtype.is_fp8e4nv() and not rhs_dtype.is_fp8e4nv( + ), "Dot op does not support fp8e4nv on CUDA arch < 90" + if lhs_dtype.is_fp8() and rhs_dtype.is_fp8(): + return + assert lhs_dtype == rhs_dtype, f"First input ({lhs_dtype}) and second input ({rhs_dtype}) must have the same dtype!" 
+ else: + assert not lhs_dtype.is_fp8e4b15() and not rhs_dtype.is_fp8e4b15( + ), "Dot op does not support fp8e4b15 on CUDA arch >= 90" + assert not lhs_dtype.is_fp8e4b15x4() and not rhs_dtype.is_fp8e4b15x4( + ), "Dot op does not support fp8e4b15x4 on CUDA arch >= 90" + if lhs_dtype.is_int() or rhs_dtype.is_int(): + assert lhs_dtype == rhs_dtype, f"Both operands must be same type. First operand ({lhs_dtype}) and second operand ({rhs_dtype})" + assert lhs_dtype.is_int8() or lhs_dtype.is_uint8( + ), f"Both operands must be either int8 or uint8. Operand type ({lhs_dtype})" + elif lhs_dtype.is_fp8() or rhs_dtype.is_fp8(): + assert lhs_dtype.is_fp8e4nv() or lhs_dtype.is_fp8e5( + ), f"Only supports fp8e4nv or fp8e5. First operand ({lhs_dtype})" + assert rhs_dtype.is_fp8e4nv() or rhs_dtype.is_fp8e5( + ), f"Only supports fp8e4nv or fp8e5. Second operand ({rhs_dtype})" + else: + assert lhs_dtype.is_fp16() or lhs_dtype.is_bf16() or lhs_dtype.is_fp32() or lhs_dtype.is_int1( + ), f"Unsupported dtype {lhs_dtype}" + assert rhs_dtype.is_fp16() or rhs_dtype.is_bf16() or rhs_dtype.is_fp32() or rhs_dtype.is_int1( + ), f"Unsupported dtype {rhs_dtype}" + assert lhs_dtype == rhs_dtype, f"First input ({lhs_dtype}) and second input ({rhs_dtype}) must have the same dtype!" + + assert lhs.type.is_block() and rhs.type.is_block() + + assert_dtypes_valid(lhs.dtype, rhs.dtype, builder.options) + + assert len(lhs.shape) == 2, f"First input shape ({lhs.shape}) is not two dimensional!" + assert len(rhs.shape) == 2, f"Second input shape ({rhs.shape}) is not two dimensional!" + assert lhs.shape[1].value == rhs.shape[ + 0].value, f"First input shape ({lhs.shape}) and second input shape {rhs.shape} are not compatible for matmul (second index of first shape ({lhs.shape[1].value}) must be equal to first index of second shape ({rhs.shape[0].value})" + assert lhs.shape[0].value >= 16 and lhs.shape[1].value >= 16 \ + and rhs.shape[1].value >= 16, \ + f"All values in both first input shape ({lhs.shape}) and second input shape ({rhs.shape}) must be >= 16!" + if lhs.type.scalar.is_int(): + assert lhs.type.scalar == tl.int8, "only int8 supported!" + # TODO: This is CUDA specific, check if ROCm has the same limitation + assert lhs.shape[1].value >= 32, "small blocks not supported!" + _0 = builder.get_int32(0) + ret_scalar_ty = tl.int32 + elif out_dtype.is_bf16(): + raise ValueError( + "out_dtype=bfloat16 is unsupported. Please use out_dtype=float32/float16 and cast with `.to(tl.bfloat16)`") + elif lhs.type.scalar.is_fp32() or lhs.type.scalar.is_bf16(): + _0 = builder.get_fp32(0) + ret_scalar_ty = tl.float32 + else: + _0 = builder.get_fp16(0) if out_dtype.is_fp16() else builder.get_fp32(0) + ret_scalar_ty = out_dtype + + M = lhs.type.shape[0] + N = rhs.type.shape[1] + + # Cast operands of types f16 and i8 for configurations where FMA only supported. 
+ if is_hip() and not mfma_supported(M, N, lhs.type.shape[1], allow_tf32, ret_scalar_ty): + ret_cast_scalar_ty = tl.float32 if lhs.type.scalar.is_int() else ret_scalar_ty + lhs = cast(lhs, ret_cast_scalar_ty, builder) + rhs = cast(rhs, ret_cast_scalar_ty, builder) + if ret_cast_scalar_ty == tl.float16: + _0 = builder.create_splat(builder.get_fp16(0), [M, N]) + else: + _0 = builder.create_splat(builder.get_fp32(0), [M, N]) + ret_ty = tl.block_type(ret_cast_scalar_ty, [M, N]) + ret = tl.tensor(builder.create_dot(lhs.handle, rhs.handle, _0, allow_tf32), ret_ty) + return cast(ret, ret_scalar_ty, builder) + if is_hip() and mfma_supported(M, N, lhs.type.shape[1], allow_tf32, + ret_scalar_ty) and ret_scalar_ty.primitive_bitwidth < 32: + if lhs.type.scalar.is_int(): + ret_dot_scalar_ty = tl.int32 + _0 = builder.create_splat(builder.get_int32(0), [M, N]) + else: + ret_dot_scalar_ty = tl.float32 + _0 = builder.create_splat(builder.get_fp32(0), [M, N]) + ret_ty = tl.block_type(ret_dot_scalar_ty, [M, N]) + ret = tl.tensor(builder.create_dot(lhs.handle, rhs.handle, _0, allow_tf32), ret_ty) + return cast(ret, ret_scalar_ty, builder) + ret_ty = tl.block_type(ret_scalar_ty, [M, N]) + if acc is None: + acc_handle = builder.create_splat(_0, [M, N]) + else: + acc_handle = acc.handle + assert acc.type == ret_ty + + # max_num_imprecise_acc only applies to fp8 -> fp32 dot on sm_90 + max_num_imprecise_acc = 0 + if lhs.dtype.is_fp8() and rhs.dtype.is_fp8(): + max_num_imprecise_acc = builder.options.max_num_imprecise_acc_default + if max_num_imprecise_acc is None: + max_num_imprecise_acc = 2**30 + + return tl.tensor(builder.create_dot(lhs.handle, rhs.handle, acc_handle, allow_tf32, max_num_imprecise_acc), ret_ty) + + +# ===----------------------------------------------------------------------===// +# Indexing +# ===----------------------------------------------------------------------===// + + +def where(condition: tl.tensor, x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor: + condition = cast(condition, tl.int1, builder) + if condition.type.is_block(): + condition, x = broadcast_impl_value(condition, x, builder) + x, y = broadcast_impl_value(x, y, builder) + condition, x = broadcast_impl_value(condition, x, builder) + + x, y = binary_op_type_checking_impl(x, y, builder, True, True) + if not condition.type.is_block(): + condition, _ = broadcast_impl_value(condition, x, builder) + ret_ty = x.type + return tl.tensor(builder.create_select(condition.handle, x.handle, y.handle), ret_ty) + + +# ===----------------------------------------------------------------------===// +# Reduction +# ===----------------------------------------------------------------------=== + + +def reduction(inputs: Sequence[tl.tensor], axis: int, region_builder_fn, builder: ir.builder) -> Tuple[tl.tensor, ...]: + if axis is None: + new_inputs = [] + for i in range(len(inputs)): + new_shape = [inputs[i].numel.value] + new_inputs.append(view(inputs[i], new_shape, builder)) + inputs = tuple(new_inputs) + axis = 0 + # get result shape + shape = inputs[0].type.shape + ret_shape = [s for i, s in enumerate(shape) if i != axis] + for t in inputs: + assert t.type.shape == shape + + def wrap_tensor(x, scalar_ty): + if ret_shape: + res_ty = tl.block_type(scalar_ty, ret_shape) + else: + # 0d-tensor -> scalar + res_ty = scalar_ty + return tl.tensor(x, res_ty) + + reduce_op = builder.create_reduce([t.handle for t in inputs], axis) + region_builder_fn(reduce_op) + reduce_op.verify() + + return tuple(wrap_tensor(reduce_op.get_result(i), 
inputs[i].type.scalar) for i in range(len(inputs))) + + +# ===----------------------------------------------------------------------=== +# Associative Scan +# ===----------------------------------------------------------------------=== + + +def associative_scan(inputs: Sequence[tl.tensor], axis: int, region_builder_fn, + builder: ir.builder) -> Tuple[tl.tensor, ...]: + if len(inputs) != 1: + raise ValueError("Current implementation only support single tensor input") + shape = inputs[0].type.shape + + def wrap_tensor(x, scalar_ty): + res_ty = tl.block_type(scalar_ty, shape) + return tl.tensor(x, res_ty) + + scan_op = builder.create_scan([t.handle for t in inputs], axis) + region_builder_fn(scan_op) + scan_op.verify() + + return tuple(wrap_tensor(scan_op.get_result(i), inputs[i].type.scalar) for i in range(len(inputs))) + + +# ===----------------------------------------------------------------------=== +# Math +# ===----------------------------------------------------------------------=== + + +def _check_dtype(dtypes: List[str]) -> T: + """ + We're following libdevice's convention to check accepted data types for math functions. + It is not a good practice to support all data types as accelerators/GPUs don't support + many float16 and bfloat16 math operations. + We should let the users know that they are using and invoke explicit cast to convert + the data type to the supported one. + """ + + def wrapper(fn): + + @wraps(fn) + def check(*args, **kwargs): + # concatenate args and kwargs + all_args = list(args) + list(kwargs.values()) + for arg in [a for a in all_args if isinstance(a, tl.tensor)]: + if arg.type.scalar.name not in dtypes: + raise ValueError(f"Expected dtype {dtypes} but got {arg.type.scalar.name}") + return fn(*args, **kwargs) + + return check + + return wrapper + + +def umulhi(x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor: + x, y = binary_op_type_checking_impl(x, y, builder) + # FIXME(Keren): not portable, should be fixed + from . import math + return math.mulhi(x, y, _builder=builder) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def floor(x: tl.tensor, builder: ir.builder) -> tl.tensor: + # FIXME(Keren): not portable, should be fixed + from . 
import math + return math.floor(x, _builder=builder) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def exp(x: tl.tensor, builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_exp(x.handle), x.type) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def log(x: tl.tensor, builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_log(x.handle), x.type) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def cos(x: tl.tensor, builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_cos(x.handle), x.type) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def sin(x: tl.tensor, builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_sin(x.handle), x.type) + + +@_check_dtype(dtypes=["fp32", "fp64"]) +def sqrt(x: tl.tensor, builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_sqrt(x.handle), x.type) + + +def abs(x: tl.tensor, builder: ir.builder) -> tl.tensor: + dtype = x.dtype + if dtype.is_floating(): + return tl.tensor(builder.create_fabs(x.handle), x.type) + elif dtype.is_int_signed(): + return tl.tensor(builder.create_iabs(x.handle), x.type) + elif dtype.is_int_unsigned(): + return x # no-op + else: + assert False, f"Unexpected dtype {dtype}" + + +## + + +def multiple_of(x: tl.tensor, values: List[int]) -> tl.tensor: + if max(1, len(x.shape)) != len(values): + raise ValueError("Shape of input to multiple_of does not match the length of values") + x.handle.set_attr("tt.divisibility", ir.make_attr(values, x.handle.get_context())) + return x + + +def max_contiguous(x: tl.tensor, values: List[int]) -> tl.tensor: + if len(x.shape) != len(values): + raise ValueError("Shape of input to max_contiguous does not match the length of values") + x.handle.set_attr("tt.contiguity", ir.make_attr(values, x.handle.get_context())) + return x + + +def max_constancy(x: tl.tensor, values: List[int]) -> tl.tensor: + if len(x.shape) != len(values): + raise ValueError("Shape of input to max_constancy does not match the length of values") + x.handle.set_attr("tt.constancy", ir.make_attr(values, x.handle.get_context())) + return x + + +def debug_barrier(builder: ir.builder) -> tl.tensor: + return tl.tensor(builder.create_barrier(), tl.void) + + +def device_print(prefix: str, args: List[tl.tensor], builder: ir.builder) -> tl.tensor: + # It makes sense visually for prefix to end in ": "; make it so. Also, + # non-empty prefixes should start with " ". 
+ if not prefix.endswith(" ") and args: + prefix += " " + if not prefix.endswith(": ") and args: + prefix = prefix[:-1] + ": " + if len(prefix) > 2 and not prefix.startswith(" "): + prefix = " " + prefix + + new_args = [] + for arg in args: + new_args.append(arg.handle) + return tl.tensor(builder.create_print(prefix, new_args), tl.void) + + +def device_assert(cond: tl.tensor, msg: str, file_name: str, func_name, lineno: int, builder: ir.builder) -> tl.tensor: + cond_ty = cond.type + if not cond_ty.is_block(): + cond_ty = tl.block_type(cond_ty.scalar, (1, )) + cond = tl.tensor(builder.create_splat(cond.handle, (1, )), cond_ty) + return tl.tensor(builder.create_assert(cond.handle, msg, file_name, func_name, lineno), tl.void) + + +def _convert_elem_to_ir_value(builder, elem, require_i64): + if isinstance(elem, int): + elem = tl.constexpr(elem) + if isinstance(elem, tl.constexpr): + return builder.get_int64(elem.value) if require_i64 else builder.get_int32(elem.value) + elif isinstance(elem, tl.tensor): + assert elem.numel.value == 1, "Expected a scalar in shape/strides/offsets" + assert elem.dtype.is_int(), "Expected an integer scalar type in shape/strides/offsets" + if elem.dtype != tl.int64 and require_i64: + return builder.create_int_cast(elem.handle, builder.get_int64_ty(), elem.dtype.is_int_signed()) + elif elem.dtype != tl.int32: + return builder.create_int_cast(elem.handle, builder.get_int32_ty(), elem.dtype.is_int_signed()) + return elem.handle + assert False, f"Unsupported element type in shape/strides/offsets: {type(elem)}" + + +def _convert_to_ir_values(builder, list_like, require_i64=True): + if hasattr(list_like, "__iter__"): + return [_convert_elem_to_ir_value(builder, elem, require_i64) for elem in list_like] + return [_convert_elem_to_ir_value(builder, list_like, require_i64)] + + +def make_block_ptr(base: tl.tensor, shape, strides, offsets, block_shape, order, builder: ir.builder) -> tl.tensor: + # Convert dynamic arguments to IR values + # NOTES(Chenggang): current `shape/strides` are `int64_t`, while `offsets/block_shape` are `int32_t` + shape = _convert_to_ir_values(builder, shape) + strides = _convert_to_ir_values(builder, strides) + offsets = _convert_to_ir_values(builder, offsets, require_i64=False) + + # Check `base` type + if not base.type.is_ptr() or base.type.element_ty.is_block(): + raise ValueError("Expected `base` to be a pointer type (but not a block pointer type or others)") + + # Treat `pointer_type` as `pointer_type` + if base.type.element_ty == tl.int1: + base = cast(base, tl.pointer_type(tl.int8, base.type.address_space), builder) + + # Check whether `block_shape` is static + if not hasattr(block_shape, "__iter__"): + block_shape = [block_shape] + block_shape = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in block_shape] + assert all([isinstance(elem, int) and -2**31 <= elem < 2**31 for elem in block_shape]), \ + "Expected a list of constant integers (`int32_t` range) in `block_shape`" + + # Check `order` + if not hasattr(order, "__iter__"): + order = [order] + order = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in order] + assert sorted(order) == list(range(len(order))), "Expected a permutation of (0, 1, ..., len(order)-1) in order" + + # Must have same length + assert all([len(block_shape) == len(list_like) for list_like in [shape, strides, offsets, order]]), \ + "Expected shape/strides/offsets/block_shape to have the same length" + + # Build value, the type is: + # `pointer_type>` in Python + # `tt.ptr>` in MLIR + 
handle = builder.create_make_block_ptr(base.handle, shape, strides, offsets, block_shape, order) + return tl.tensor(handle, tl.pointer_type(tl.block_type(base.type.element_ty, block_shape))) + + +def advance(base: tl.tensor, offsets, builder: ir.builder) -> tl.tensor: + # Convert dynamic offsets to IR values + offsets = _convert_to_ir_values(builder, offsets, require_i64=False) + + # Advanced block pointer type is the same as before + return tl.tensor(builder.create_advance(base.handle, offsets), base.type) diff --git a/venv/lib/python3.10/site-packages/triton/language/standard.py b/venv/lib/python3.10/site-packages/triton/language/standard.py new file mode 100644 index 0000000000000000000000000000000000000000..c211655b88bb024cdacb661701cc459754fedbe1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/triton/language/standard.py @@ -0,0 +1,404 @@ +from __future__ import annotations + +from ..runtime.jit import jit +from . import core, math + +# ----------------------- +# Standard library +# ----------------------- + + +@jit +def cdiv(x, div): + """ + Computes the ceiling division of :code:`x` by :code:`div` + + :param x: the input number + :type x: Block + :param div: the divisor + :param div: Block + """ + return (x + div - 1) // div + + +@jit +@core._add_math_1arg_docstr("sigmoid") +def sigmoid(x): + return 1 / (1 + core.exp(-x)) + + +@jit +@core._add_math_1arg_docstr("softmax") +def softmax(x, ieee_rounding=False): + z = x - max(x, 0) + num = core.exp(z) + den = sum(num, 0) + return core.fdiv(num, den, ieee_rounding) + + +@jit +def ravel(x): + """ + Returns a contiguous flattened view of :code:`x`. + + :param x: the input tensor + :type x: Block + """ + return core.view(x, [x.numel]) + + +@jit +def swizzle2d(i, j, size_i, size_j, size_g): + """ + Transforms indices of a row-major size_i*size_j matrix into those + of one where indices are row major for each group of size_j rows. + For example, for size_i = size_j = 4 and size_g = 2, it will transform + [[0 , 1 , 2 , 3 ], + [4 , 5 , 6 , 7 ], + [8 , 9 , 10, 11], + [12, 13, 14, 15]] + into + [[0, 2, 4 , 6 ], + [1, 3, 5 , 7 ], + [8, 10, 12, 14], + [9, 11, 13, 15]] + """ + # "unrolled index in array" + ij = i * size_j + j + # number of elements in `size_g` groups + # of `size_j` columns + size_gj = size_g * size_j + # index of the group in which (i,j) is + group_id = ij // size_gj + # row-index of the first element of this group + off_i = group_id * size_g + # last group may have fewer rows + size_g = minimum(size_i - off_i, size_g) + # new row and column indices + new_i = off_i + (ij % size_g) + new_j = (ij % size_gj) // size_g + return new_i, new_j + + +@jit +def zeros(shape, dtype): + """ + Returns a tensor filled with the scalar value 0 for the given :code:`shape` and :code:`dtype`. + + :param shape: Shape of the new array, e.g., (8, 16) or (8, ) + :type shape: tuple of ints + :param dtype: Data-type of the new array, e.g., :code:`tl.float16` + :type dtype: DType + """ + return core.full(shape, 0, dtype) + + +@jit +def zeros_like(input): + return zeros(input.shape, input.dtype) + + +@jit +def minimum(x, y): + """ + Computes the element-wise minimum of :code:`x` and :code:`y`. + + :param input: the first input tensor + :type input: Block + :param other: the second input tensor + :type other: Block + """ + return math.min(x, y) + + +@jit +def maximum(x, y): + """ + Computes the element-wise maximum of :code:`x` and :code:`y`. 
+ + :param input: the first input tensor + :type input: Block + :param other: the second input tensor + :type other: Block + """ + return math.max(x, y) + + +# max and argmax + + +@jit +def _argmax_combine(value1, index1, value2, index2, tie_break_left): + if tie_break_left: + tie = value1 == value2 and index1 < index2 + else: + tie = False + gt = value1 > value2 or tie + v_ret = core.where(gt, value1, value2) + i_ret = core.where(gt, index1, index2) + return v_ret, i_ret + + +@jit +def _argmax_combine_tie_break_left(value1, index1, value2, index2): + return _argmax_combine(value1, index1, value2, index2, True) + + +@jit +def _argmax_combine_tie_break_fast(value1, index1, value2, index2): + return _argmax_combine(value1, index1, value2, index2, False) + + +@jit +@core._add_reduction_docstr("maximum", return_indices_arg="return_indices", + tie_break_arg="return_indices_tie_break_left") +def max(input, axis=None, return_indices=False, return_indices_tie_break_left=True): + input = core._promote_reduction_input(input) + if return_indices: + if return_indices_tie_break_left: + return core._reduce_with_indices(input, axis, _argmax_combine_tie_break_left) + else: + return core._reduce_with_indices(input, axis, _argmax_combine_tie_break_fast) + else: + if core.constexpr(input.dtype.primitive_bitwidth) < core.constexpr(32): + if core.constexpr(input.dtype.is_floating()): + input = input.to(core.float32) + else: + assert input.dtype.is_integer_type() + input = input.to(core.int32) + return core.reduce(input, axis, maximum) + + +@jit +@core._add_reduction_docstr("maximum index", tie_break_arg="tie_break_left") +def argmax(input, axis, tie_break_left=True): + (_, ret) = max(input, axis, return_indices=True, return_indices_tie_break_left=tie_break_left) + return ret + + +# min and argmin + + +@jit +def _argmin_combine(value1, index1, value2, index2, tie_break_left): + if tie_break_left: + tie = value1 == value2 and index1 < index2 + else: + tie = False + lt = value1 < value2 or tie + value_ret = core.where(lt, value1, value2) + index_ret = core.where(lt, index1, index2) + return value_ret, index_ret + + +@jit +def _argmin_combine_tie_break_left(value1, index1, value2, index2): + return _argmin_combine(value1, index1, value2, index2, True) + + +@jit +def _argmin_combine_tie_break_fast(value1, index1, value2, index2): + return _argmin_combine(value1, index1, value2, index2, False) + + +@jit +@core._add_reduction_docstr("minimum", return_indices_arg="return_indices", + tie_break_arg="return_indices_tie_break_left") +def min(input, axis=None, return_indices=False, return_indices_tie_break_left=True): + input = core._promote_reduction_input(input) + if return_indices: + if return_indices_tie_break_left: + return core._reduce_with_indices(input, axis, _argmin_combine_tie_break_left) + else: + return core._reduce_with_indices(input, axis, _argmin_combine_tie_break_fast) + else: + if core.constexpr(input.dtype.primitive_bitwidth) < 32: + if core.constexpr(input.dtype.is_floating()): + input = input.to(core.float32) + else: + assert input.dtype.is_integer_type() + input = input.to(core.int32) + return core.reduce(input, axis, minimum) + + +@jit +@core._add_reduction_docstr("minimum index", tie_break_arg="tie_break_left") +def argmin(input, axis, tie_break_left=True): + _, ret = min(input, axis, return_indices=True, return_indices_tie_break_left=tie_break_left) + return ret + + +@jit +def _sum_combine(a, b): + return a + b + + +# sum + + +@jit +@core._add_reduction_docstr("sum") +def sum(input, axis=None): + 
input = core._promote_reduction_input(input) + return core.reduce(input, axis, _sum_combine) + + +@jit +def _xor_combine(a, b): + return a ^ b + + +# xor sum + + +@core.builtin +@core._add_reduction_docstr("xor sum") +def xor_sum(input, axis=None, _builder=None, _generator=None): + scalar_ty = input.type.scalar + if not scalar_ty.is_int(): + raise ValueError("xor_sum only supported for integers") + + input = core._promote_reduction_input(input, _builder=_builder) + return core.reduce(input, axis, _xor_combine, _builder=_builder, _generator=_generator) + + +# cumsum + + +@jit +@core._add_scan_docstr("cumsum") +def cumsum(input, axis=0): + # todo rename this to a generic function name + input = core._promote_reduction_input(input) + return core.associative_scan(input, axis, _sum_combine) + + +# cumprod + + +@jit +def _prod_combine(a, b): + return a * b + + +@jit +@core._add_scan_docstr("cumprod") +def cumprod(input, axis=0): + # todo rename this to a generic function name + input = core._promote_reduction_input(input) + return core.associative_scan(input, axis, _prod_combine) + + +# sort + + +@jit +def _indicator(n_dims: core.constexpr, idx: core.constexpr, pos: core.constexpr): + core.static_assert(idx < n_dims) + core.static_assert((pos == 0) or (pos == 1)) + y = core.arange(0, 2) + if pos == 0: + y = 1 - y + + for n in core.static_range(0, n_dims): + if n != n_dims - 1 - idx: + y = core.expand_dims(y, n) + return y + + +@jit +def _take_slice(x, n_dims: core.constexpr, idx: core.constexpr, pos: core.constexpr, keep_dim: core.constexpr = True): + y = sum(x * _indicator(n_dims, idx, pos), n_dims - 1 - idx) + if keep_dim: + y = core.expand_dims(y, n_dims - 1 - idx) + + return y + + +@jit +def _compare_and_swap(x, desc_mask, n_dims: core.constexpr, idx: core.constexpr): + l = _take_slice(x, n_dims, idx, 0) + r = _take_slice(x, n_dims, idx, 1) + + x_int = x + l_int = l + r_int = r + if x.dtype.is_floating(): + if core.constexpr(x.dtype.primitive_bitwidth) == 16: + dtype_int = core.int16 + elif core.constexpr(x.dtype.primitive_bitwidth) == 32: + dtype_int = core.int32 + elif core.constexpr(x.dtype.primitive_bitwidth) == 64: + dtype_int = core.int64 + else: + raise ValueError("Unsupported dtype") + x_int = x.to(dtype_int, bitcast=True) + l_int = l.to(dtype_int, bitcast=True) + r_int = r.to(dtype_int, bitcast=True) + desc_mask = desc_mask.to(x_int.dtype) + zero = zeros_like(x_int) + y = x_int ^ core.where((l > r) ^ desc_mask, l_int ^ r_int, zero) + y = y.to(x.dtype, bitcast=True) + return y + + +@jit +def _bitonic_merge(x, n_dims: core.constexpr, active_dims: core.constexpr, order_type: core.constexpr): + ''' + order_type 0 == ascending + order_type 1 == descending + order_type 2 == alternating + ''' + core.static_assert(active_dims <= n_dims) + + if order_type == 2: + desc_mask = _indicator(n_dims, active_dims, 1) + else: + desc_mask = order_type + + for i in core.static_range(active_dims): + x = _compare_and_swap(x, desc_mask, n_dims, active_dims - 1 - i) + + return x + + +def _log2(i: core.constexpr): + log2 = 0 + n = i.value + while n > 1: + n >>= 1 + log2 += 1 + return core.constexpr(log2) + + +def _is_power_of_two(i: core.constexpr): + n = i.value + return core.constexpr((n & (n - 1)) == 0 and n != 0) + + +def _unwrap_if_constexpr(o): + return o.value if isinstance(o, core.constexpr) else o + + +def _get_sort_dim(dim, shape): + dim = _unwrap_if_constexpr(dim) + shape = _unwrap_if_constexpr(shape) + if dim is None: + dim = len(shape) - 1 + assert dim == len(shape) - 1, "Currently only support 
sorting on the last dimension"
+    return core.constexpr(dim)
+
+
+@jit
+def sort(x, dim=None, descending: core.constexpr = 0):
+    core.static_assert(_is_power_of_two(x.shape[_get_sort_dim(dim, x.shape)]))
+    core.static_assert(_is_power_of_two(x.numel))
+    # reshape the tensor to have all dimensions be 2.
+    # TODO: We shouldn't have to change the dimensions not sorted.
+    y = core.reshape(x, [2] * _log2(x.numel))
+    for i in core.static_range(1, _log2(x.shape[_get_sort_dim(dim, x.shape)]) + 1):
+        y = _bitonic_merge(y, _log2(x.numel), i, (descending if
+                                                  (i == _log2(x.shape[_get_sort_dim(dim, x.shape)])) else 2))
+
+    x = core.reshape(y, x.shape)
+    return x
diff --git a/venv/lib/python3.10/site-packages/triton/ops/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/ops/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01525cfeb575bf103e3a6f0f38d8365e6b63131f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/ops/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/triton/ops/__pycache__/cross_entropy.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/ops/__pycache__/cross_entropy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e758662b45080189b4ca4d2b8b58ea9e65c2db6a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/ops/__pycache__/cross_entropy.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/triton/ops/__pycache__/flash_attention.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/ops/__pycache__/flash_attention.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f1f762d3e7e39e85f6c18413ecb11b333a24d1a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/ops/__pycache__/flash_attention.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/triton/ops/__pycache__/matmul.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/ops/__pycache__/matmul.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dfaae94c22b99d94c1a4224b7c27e002f1e49b6c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/ops/__pycache__/matmul.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/triton/ops/__pycache__/matmul_perf_model.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/ops/__pycache__/matmul_perf_model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..718386a63839fd5eb41152e66ac172e6579eefd8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/ops/__pycache__/matmul_perf_model.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8330868475494cb7a7f48eee813b1894ee96744f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/matmul.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/matmul.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26eef13f79074564078b013f610c61eb84c62df2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/matmul.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/softmax.cpython-310.pyc b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/softmax.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b01ce1303acafd60a46bec50af748c75eff80fcc
Binary files /dev/null and b/venv/lib/python3.10/site-packages/triton/ops/blocksparse/__pycache__/softmax.cpython-310.pyc differ
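Illustrative usage sketch (not part of the diff above): the standard-library helpers added in triton/language/standard.py (softmax, which itself builds on max and sum) are device-side functions meant to be called from a @triton.jit kernel. The sketch below assumes the standard-library softmax is re-exported as tl.softmax, as in upstream Triton; the kernel, tensor names, and block size are hypothetical.

import torch
import triton
import triton.language as tl


@triton.jit
def row_softmax_kernel(x_ptr, out_ptr, n_cols, BLOCK: tl.constexpr):
    # one program instance handles one row of a contiguous (n_rows, n_cols) matrix
    row = tl.program_id(0)
    offs = tl.arange(0, BLOCK)
    mask = offs < n_cols
    # out-of-range lanes get -inf so they do not affect the row maximum
    x = tl.load(x_ptr + row * n_cols + offs, mask=mask, other=-float("inf"))
    # standard-library softmax from the diff: exp(x - max(x)) / sum(exp(x - max(x)))
    y = tl.softmax(x)
    tl.store(out_ptr + row * n_cols + offs, y, mask=mask)


x = torch.randn(64, 128, device="cuda")
out = torch.empty_like(x)
# launch one program per row; BLOCK must be a power of two >= n_cols
row_softmax_kernel[(x.shape[0], )](x, out, x.shape[1], BLOCK=128)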