diff --git a/ckpts/universal/global_step20/zero/17.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/17.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..89a28ec4207011c382764a1bb3fe267cf8dc5b64 --- /dev/null +++ b/ckpts/universal/global_step20/zero/17.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7382d49ddd28f1cacc4b66771639513acf212e04660d45024888334326a3bdd +size 16778396 diff --git a/ckpts/universal/global_step20/zero/23.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/23.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..2d17ed5e0c583ecdb076b7e77d456e527fb4f177 --- /dev/null +++ b/ckpts/universal/global_step20/zero/23.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66bef9ad7961d6e03a9c6362bb57b400cb4177a0d282deab869aa80941b3c4e1 +size 16778411 diff --git a/ckpts/universal/global_step20/zero/8.attention.dense.weight/fp32.pt b/ckpts/universal/global_step20/zero/8.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..54d1045b0e169cf4c26013960792766e7ae9296a --- /dev/null +++ b/ckpts/universal/global_step20/zero/8.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97c92885a589fb4e9fedbd90a805b8872e7e1af9ffe76c0e3beede1b0cba1513 +size 16778317 diff --git a/ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..91ad1072b14bb7b29d0224248e0969a159c4cebe --- /dev/null +++ b/ckpts/universal/global_step20/zero/9.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83b78ee61ab8dddb7260f9c8ca4e1f52052d5dd424cdc46b682766ab8ded810f +size 33555533 diff --git a/lm-evaluation-harness/tests/testdata/gguf_test_44e268d15decc4d2d0f99e57e1476269826cd3b54262f7a0981f75ddd45b25d0.pkl b/lm-evaluation-harness/tests/testdata/gguf_test_44e268d15decc4d2d0f99e57e1476269826cd3b54262f7a0981f75ddd45b25d0.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f6d36557909011f0a91489f212de38cac70d0f4a --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/gguf_test_44e268d15decc4d2d0f99e57e1476269826cd3b54262f7a0981f75ddd45b25d0.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d672564bf8af93738531c26d02efda9ed844d1fdcdd619a06e37fce7ef6d8485 +size 153 diff --git a/lm-evaluation-harness/tests/testdata/gguf_test_52ea409606de8755e03cf7c79f824101a4ce64bb6e6d3df556b8a4e7a5d92418.pkl b/lm-evaluation-harness/tests/testdata/gguf_test_52ea409606de8755e03cf7c79f824101a4ce64bb6e6d3df556b8a4e7a5d92418.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f468bb46d3da891a285c615b25de9b2d99a7fd8d --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/gguf_test_52ea409606de8755e03cf7c79f824101a4ce64bb6e6d3df556b8a4e7a5d92418.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4f122bfaa24901cff1ee686da0cf49ade7b6877c31a3daeb32c8cf2e328a77e +size 153 diff --git a/lm-evaluation-harness/tests/testdata/gguf_test_8fcf3f2f52afeb2acd7c8e02c2cc3ce31a691b665d295f6c4e4bbd71c7caa1a2.pkl 
b/lm-evaluation-harness/tests/testdata/gguf_test_8fcf3f2f52afeb2acd7c8e02c2cc3ce31a691b665d295f6c4e4bbd71c7caa1a2.pkl new file mode 100644 index 0000000000000000000000000000000000000000..057a5cb04f22d7f69cd4b516c0e4507df78b4148 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/gguf_test_8fcf3f2f52afeb2acd7c8e02c2cc3ce31a691b665d295f6c4e4bbd71c7caa1a2.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f79475c06a8800d8abef183b690409f304e0a6963681965f6caba1ca985b243 +size 532 diff --git a/lm-evaluation-harness/tests/testdata/textsynth_test_3092d07756f3e1d010c07524cc8a2ecba7f0c19f9e39f2aaf2bf440bfe328004.pkl b/lm-evaluation-harness/tests/testdata/textsynth_test_3092d07756f3e1d010c07524cc8a2ecba7f0c19f9e39f2aaf2bf440bfe328004.pkl new file mode 100644 index 0000000000000000000000000000000000000000..a1c496a0f45e0b5e23c34a5650fc852349d20ad8 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/textsynth_test_3092d07756f3e1d010c07524cc8a2ecba7f0c19f9e39f2aaf2bf440bfe328004.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5428228cd48e036420c17844c421eb782a6471cd77075120acde387a4e176ab2 +size 2003 diff --git a/lm-evaluation-harness/tests/testdata/textsynth_test_49c47ae40e11f349f2f6b492128188b1b2bc103a421c676ee4b2142a68b43516.pkl b/lm-evaluation-harness/tests/testdata/textsynth_test_49c47ae40e11f349f2f6b492128188b1b2bc103a421c676ee4b2142a68b43516.pkl new file mode 100644 index 0000000000000000000000000000000000000000..7b02bf1e3c27ff9bb640fe9dc91423dc62db7a90 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/textsynth_test_49c47ae40e11f349f2f6b492128188b1b2bc103a421c676ee4b2142a68b43516.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e93fb2906d04f52b903c5e359f942fb6b0d70ab7be0254409f3be70e3cd45dd0 +size 2467 diff --git a/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3aba19809ca1f6095a1e6c1f170bbdf439e53647 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716469692 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/output.log b/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..803f1e08bf3d3a2e6fdddcd13cd8d74bef60a0ad --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:13:08:13,476 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:13:08:21,942 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:13:08:21,943 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:13:08:21,944 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': 
'/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step22000'} +2024-05-23:13:08:24,232 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step22000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step22000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..4361e1a31291fb5f457399fbc2aaf17cf4309357 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T13:08:13.268953", + "startedAt": "2024-05-23T13:08:12.700786", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step22000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.5000124999997, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.62897872924805 + } + }, + "memory": { + "total": 1007.4379539489746 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130812-gglzto8s/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240529_130623-ufhzqpyv/files/config.yaml b/lm-evaluation-harness/wandb/run-20240529_130623-ufhzqpyv/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ba4c91b700c93e092af5986329943860256fd57 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130623-ufhzqpyv/files/config.yaml @@ -0,0 +1,283 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716987983 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 2 + - 23 + - 62 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 +task_configs: + desc: null + value: + arc_easy: + task: arc_easy + group: + - ai2_arc + dataset_path: allenai/ai2_arc + dataset_name: ARC-Easy + training_split: train + validation_split: validation + test_split: test + doc_to_text: 'Question: {{question}} + + Answer:' + doc_to_target: '{{choices.label.index(answerKey)}}' + doc_to_choice: '{{choices.text}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: 'Question: {{question}} + + Answer:' + metadata: + version: 1.0 + boolq: + task: boolq + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: boolq + training_split: train + validation_split: validation + doc_to_text: '{{passage}} + + Question: {{question}}? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: passage + metadata: + version: 2.0 + copa: + task: copa + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: copa + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n # Drop the period\n connector =\ + \ {\n \"cause\": \"because\",\n \"effect\": \"therefore\",\n\ + \ }[doc[\"question\"]]\n return doc[\"premise\"].strip()[:-1] + f\"\ + \ {connector}\"\n" + doc_to_target: "def doc_to_target(doc):\n correct_choice = doc[\"choice1\"\ + ] if doc[\"label\"] == 0 else doc[\"choice2\"]\n # Connect the sentences\n\ + \ return \" \" + convert_choice(correct_choice)\n" + doc_to_choice: "def doc_to_choice(doc):\n return [\" \" + convert_choice(doc[\"\ + choice1\"]), \" \" + convert_choice(doc[\"choice2\"])]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + mrpc: + task: mrpc + group: glue + dataset_path: glue + dataset_name: mrpc + training_split: train + validation_split: validation + doc_to_text: 'Sentence 1: {{sentence1}} + + Sentence 2: {{sentence2}} + + Question: Do both sentences mean the same thing? + + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + - metric: f1 + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + piqa: + task: piqa + dataset_path: piqa + training_split: train + validation_split: validation + doc_to_text: 'Question: {{goal}} + + Answer:' + doc_to_target: label + doc_to_choice: '{{[sol1, sol2]}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: goal + metadata: + version: 1.0 + sst2: + task: sst2 + group: glue + dataset_path: glue + dataset_name: sst2 + training_split: train + validation_split: validation + doc_to_text: '{{sentence}} + + Question: Is this sentence positive or negative? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - negative + - positive + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + winogrande: + task: winogrande + dataset_path: winogrande + dataset_name: winogrande_xl + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n\ + \ return answer_to_num[doc[\"answer\"]]\n" + doc_to_target: "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"\ + _\") + 1\n return doc[\"sentence\"][idx:].strip()\n" + doc_to_choice: "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"\ + _\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"\ + sentence\"][:idx] + opt for opt in options]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: sentence + metadata: + version: 1.0 +cli_configs: + desc: null + value: + model: hf + model_args: pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint/llamav2-3b/hf/global_step50000,tokenizer=huggyllama/llama-13b + batch_size: auto + batch_sizes: + - 64 + device: null + use_cache: null + limit: null + bootstrap_iters: 100000 + gen_kwargs: null diff --git a/lm-evaluation-harness/wandb/run-20240529_130623-ufhzqpyv/files/output.log b/lm-evaluation-harness/wandb/run-20240529_130623-ufhzqpyv/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..efd8d135bc30982ef2f6bd884347f61ef4ddfdb5 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130623-ufhzqpyv/files/output.log @@ -0,0 +1,474 @@ + +2024-05-29:13:06:24,468 INFO [__main__.py:251] Verbosity set to INFO +2024-05-29:13:06:33,992 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-05-29:13:06:33,994 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-29:13:06:33,994 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint/llamav2-3b/hf/global_step50000', 'tokenizer': 'huggyllama/llama-13b'} +2024-05-29:13:06:36,305 INFO [huggingface.py:164] Using device 'cuda' +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Downloading readme: 100%|██████████| 9.00k/9.00k [00:00<00:00, 17.1MB/s] +Downloading data: 100%|██████████| 331k/331k [00:00<00:00, 4.29MB/s] +Downloading data: 100%|██████████| 346k/346k [00:00<00:00, 4.54MB/s] +Downloading data: 100%|██████████| 86.1k/86.1k [00:00<00:00, 1.12MB/s] +Generating train split: 100%|██████████| 2251/2251 [00:00<00:00, 116720.18 examples/s] +Generating test split: 100%|██████████| 2376/2376 [00:00<00:00, 329497.98 examples/s] +Generating validation split: 100%|██████████| 570/570 [00:00<00:00, 199862.34 examples/s] +2024-05-29:13:07:07,312 WARNING [task.py:763] [Task: boolq] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:07:07,312 WARNING [task.py:775] [Task: boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for super_glue contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/super_glue +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 30.7k/30.7k [00:00<00:00, 40.1MB/s] +Downloading readme: 100%|██████████| 18.2k/18.2k [00:00<00:00, 30.1MB/s] +Downloading data: 100%|██████████| 4.12M/4.12M [00:00<00:00, 93.2MB/s] +Generating train split: 100%|██████████| 9427/9427 [00:00<00:00, 22315.67 examples/s] +Generating validation split: 100%|██████████| 3270/3270 [00:00<00:00, 22508.50 examples/s] +Generating test split: 100%|██████████| 3245/3245 [00:00<00:00, 23291.16 examples/s] +2024-05-29:13:07:11,191 WARNING [task.py:763] [Task: copa] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:07:11,191 WARNING [task.py:775] [Task: copa] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 44.0k/44.0k [00:00<00:00, 1.25MB/s] +Generating train split: 100%|██████████| 400/400 [00:00<00:00, 16405.95 examples/s] +Generating validation split: 100%|██████████| 100/100 [00:00<00:00, 12704.62 examples/s] +Generating test split: 100%|██████████| 500/500 [00:00<00:00, 17504.42 examples/s] +2024-05-29:13:07:13,341 WARNING [task.py:763] [Task: mrpc] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:07:13,341 WARNING [task.py:775] [Task: mrpc] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +2024-05-29:13:07:13,342 WARNING [task.py:763] [Task: mrpc] metric f1 is defined, but aggregation is not. using default aggregation=f1 +2024-05-29:13:07:13,342 WARNING [task.py:775] [Task: mrpc] metric f1 is defined, but higher_is_better is not. 
using default higher_is_better=True +Downloading readme: 100%|██████████| 35.3k/35.3k [00:00<00:00, 42.7MB/s] +Downloading data: 100%|██████████| 649k/649k [00:00<00:00, 4.51MB/s] +Downloading data: 100%|██████████| 75.7k/75.7k [00:00<00:00, 514kB/s] +Downloading data: 100%|██████████| 308k/308k [00:00<00:00, 2.09MB/s] +Generating train split: 100%|██████████| 3668/3668 [00:00<00:00, 404541.34 examples/s] +Generating validation split: 100%|██████████| 408/408 [00:00<00:00, 173839.50 examples/s] +Generating test split: 100%|██████████| 1725/1725 [00:00<00:00, 363722.82 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for piqa contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/piqa +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 5.36k/5.36k [00:00<00:00, 11.2MB/s] +Downloading readme: 100%|██████████| 8.41k/8.41k [00:00<00:00, 17.3MB/s] +Downloading data: 100%|██████████| 1.82M/1.82M [00:00<00:00, 4.23MB/s] +Downloading data: 100%|██████████| 815k/815k [00:00<00:00, 24.8MB/s] +Generating train split: 100%|██████████| 16113/16113 [00:00<00:00, 23986.43 examples/s] +Generating test split: 100%|██████████| 3084/3084 [00:00<00:00, 25107.06 examples/s] +Generating validation split: 100%|██████████| 1838/1838 [00:00<00:00, 24244.76 examples/s] +2024-05-29:13:07:24,697 WARNING [task.py:763] [Task: sst2] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:07:24,698 WARNING [task.py:775] [Task: sst2] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 3.11M/3.11M [00:00<00:00, 20.0MB/s] +Downloading data: 100%|██████████| 72.8k/72.8k [00:00<00:00, 469kB/s] +Downloading data: 100%|██████████| 148k/148k [00:00<00:00, 1.03MB/s] +Generating train split: 100%|██████████| 67349/67349 [00:00<00:00, 1417876.82 examples/s] +Generating validation split: 100%|██████████| 872/872 [00:00<00:00, 392554.80 examples/s] +Generating test split: 100%|██████████| 1821/1821 [00:00<00:00, 535236.69 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for winogrande contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/winogrande +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 5.65k/5.65k [00:00<00:00, 12.0MB/s] +Downloading readme: 100%|██████████| 9.97k/9.97k [00:00<00:00, 16.0MB/s] +Downloading data: 100%|██████████| 3.40M/3.40M [00:00<00:00, 7.05MB/s] +Generating train split: 100%|██████████| 40398/40398 [00:01<00:00, 24492.73 examples/s] +Generating test split: 100%|██████████| 1767/1767 [00:00<00:00, 24474.15 examples/s] +Generating validation split: 100%|██████████| 1267/1267 [00:00<00:00, 23429.70 examples/s] +2024-05-29:13:07:37,670 INFO [task.py:395] Building contexts for winogrande on rank 0... 
+100%|██████████| 1267/1267 [00:00<00:00, 69817.82it/s] +2024-05-29:13:07:37,759 INFO [task.py:395] Building contexts for sst2 on rank 0... +100%|██████████| 872/872 [00:00<00:00, 2577.60it/s] +2024-05-29:13:07:38,127 INFO [task.py:395] Building contexts for piqa on rank 0... +100%|██████████| 1838/1838 [00:01<00:00, 1085.85it/s] +2024-05-29:13:07:39,894 INFO [task.py:395] Building contexts for mrpc on rank 0... +100%|██████████| 408/408 [00:00<00:00, 1905.17it/s] +2024-05-29:13:07:40,127 INFO [task.py:395] Building contexts for copa on rank 0... +100%|██████████| 100/100 [00:00<00:00, 61230.72it/s] +2024-05-29:13:07:40,137 INFO [task.py:395] Building contexts for boolq on rank 0... +100%|██████████| 3270/3270 [00:01<00:00, 1918.45it/s] +2024-05-29:13:07:41,980 INFO [task.py:395] Building contexts for arc_easy on rank 0... + +100%|██████████| 2376/2376 [00:02<00:00, 1072.07it/s] +2024-05-29:13:07:44,344 INFO [evaluator.py:379] Running loglikelihood requests +Running loglikelihood requests: 0%| | 0/25011 [00:00 1024). Running this sequence through the model will result in indexing errors +Running loglikelihood requests: 0%| | 0/25011 [00:00 Optional[_T]: + """Parses the string argument and returns the native value. + + By default it returns its argument unmodified. + + Args: + argument: string argument passed in the commandline. + + Raises: + ValueError: Raised when it fails to parse the argument. + TypeError: Raised when the argument has the wrong type. + + Returns: + The parsed value in native type. + """ + if not isinstance(argument, str): + raise TypeError('flag value must be a string, found "{}"'.format( + type(argument))) + return argument + + def flag_type(self) -> Text: + """Returns a string representing the type of the flag.""" + return 'string' + + def _custom_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + """Returns a list of minidom.Element to add additional flag information. + + Args: + doc: minidom.Document, the DOM document it should create nodes from. + """ + del doc # Unused. + return [] + + +class ArgumentSerializer(Generic[_T]): + """Base class for generating string representations of a flag value.""" + + def serialize(self, value: _T) -> Text: + """Returns a serialized string of the value.""" + return str(value) + + +class NumericParser(ArgumentParser[_N]): + """Parser of numeric values. + + Parsed value may be bounded to a given upper and lower bound. + """ + + lower_bound: Optional[_N] + upper_bound: Optional[_N] + + def is_outside_bounds(self, val: _N) -> bool: + """Returns whether the value is outside the bounds or not.""" + return ((self.lower_bound is not None and val < self.lower_bound) or + (self.upper_bound is not None and val > self.upper_bound)) + + def parse(self, argument: Text) -> _N: + """See base class.""" + val = self.convert(argument) + if self.is_outside_bounds(val): + raise ValueError('%s is not %s' % (val, self.syntactic_help)) + return val + + def _custom_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = [] + if self.lower_bound is not None: + elements.append(_helpers.create_xml_dom_element( + doc, 'lower_bound', self.lower_bound)) + if self.upper_bound is not None: + elements.append(_helpers.create_xml_dom_element( + doc, 'upper_bound', self.upper_bound)) + return elements + + def convert(self, argument: Text) -> _N: + """Returns the correct numeric value of argument. 
+ + Subclass must implement this method, and raise TypeError if argument is not + string or has the right numeric type. + + Args: + argument: string argument passed in the commandline, or the numeric type. + + Raises: + TypeError: Raised when argument is not a string or the right numeric type. + ValueError: Raised when failed to convert argument to the numeric value. + """ + raise NotImplementedError + + +class FloatParser(NumericParser[float]): + """Parser of floating point values. + + Parsed value may be bounded to a given upper and lower bound. + """ + number_article = 'a' + number_name = 'number' + syntactic_help = ' '.join((number_article, number_name)) + + def __init__( + self, + lower_bound: Optional[float] = None, + upper_bound: Optional[float] = None, + ) -> None: + super(FloatParser, self).__init__() + self.lower_bound = lower_bound + self.upper_bound = upper_bound + sh = self.syntactic_help + if lower_bound is not None and upper_bound is not None: + sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound)) + elif lower_bound == 0: + sh = 'a non-negative %s' % self.number_name + elif upper_bound == 0: + sh = 'a non-positive %s' % self.number_name + elif upper_bound is not None: + sh = '%s <= %s' % (self.number_name, upper_bound) + elif lower_bound is not None: + sh = '%s >= %s' % (self.number_name, lower_bound) + self.syntactic_help = sh + + def convert(self, argument: Union[int, float, str]) -> float: + """Returns the float value of argument.""" + if (_is_integer_type(argument) or isinstance(argument, float) or + isinstance(argument, str)): + return float(argument) + else: + raise TypeError( + 'Expect argument to be a string, int, or float, found {}'.format( + type(argument))) + + def flag_type(self) -> Text: + """See base class.""" + return 'float' + + +class IntegerParser(NumericParser[int]): + """Parser of an integer value. + + Parsed value may be bounded to a given upper and lower bound. 
+ """ + number_article = 'an' + number_name = 'integer' + syntactic_help = ' '.join((number_article, number_name)) + + def __init__( + self, lower_bound: Optional[int] = None, upper_bound: Optional[int] = None + ) -> None: + super(IntegerParser, self).__init__() + self.lower_bound = lower_bound + self.upper_bound = upper_bound + sh = self.syntactic_help + if lower_bound is not None and upper_bound is not None: + sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound)) + elif lower_bound == 1: + sh = 'a positive %s' % self.number_name + elif upper_bound == -1: + sh = 'a negative %s' % self.number_name + elif lower_bound == 0: + sh = 'a non-negative %s' % self.number_name + elif upper_bound == 0: + sh = 'a non-positive %s' % self.number_name + elif upper_bound is not None: + sh = '%s <= %s' % (self.number_name, upper_bound) + elif lower_bound is not None: + sh = '%s >= %s' % (self.number_name, lower_bound) + self.syntactic_help = sh + + def convert(self, argument: Union[int, Text]) -> int: + """Returns the int value of argument.""" + if _is_integer_type(argument): + return argument + elif isinstance(argument, str): + base = 10 + if len(argument) > 2 and argument[0] == '0': + if argument[1] == 'o': + base = 8 + elif argument[1] == 'x': + base = 16 + return int(argument, base) + else: + raise TypeError('Expect argument to be a string or int, found {}'.format( + type(argument))) + + def flag_type(self) -> Text: + """See base class.""" + return 'int' + + +class BooleanParser(ArgumentParser[bool]): + """Parser of boolean values.""" + + def parse(self, argument: Union[Text, int]) -> bool: + """See base class.""" + if isinstance(argument, str): + if argument.lower() in ('true', 't', '1'): + return True + elif argument.lower() in ('false', 'f', '0'): + return False + else: + raise ValueError('Non-boolean argument to boolean flag', argument) + elif isinstance(argument, int): + # Only allow bool or integer 0, 1. + # Note that float 1.0 == True, 0.0 == False. + bool_value = bool(argument) + if argument == bool_value: + return bool_value + else: + raise ValueError('Non-boolean argument to boolean flag', argument) + + raise TypeError('Non-boolean argument to boolean flag', argument) + + def flag_type(self) -> Text: + """See base class.""" + return 'bool' + + +class EnumParser(ArgumentParser[Text]): + """Parser of a string enum value (a string value from a given set).""" + + def __init__( + self, enum_values: Iterable[Text], case_sensitive: bool = True + ) -> None: + """Initializes EnumParser. + + Args: + enum_values: [str], a non-empty list of string values in the enum. + case_sensitive: bool, whether or not the enum is to be case-sensitive. + + Raises: + ValueError: When enum_values is empty. + """ + if not enum_values: + raise ValueError( + 'enum_values cannot be empty, found "{}"'.format(enum_values)) + if isinstance(enum_values, str): + raise ValueError( + 'enum_values cannot be a str, found "{}"'.format(enum_values) + ) + super(EnumParser, self).__init__() + self.enum_values = list(enum_values) + self.case_sensitive = case_sensitive + + def parse(self, argument: Text) -> Text: + """Determines validity of argument and returns the correct element of enum. + + Args: + argument: str, the supplied flag value. + + Returns: + The first matching element from enum_values. + + Raises: + ValueError: Raised when argument didn't match anything in enum. 
+ """ + if self.case_sensitive: + if argument not in self.enum_values: + raise ValueError('value should be one of <%s>' % + '|'.join(self.enum_values)) + else: + return argument + else: + if argument.upper() not in [value.upper() for value in self.enum_values]: + raise ValueError('value should be one of <%s>' % + '|'.join(self.enum_values)) + else: + return [value for value in self.enum_values + if value.upper() == argument.upper()][0] + + def flag_type(self) -> Text: + """See base class.""" + return 'string enum' + + +class EnumClassParser(ArgumentParser[_ET]): + """Parser of an Enum class member.""" + + def __init__( + self, enum_class: Type[_ET], case_sensitive: bool = True + ) -> None: + """Initializes EnumParser. + + Args: + enum_class: class, the Enum class with all possible flag values. + case_sensitive: bool, whether or not the enum is to be case-sensitive. If + False, all member names must be unique when case is ignored. + + Raises: + TypeError: When enum_class is not a subclass of Enum. + ValueError: When enum_class is empty. + """ + if not issubclass(enum_class, enum.Enum): + raise TypeError('{} is not a subclass of Enum.'.format(enum_class)) + if not enum_class.__members__: + raise ValueError('enum_class cannot be empty, but "{}" is empty.' + .format(enum_class)) + if not case_sensitive: + members = collections.Counter( + name.lower() for name in enum_class.__members__) + duplicate_keys = { + member for member, count in members.items() if count > 1 + } + if duplicate_keys: + raise ValueError( + 'Duplicate enum values for {} using case_sensitive=False'.format( + duplicate_keys)) + + super(EnumClassParser, self).__init__() + self.enum_class = enum_class + self._case_sensitive = case_sensitive + if case_sensitive: + self._member_names = tuple(enum_class.__members__) + else: + self._member_names = tuple( + name.lower() for name in enum_class.__members__) + + @property + def member_names(self) -> Sequence[Text]: + """The accepted enum names, in lowercase if not case sensitive.""" + return self._member_names + + def parse(self, argument: Union[_ET, Text]) -> _ET: + """Determines validity of argument and returns the correct element of enum. + + Args: + argument: str or Enum class member, the supplied flag value. + + Returns: + The first matching Enum class member in Enum class. + + Raises: + ValueError: Raised when argument didn't match anything in enum. + """ + if isinstance(argument, self.enum_class): + return argument # pytype: disable=bad-return-type + elif not isinstance(argument, str): + raise ValueError( + '{} is not an enum member or a name of a member in {}'.format( + argument, self.enum_class)) + key = EnumParser( + self._member_names, case_sensitive=self._case_sensitive).parse(argument) + if self._case_sensitive: + return self.enum_class[key] + else: + # If EnumParser.parse() return a value, we're guaranteed to find it + # as a member of the class + return next(value for name, value in self.enum_class.__members__.items() + if name.lower() == key.lower()) + + def flag_type(self) -> Text: + """See base class.""" + return 'enum class' + + +class ListSerializer(Generic[_T], ArgumentSerializer[List[_T]]): + + def __init__(self, list_sep: Text) -> None: + self.list_sep = list_sep + + def serialize(self, value: List[_T]) -> Text: + """See base class.""" + return self.list_sep.join([str(x) for x in value]) + + +class EnumClassListSerializer(ListSerializer[_ET]): + """A serializer for :class:`MultiEnumClass` flags. 
+ + This serializer simply joins the output of `EnumClassSerializer` using a + provided separator. + """ + + def __init__(self, list_sep: Text, **kwargs) -> None: + """Initializes EnumClassListSerializer. + + Args: + list_sep: String to be used as a separator when serializing + **kwargs: Keyword arguments to the `EnumClassSerializer` used to serialize + individual values. + """ + super(EnumClassListSerializer, self).__init__(list_sep) + self._element_serializer = EnumClassSerializer(**kwargs) + + def serialize(self, value: Union[_ET, List[_ET]]) -> Text: + """See base class.""" + if isinstance(value, list): + return self.list_sep.join( + self._element_serializer.serialize(x) for x in value) + else: + return self._element_serializer.serialize(value) + + +class CsvListSerializer(ListSerializer[Text]): + + def serialize(self, value: List[Text]) -> Text: + """Serializes a list as a CSV string or unicode.""" + output = io.StringIO() + writer = csv.writer(output, delimiter=self.list_sep) + writer.writerow([str(x) for x in value]) + serialized_value = output.getvalue().strip() + + # We need the returned value to be pure ascii or Unicodes so that + # when the xml help is generated they are usefully encodable. + return str(serialized_value) + + +class EnumClassSerializer(ArgumentSerializer[_ET]): + """Class for generating string representations of an enum class flag value.""" + + def __init__(self, lowercase: bool) -> None: + """Initializes EnumClassSerializer. + + Args: + lowercase: If True, enum member names are lowercased during serialization. + """ + self._lowercase = lowercase + + def serialize(self, value: _ET) -> Text: + """Returns a serialized string of the Enum class value.""" + as_string = str(value.name) + return as_string.lower() if self._lowercase else as_string + + +class BaseListParser(ArgumentParser): + """Base class for a parser of lists of strings. + + To extend, inherit from this class; from the subclass ``__init__``, call:: + + super().__init__(token, name) + + where token is a character used to tokenize, and name is a description + of the separator. + """ + + def __init__( + self, token: Optional[Text] = None, name: Optional[Text] = None + ) -> None: + assert name + super(BaseListParser, self).__init__() + self._token = token + self._name = name + self.syntactic_help = 'a %s separated list' % self._name + + def parse(self, argument: Text) -> List[Text]: + """See base class.""" + if isinstance(argument, list): + return argument + elif not argument: + return [] + else: + return [s.strip() for s in argument.split(self._token)] + + def flag_type(self) -> Text: + """See base class.""" + return '%s separated list of strings' % self._name + + +class ListParser(BaseListParser): + """Parser for a comma-separated list of strings.""" + + def __init__(self) -> None: + super(ListParser, self).__init__(',', 'comma') + + def parse(self, argument: Union[Text, List[Text]]) -> List[Text]: + """Parses argument as comma-separated list of strings.""" + if isinstance(argument, list): + return argument + elif not argument: + return [] + else: + try: + return [s.strip() for s in list(csv.reader([argument], strict=True))[0]] + except csv.Error as e: + # Provide a helpful report for case like + # --listflag="$(printf 'hello,\nworld')" + # IOW, list flag values containing naked newlines. This error + # was previously "reported" by allowing csv.Error to + # propagate. 
+ raise ValueError('Unable to parse the value %r as a %s: %s' + % (argument, self.flag_type(), e)) + + def _custom_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = super(ListParser, self)._custom_xml_dom_elements(doc) + elements.append(_helpers.create_xml_dom_element( + doc, 'list_separator', repr(','))) + return elements + + +class WhitespaceSeparatedListParser(BaseListParser): + """Parser for a whitespace-separated list of strings.""" + + def __init__(self, comma_compat: bool = False) -> None: + """Initializer. + + Args: + comma_compat: bool, whether to support comma as an additional separator. + If False then only whitespace is supported. This is intended only for + backwards compatibility with flags that used to be comma-separated. + """ + self._comma_compat = comma_compat + name = 'whitespace or comma' if self._comma_compat else 'whitespace' + super(WhitespaceSeparatedListParser, self).__init__(None, name) + + def parse(self, argument: Union[Text, List[Text]]) -> List[Text]: + """Parses argument as whitespace-separated list of strings. + + It also parses argument as comma-separated list of strings if requested. + + Args: + argument: string argument passed in the commandline. + + Returns: + [str], the parsed flag value. + """ + if isinstance(argument, list): + return argument + elif not argument: + return [] + else: + if self._comma_compat: + argument = argument.replace(',', ' ') + return argument.split() + + def _custom_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = super(WhitespaceSeparatedListParser, self + )._custom_xml_dom_elements(doc) + separators = list(string.whitespace) + if self._comma_compat: + separators.append(',') + separators.sort() + for sep_char in separators: + elements.append(_helpers.create_xml_dom_element( + doc, 'list_separator', repr(sep_char))) + return elements diff --git a/venv/lib/python3.10/site-packages/absl/flags/_defines.py b/venv/lib/python3.10/site-packages/absl/flags/_defines.py new file mode 100644 index 0000000000000000000000000000000000000000..c7b102f21aec6533d6960c04aa63765687e1af7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/flags/_defines.py @@ -0,0 +1,1686 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This modules contains flags DEFINE functions. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. 
+""" + +import enum +import sys +import types +import typing +from typing import Text, List, Any, TypeVar, Optional, Union, Type, Iterable, overload + +from absl.flags import _argument_parser +from absl.flags import _exceptions +from absl.flags import _flag +from absl.flags import _flagvalues +from absl.flags import _helpers +from absl.flags import _validators + +_helpers.disclaim_module_ids.add(id(sys.modules[__name__])) + +_T = TypeVar('_T') +_ET = TypeVar('_ET', bound=enum.Enum) + + +def _register_bounds_validator_if_needed(parser, name, flag_values): + """Enforces lower and upper bounds for numeric flags. + + Args: + parser: NumericParser (either FloatParser or IntegerParser), provides lower + and upper bounds, and help text to display. + name: str, name of the flag + flag_values: FlagValues. + """ + if parser.lower_bound is not None or parser.upper_bound is not None: + + def checker(value): + if value is not None and parser.is_outside_bounds(value): + message = '%s is not %s' % (value, parser.syntactic_help) + raise _exceptions.ValidationError(message) + return True + + _validators.register_validator(name, checker, flag_values=flag_values) + + +@overload +def DEFINE( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + name: Text, + default: Any, + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + serializer: Optional[_argument_parser.ArgumentSerializer[_T]] = ..., + module_name: Optional[Text] = ..., + required: 'typing.Literal[True]' = ..., + **args: Any +) -> _flagvalues.FlagHolder[_T]: + ... + + +@overload +def DEFINE( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + name: Text, + default: Optional[Any], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + serializer: Optional[_argument_parser.ArgumentSerializer[_T]] = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[_T]]: + ... + + +def DEFINE( # pylint: disable=invalid-name + parser, + name, + default, + help, # pylint: disable=redefined-builtin + flag_values=_flagvalues.FLAGS, + serializer=None, + module_name=None, + required=False, + **args): + """Registers a generic Flag object. + + NOTE: in the docstrings of all DEFINE* functions, "registers" is short + for "creates a new flag and registers it". + + Auxiliary function: clients should use the specialized ``DEFINE_`` + function instead. + + Args: + parser: :class:`ArgumentParser`, used to parse the flag arguments. + name: str, the flag name. + default: The default value of the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + serializer: :class:`ArgumentSerializer`, the flag serializer instance. + module_name: str, the name of the Python module declaring this flag. If not + provided, it will be computed using the stack trace of this call. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to ``Flag.__init__``. + + Returns: + a handle to defined flag. 
+ """ + return DEFINE_flag( + _flag.Flag(parser, serializer, name, default, help, **args), + flag_values, + module_name, + required=True if required else False, + ) + + +@overload +def DEFINE_flag( # pylint: disable=invalid-name + flag: _flag.Flag[_T], + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: 'typing.Literal[True]' = ..., +) -> _flagvalues.FlagHolder[_T]: + ... + + +@overload +def DEFINE_flag( # pylint: disable=invalid-name + flag: _flag.Flag[_T], + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., +) -> _flagvalues.FlagHolder[Optional[_T]]: + ... + + +def DEFINE_flag( # pylint: disable=invalid-name + flag, + flag_values=_flagvalues.FLAGS, + module_name=None, + required=False): + """Registers a :class:`Flag` object with a :class:`FlagValues` object. + + By default, the global :const:`FLAGS` ``FlagValue`` object is used. + + Typical users will use one of the more specialized DEFINE_xxx + functions, such as :func:`DEFINE_string` or :func:`DEFINE_integer`. But + developers who need to create :class:`Flag` objects themselves should use + this function to register their flags. + + Args: + flag: :class:`Flag`, a flag that is key to the module. + flag_values: :class:`FlagValues`, the ``FlagValues`` instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: str, the name of the Python module declaring this flag. If not + provided, it will be computed using the stack trace of this call. + required: bool, is this a required flag. This must be used as a keyword + argument. + + Returns: + a handle to defined flag. + """ + if required and flag.default is not None: + raise ValueError('Required flag --%s cannot have a non-None default' % + flag.name) + # Copying the reference to flag_values prevents pychecker warnings. + fv = flag_values + fv[flag.name] = flag + # Tell flag_values who's defining the flag. + if module_name: + module = sys.modules.get(module_name) + else: + module, module_name = _helpers.get_calling_module_object_and_name() + flag_values.register_flag_by_module(module_name, flag) + flag_values.register_flag_by_module_id(id(module), flag) + if required: + _validators.mark_flag_as_required(flag.name, fv) + ensure_non_none_value = (flag.default is not None) or required + return _flagvalues.FlagHolder( + fv, flag, ensure_non_none_value=ensure_non_none_value) + + +def set_default(flag_holder: _flagvalues.FlagHolder[_T], value: _T) -> None: + """Changes the default value of the provided flag object. + + The flag's current value is also updated if the flag is currently using + the default value, i.e. not specified in the command line, and not set + by FLAGS.name = value. + + Args: + flag_holder: FlagHolder, the flag to modify. + value: The new default value. + + Raises: + IllegalFlagValueError: Raised when value is not valid. + """ + flag_holder._flagvalues.set_default(flag_holder.name, value) # pylint: disable=protected-access + + +def override_value(flag_holder: _flagvalues.FlagHolder[_T], value: _T) -> None: + """Overrides the value of the provided flag. + + This value takes precedent over the default value and, when called after flag + parsing, any value provided at the command line. + + Args: + flag_holder: FlagHolder, the flag to modify. + value: The new value. + + Raises: + IllegalFlagValueError: The value did not pass the flag parser or validators. 
+ """ + fv = flag_holder._flagvalues # pylint: disable=protected-access + # Ensure the new value satisfies the flag's parser while avoiding side + # effects of calling parse(). + parsed = fv[flag_holder.name]._parse(value) # pylint: disable=protected-access + if parsed != value: + raise _exceptions.IllegalFlagValueError( + 'flag %s: parsed value %r not equal to original %r' + % (flag_holder.name, parsed, value) + ) + setattr(fv, flag_holder.name, value) + + +def _internal_declare_key_flags( + flag_names: List[str], + flag_values: _flagvalues.FlagValues = _flagvalues.FLAGS, + key_flag_values: Optional[_flagvalues.FlagValues] = None, +) -> None: + """Declares a flag as key for the calling module. + + Internal function. User code should call declare_key_flag or + adopt_module_key_flags instead. + + Args: + flag_names: [str], a list of names of already-registered Flag objects. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flags listed in flag_names have registered (the value of the flag_values + argument from the ``DEFINE_*`` calls that defined those flags). This + should almost never need to be overridden. + key_flag_values: :class:`FlagValues`, the FlagValues instance that (among + possibly many other things) keeps track of the key flags for each module. + Default ``None`` means "same as flag_values". This should almost never + need to be overridden. + + Raises: + UnrecognizedFlagError: Raised when the flag is not defined. + """ + key_flag_values = key_flag_values or flag_values + + module = _helpers.get_calling_module() + + for flag_name in flag_names: + key_flag_values.register_key_flag_for_module(module, flag_values[flag_name]) + + +def declare_key_flag( + flag_name: Union[Text, _flagvalues.FlagHolder], + flag_values: _flagvalues.FlagValues = _flagvalues.FLAGS, +) -> None: + """Declares one flag as key to the current module. + + Key flags are flags that are deemed really important for a module. + They are important when listing help messages; e.g., if the + --helpshort command-line flag is used, then only the key flags of the + main module are listed (instead of all flags, as in the case of + --helpfull). + + Sample usage:: + + flags.declare_key_flag('flag_1') + + Args: + flag_name: str | :class:`FlagHolder`, the name or holder of an already + declared flag. (Redeclaring flags as key, including flags implicitly key + because they were declared in this module, is a no-op.) + Positional-only parameter. + flag_values: :class:`FlagValues`, the FlagValues instance in which the + flag will be declared as a key flag. This should almost never need to be + overridden. + + Raises: + ValueError: Raised if flag_name not defined as a Python flag. + """ + flag_name, flag_values = _flagvalues.resolve_flag_ref(flag_name, flag_values) + if flag_name in _helpers.SPECIAL_FLAGS: + # Take care of the special flags, e.g., --flagfile, --undefok. + # These flags are defined in SPECIAL_FLAGS, and are treated + # specially during flag parsing, taking precedence over the + # user-defined flags. + _internal_declare_key_flags([flag_name], + flag_values=_helpers.SPECIAL_FLAGS, + key_flag_values=flag_values) + return + try: + _internal_declare_key_flags([flag_name], flag_values=flag_values) + except KeyError: + raise ValueError('Flag --%s is undefined. To set a flag as a key flag ' + 'first define it in Python.' 
% flag_name) + + +def adopt_module_key_flags( + module: Any, flag_values: _flagvalues.FlagValues = _flagvalues.FLAGS +) -> None: + """Declares that all flags key to a module are key to the current module. + + Args: + module: module, the module object from which all key flags will be declared + as key flags to the current module. + flag_values: :class:`FlagValues`, the FlagValues instance in which the + flags will be declared as key flags. This should almost never need to be + overridden. + + Raises: + Error: Raised when given an argument that is a module name (a string), + instead of a module object. + """ + if not isinstance(module, types.ModuleType): + raise _exceptions.Error('Expected a module object, not %r.' % (module,)) + _internal_declare_key_flags( + [f.name for f in flag_values.get_key_flags_for_module(module.__name__)], + flag_values=flag_values) + # If module is this flag module, take _helpers.SPECIAL_FLAGS into account. + if module == _helpers.FLAGS_MODULE: + _internal_declare_key_flags( + # As we associate flags with get_calling_module_object_and_name(), the + # special flags defined in this module are incorrectly registered with + # a different module. So, we can't use get_key_flags_for_module. + # Instead, we take all flags from _helpers.SPECIAL_FLAGS (a private + # FlagValues, where no other module should register flags). + [_helpers.SPECIAL_FLAGS[name].name for name in _helpers.SPECIAL_FLAGS], + flag_values=_helpers.SPECIAL_FLAGS, + key_flag_values=flag_values) + + +def disclaim_key_flags() -> None: + """Declares that the current module will not define any more key flags. + + Normally, the module that calls the DEFINE_xxx functions claims the + flag to be its key flag. This is undesirable for modules that + define additional DEFINE_yyy functions with its own flag parsers and + serializers, since that module will accidentally claim flags defined + by DEFINE_yyy as its key flags. After calling this function, the + module disclaims flag definitions thereafter, so the key flags will + be correctly attributed to the caller of DEFINE_yyy. + + After calling this function, the module will not be able to define + any more flags. This function will affect all FlagValues objects. + """ + globals_for_caller = sys._getframe(1).f_globals # pylint: disable=protected-access + module, _ = _helpers.get_module_object_and_name(globals_for_caller) + _helpers.disclaim_module_ids.add(id(module)) + + +@overload +def DEFINE_string( # pylint: disable=invalid-name + name: Text, + default: Optional[Text], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[Text]: + ... + + +@overload +def DEFINE_string( # pylint: disable=invalid-name + name: Text, + default: None, + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[Text]]: + ... + + +@overload +def DEFINE_string( # pylint: disable=invalid-name + name: Text, + default: Text, + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Text]: + ... 
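+# --- Editorial annotation; not part of the upstream absl-py sources. --------
+# A minimal usage sketch for DEFINE_string, kept as a comment so this vendored
+# module is unchanged at import time. The module name `my_tool.py`, the flag
+# name `--greeting`, and its default value are illustrative assumptions only.
+#
+#   from absl import app, flags
+#
+#   FLAGS = flags.FLAGS
+#   flags.DEFINE_string('greeting', 'hello', 'Greeting printed by the tool.')
+#
+#   def main(argv):
+#     del argv  # Unused; app.run() has already parsed the command line.
+#     print(FLAGS.greeting)
+#
+#   if __name__ == '__main__':
+#     app.run(main)
+#
+# Running `python my_tool.py --greeting=hi` prints 'hi'; omitting the flag
+# falls back to the default 'hello'.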
+ + +def DEFINE_string( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value can be any string.""" + parser = _argument_parser.ArgumentParser[str]() + serializer = _argument_parser.ArgumentSerializer[str]() + return DEFINE( + parser, + name, + default, + help, + flag_values, + serializer, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_boolean( # pylint: disable=invalid-name + name: Text, + default: Union[None, Text, bool, int], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[bool]: + ... + + +@overload +def DEFINE_boolean( # pylint: disable=invalid-name + name: Text, + default: None, + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[bool]]: + ... + + +@overload +def DEFINE_boolean( # pylint: disable=invalid-name + name: Text, + default: Union[Text, bool, int], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[bool]: + ... + + +def DEFINE_boolean( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + flag_values=_flagvalues.FLAGS, + module_name=None, + required=False, + **args): + """Registers a boolean flag. + + Such a boolean flag does not take an argument. If a user wants to + specify a false value explicitly, the long option beginning with 'no' + must be used: i.e. --noflag + + This flag will have a value of None, True or False. None is possible + if default=None and the user does not specify the flag on the command + line. + + Args: + name: str, the flag name. + default: bool|str|None, the default value of the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: str, the name of the Python module declaring this flag. If not + provided, it will be computed using the stack trace of this call. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + return DEFINE_flag( + _flag.BooleanFlag(name, default, help, **args), + flag_values, + module_name, + required=True if required else False, + ) + + +@overload +def DEFINE_float( # pylint: disable=invalid-name + name: Text, + default: Union[None, float, Text], + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[float]: + ... 
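+# --- Editorial annotation; not part of the upstream absl-py sources. --------
+# A short sketch of boolean flag behavior, kept as a comment; the flag name
+# `--dry_run` is an illustrative assumption. As documented in DEFINE_boolean
+# above, such flags take no argument and are negated with a `no` prefix.
+#
+#   from absl import flags
+#
+#   flags.DEFINE_boolean('dry_run', False, 'Log actions without executing.')
+#
+#   # On the command line, `--dry_run` sets the value to True and
+#   # `--nodry_run` sets it to False; leaving it out keeps the default.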
+ + +@overload +def DEFINE_float( # pylint: disable=invalid-name + name: Text, + default: None, + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[float]]: + ... + + +@overload +def DEFINE_float( # pylint: disable=invalid-name + name: Text, + default: Union[float, Text], + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[float]: + ... + + +def DEFINE_float( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + lower_bound=None, + upper_bound=None, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value must be a float. + + If ``lower_bound`` or ``upper_bound`` are set, then this flag must be + within the given range. + + Args: + name: str, the flag name. + default: float|str|None, the default value of the flag. + help: str, the help message. + lower_bound: float, min value of the flag. + upper_bound: float, max value of the flag. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to :func:`DEFINE`. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.FloatParser(lower_bound, upper_bound) + serializer = _argument_parser.ArgumentSerializer() + result = DEFINE( + parser, + name, + default, + help, + flag_values, + serializer, + required=True if required else False, + **args, + ) + _register_bounds_validator_if_needed(parser, name, flag_values=flag_values) + return result + + +@overload +def DEFINE_integer( # pylint: disable=invalid-name + name: Text, + default: Union[None, int, Text], + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[int]: + ... + + +@overload +def DEFINE_integer( # pylint: disable=invalid-name + name: Text, + default: None, + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[int]]: + ... + + +@overload +def DEFINE_integer( # pylint: disable=invalid-name + name: Text, + default: Union[int, Text], + help: Optional[Text], # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[int]: + ... + + +def DEFINE_integer( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + lower_bound=None, + upper_bound=None, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value must be an integer. + + If ``lower_bound``, or ``upper_bound`` are set, then this flag must be + within the given range. + + Args: + name: str, the flag name. 
+ default: int|str|None, the default value of the flag. + help: str, the help message. + lower_bound: int, min value of the flag. + upper_bound: int, max value of the flag. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to :func:`DEFINE`. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.IntegerParser(lower_bound, upper_bound) + serializer = _argument_parser.ArgumentSerializer() + result = DEFINE( + parser, + name, + default, + help, + flag_values, + serializer, + required=True if required else False, + **args, + ) + _register_bounds_validator_if_needed(parser, name, flag_values=flag_values) + return result + + +@overload +def DEFINE_enum( # pylint: disable=invalid-name + name: Text, + default: Optional[Text], + enum_values: Iterable[Text], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[Text]: + ... + + +@overload +def DEFINE_enum( # pylint: disable=invalid-name + name: Text, + default: None, + enum_values: Iterable[Text], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[Text]]: + ... + + +@overload +def DEFINE_enum( # pylint: disable=invalid-name + name: Text, + default: Text, + enum_values: Iterable[Text], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Text]: + ... + + +def DEFINE_enum( # pylint: disable=invalid-name,redefined-builtin + name, + default, + enum_values, + help, + flag_values=_flagvalues.FLAGS, + module_name=None, + required=False, + **args): + """Registers a flag whose value can be any string from enum_values. + + Instead of a string enum, prefer `DEFINE_enum_class`, which allows + defining enums from an `enum.Enum` class. + + Args: + name: str, the flag name. + default: str|None, the default value of the flag. + enum_values: [str], a non-empty list of strings with the possible values for + the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: str, the name of the Python module declaring this flag. If not + provided, it will be computed using the stack trace of this call. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to ``Flag.__init__``. + + Returns: + a handle to defined flag. 
+ """ + result = DEFINE_flag( + _flag.EnumFlag(name, default, help, enum_values, **args), + flag_values, + module_name, + required=True if required else False, + ) + return result + + +@overload +def DEFINE_enum_class( # pylint: disable=invalid-name + name: Text, + default: Union[None, _ET, Text], + enum_class: Type[_ET], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + case_sensitive: bool = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[_ET]: + ... + + +@overload +def DEFINE_enum_class( # pylint: disable=invalid-name + name: Text, + default: None, + enum_class: Type[_ET], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + case_sensitive: bool = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[_ET]]: + ... + + +@overload +def DEFINE_enum_class( # pylint: disable=invalid-name + name: Text, + default: Union[_ET, Text], + enum_class: Type[_ET], + help: Optional[Text], # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + case_sensitive: bool = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[_ET]: + ... + + +def DEFINE_enum_class( # pylint: disable=invalid-name,redefined-builtin + name, + default, + enum_class, + help, + flag_values=_flagvalues.FLAGS, + module_name=None, + case_sensitive=False, + required=False, + **args): + """Registers a flag whose value can be the name of enum members. + + Args: + name: str, the flag name. + default: Enum|str|None, the default value of the flag. + enum_class: class, the Enum class with all the possible values for the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: str, the name of the Python module declaring this flag. If not + provided, it will be computed using the stack trace of this call. + case_sensitive: bool, whether to map strings to members of the enum_class + without considering case. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: dict, the extra keyword args that are passed to ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + # NOTE: pytype fails if this is a direct return. + result = DEFINE_flag( + _flag.EnumClassFlag( + name, default, help, enum_class, case_sensitive=case_sensitive, **args + ), + flag_values, + module_name, + required=True if required else False, + ) + return result + + +@overload +def DEFINE_list( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +@overload +def DEFINE_list( # pylint: disable=invalid-name + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[Text]]]: + ... 
+ + +@overload +def DEFINE_list( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +def DEFINE_list( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value is a comma-separated list of strings. + + The flag value is parsed with a CSV parser. + + Args: + name: str, the flag name. + default: list|str|None, the default value of the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.ListParser() + serializer = _argument_parser.CsvListSerializer(',') + return DEFINE( + parser, + name, + default, + help, + flag_values, + serializer, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_spaceseplist( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + comma_compat: bool = ..., + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +@overload +def DEFINE_spaceseplist( # pylint: disable=invalid-name + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + comma_compat: bool = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[Text]]]: + ... + + +@overload +def DEFINE_spaceseplist( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + comma_compat: bool = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +def DEFINE_spaceseplist( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + comma_compat=False, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value is a whitespace-separated list of strings. + + Any whitespace can be used as a separator. + + Args: + name: str, the flag name. + default: list|str|None, the default value of the flag. + help: str, the help message. + comma_compat: bool - Whether to support comma as an additional separator. If + false then only whitespace is supported. This is intended only for + backwards compatibility with flags that used to be comma-separated. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. 
+ """ + parser = _argument_parser.WhitespaceSeparatedListParser( + comma_compat=comma_compat) + serializer = _argument_parser.ListSerializer(' ') + return DEFINE( + parser, + name, + default, + help, + flag_values, + serializer, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_multi( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + serializer: _argument_parser.ArgumentSerializer[_T], + name: Text, + default: Iterable[_T], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[_T]]: + ... + + +@overload +def DEFINE_multi( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + serializer: _argument_parser.ArgumentSerializer[_T], + name: Text, + default: Union[None, _T], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[_T]]: + ... + + +@overload +def DEFINE_multi( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + serializer: _argument_parser.ArgumentSerializer[_T], + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[_T]]]: + ... + + +@overload +def DEFINE_multi( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + serializer: _argument_parser.ArgumentSerializer[_T], + name: Text, + default: Iterable[_T], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[_T]]: + ... + + +@overload +def DEFINE_multi( # pylint: disable=invalid-name + parser: _argument_parser.ArgumentParser[_T], + serializer: _argument_parser.ArgumentSerializer[_T], + name: Text, + default: _T, + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[_T]]: + ... + + +def DEFINE_multi( # pylint: disable=invalid-name,redefined-builtin + parser, + serializer, + name, + default, + help, + flag_values=_flagvalues.FLAGS, + module_name=None, + required=False, + **args): + """Registers a generic MultiFlag that parses its args with a given parser. + + Auxiliary function. Normal users should NOT use it directly. + + Developers who need to create their own 'Parser' classes for options + which can appear multiple times can call this module function to + register their flags. + + Args: + parser: ArgumentParser, used to parse the flag arguments. + serializer: ArgumentSerializer, the flag serializer instance. + name: str, the flag name. + default: Union[Iterable[T], Text, None], the default value of the flag. If + the value is text, it will be parsed as if it was provided from the + command line. If the value is a non-string iterable, it will be iterated + over to create a shallow copy of the values. If it is None, it is left + as-is. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. 
This should almost never need to be overridden. + module_name: A string, the name of the Python module declaring this flag. If + not provided, it will be computed using the stack trace of this call. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + result = DEFINE_flag( + _flag.MultiFlag(parser, serializer, name, default, help, **args), + flag_values, + module_name, + required=True if required else False, + ) + return result + + +@overload +def DEFINE_multi_string( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +@overload +def DEFINE_multi_string( # pylint: disable=invalid-name + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[Text]]]: + ... + + +@overload +def DEFINE_multi_string( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[Text], Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +def DEFINE_multi_string( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value can be a list of any strings. + + Use the flag on the command line multiple times to place multiple + string values into the list. The 'default' may be a single string + (which will be converted into a single-element list) or a list of + strings. + + + Args: + name: str, the flag name. + default: Union[Iterable[Text], Text, None], the default value of the flag; + see :func:`DEFINE_multi`. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.ArgumentParser() + serializer = _argument_parser.ArgumentSerializer() + return DEFINE_multi( + parser, + serializer, + name, + default, + help, + flag_values, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_multi_integer( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[int], int, Text], + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[int]]: + ... + + +@overload +def DEFINE_multi_integer( # pylint: disable=invalid-name + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[int]]]: + ... 
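+# --- Editorial annotation; not part of the upstream absl-py sources. --------
+# A minimal sketch of a repeated (multi) flag, kept as a comment; the flag
+# name `--tag` is an illustrative assumption.
+#
+#   from absl import flags
+#
+#   flags.DEFINE_multi_string('tag', None, 'Tag; repeat to pass several.')
+#
+#   # `--tag=a --tag=b` yields FLAGS.tag == ['a', 'b']. A non-None single
+#   # string default would be wrapped into a one-element list, as documented
+#   # in DEFINE_multi above.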
+ + +@overload +def DEFINE_multi_integer( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[int], int, Text], + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[int] = ..., + upper_bound: Optional[int] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[int]]: + ... + + +def DEFINE_multi_integer( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + lower_bound=None, + upper_bound=None, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value can be a list of arbitrary integers. + + Use the flag on the command line multiple times to place multiple + integer values into the list. The 'default' may be a single integer + (which will be converted into a single-element list) or a list of + integers. + + Args: + name: str, the flag name. + default: Union[Iterable[int], Text, None], the default value of the flag; + see `DEFINE_multi`. + help: str, the help message. + lower_bound: int, min values of the flag. + upper_bound: int, max values of the flag. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.IntegerParser(lower_bound, upper_bound) + serializer = _argument_parser.ArgumentSerializer() + return DEFINE_multi( + parser, + serializer, + name, + default, + help, + flag_values, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_multi_float( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[float], float, Text], + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[float]]: + ... + + +@overload +def DEFINE_multi_float( # pylint: disable=invalid-name + name: Text, + default: None, + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[float]]]: + ... + + +@overload +def DEFINE_multi_float( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[float], float, Text], + help: Text, # pylint: disable=redefined-builtin + lower_bound: Optional[float] = ..., + upper_bound: Optional[float] = ..., + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[float]]: + ... + + +def DEFINE_multi_float( # pylint: disable=invalid-name,redefined-builtin + name, + default, + help, + lower_bound=None, + upper_bound=None, + flag_values=_flagvalues.FLAGS, + required=False, + **args): + """Registers a flag whose value can be a list of arbitrary floats. + + Use the flag on the command line multiple times to place multiple + float values into the list. The 'default' may be a single float + (which will be converted into a single-element list) or a list of + floats. + + Args: + name: str, the flag name. 
+ default: Union[Iterable[float], Text, None], the default value of the flag; + see `DEFINE_multi`. + help: str, the help message. + lower_bound: float, min values of the flag. + upper_bound: float, max values of the flag. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + parser = _argument_parser.FloatParser(lower_bound, upper_bound) + serializer = _argument_parser.ArgumentSerializer() + return DEFINE_multi( + parser, + serializer, + name, + default, + help, + flag_values, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_multi_enum( # pylint: disable=invalid-name + name: Text, + default: Union[None, Iterable[Text], Text], + enum_values: Iterable[Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +@overload +def DEFINE_multi_enum( # pylint: disable=invalid-name + name: Text, + default: None, + enum_values: Iterable[Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[Text]]]: + ... + + +@overload +def DEFINE_multi_enum( # pylint: disable=invalid-name + name: Text, + default: Union[Iterable[Text], Text], + enum_values: Iterable[Text], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[Text]]: + ... + + +def DEFINE_multi_enum( # pylint: disable=invalid-name,redefined-builtin + name, + default, + enum_values, + help, + flag_values=_flagvalues.FLAGS, + case_sensitive=True, + required=False, + **args): + """Registers a flag whose value can be a list strings from enum_values. + + Use the flag on the command line multiple times to place multiple + enum values into the list. The 'default' may be a single string + (which will be converted into a single-element list) or a list of + strings. + + Args: + name: str, the flag name. + default: Union[Iterable[Text], Text, None], the default value of the flag; + see `DEFINE_multi`. + enum_values: [str], a non-empty list of strings with the possible values for + the flag. + help: str, the help message. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + case_sensitive: Whether or not the enum is to be case-sensitive. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. 
+ """ + parser = _argument_parser.EnumParser(enum_values, case_sensitive) + serializer = _argument_parser.ArgumentSerializer() + return DEFINE_multi( + parser, + serializer, + name, + default, + '<%s>: %s' % ('|'.join(enum_values), help), + flag_values, + required=True if required else False, + **args, + ) + + +@overload +def DEFINE_multi_enum_class( # pylint: disable=invalid-name + name: Text, + # This is separate from `Union[None, _ET, Iterable[Text], Text]` to avoid a + # Pytype issue inferring the return value to + # FlagHolder[List[Union[_ET, enum.Enum]]] when an iterable of concrete enum + # subclasses are used. + default: Iterable[_ET], + enum_class: Type[_ET], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[_ET]]: + ... + + +@overload +def DEFINE_multi_enum_class( # pylint: disable=invalid-name + name: Text, + default: Union[None, _ET, Iterable[Text], Text], + enum_class: Type[_ET], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + *, + required: 'typing.Literal[True]', + **args: Any +) -> _flagvalues.FlagHolder[List[_ET]]: + ... + + +@overload +def DEFINE_multi_enum_class( # pylint: disable=invalid-name + name: Text, + default: None, + enum_class: Type[_ET], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[Optional[List[_ET]]]: + ... + + +@overload +def DEFINE_multi_enum_class( # pylint: disable=invalid-name + name: Text, + # This is separate from `Union[None, _ET, Iterable[Text], Text]` to avoid a + # Pytype issue inferring the return value to + # FlagHolder[List[Union[_ET, enum.Enum]]] when an iterable of concrete enum + # subclasses are used. + default: Iterable[_ET], + enum_class: Type[_ET], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[_ET]]: + ... + + +@overload +def DEFINE_multi_enum_class( # pylint: disable=invalid-name + name: Text, + default: Union[_ET, Iterable[Text], Text], + enum_class: Type[_ET], + help: Text, # pylint: disable=redefined-builtin + flag_values: _flagvalues.FlagValues = ..., + module_name: Optional[Text] = ..., + required: bool = ..., + **args: Any +) -> _flagvalues.FlagHolder[List[_ET]]: + ... + + +def DEFINE_multi_enum_class( # pylint: disable=invalid-name,redefined-builtin + name, + default, + enum_class, + help, + flag_values=_flagvalues.FLAGS, + module_name=None, + case_sensitive=False, + required=False, + **args): + """Registers a flag whose value can be a list of enum members. + + Use the flag on the command line multiple times to place multiple + enum values into the list. + + Args: + name: str, the flag name. + default: Union[Iterable[Enum], Iterable[Text], Enum, Text, None], the + default value of the flag; see `DEFINE_multi`; only differences are + documented here. If the value is a single Enum, it is treated as a + single-item list of that Enum value. If it is an iterable, text values + within the iterable will be converted to the equivalent Enum objects. + enum_class: class, the Enum class with all the possible values for the flag. + help: str, the help message. 
+ flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: A string, the name of the Python module declaring this flag. If + not provided, it will be computed using the stack trace of this call. + case_sensitive: bool, whether to map strings to members of the enum_class + without considering case. + required: bool, is this a required flag. This must be used as a keyword + argument. + **args: Dictionary with extra keyword args that are passed to the + ``Flag.__init__``. + + Returns: + a handle to defined flag. + """ + # NOTE: pytype fails if this is a direct return. + result = DEFINE_flag( + _flag.MultiEnumClassFlag( + name, + default, + help, + enum_class, + case_sensitive=case_sensitive, + **args, + ), + flag_values, + module_name, + required=True if required else False, + ) + return result + + +def DEFINE_alias( # pylint: disable=invalid-name + name: Text, + original_name: Text, + flag_values: _flagvalues.FlagValues = _flagvalues.FLAGS, + module_name: Optional[Text] = None, +) -> _flagvalues.FlagHolder[Any]: + """Defines an alias flag for an existing one. + + Args: + name: str, the flag name. + original_name: str, the original flag name. + flag_values: :class:`FlagValues`, the FlagValues instance with which the + flag will be registered. This should almost never need to be overridden. + module_name: A string, the name of the module that defines this flag. + + Returns: + a handle to defined flag. + + Raises: + flags.FlagError: + UnrecognizedFlagError: if the referenced flag doesn't exist. + DuplicateFlagError: if the alias name has been used by some existing flag. + """ + if original_name not in flag_values: + raise _exceptions.UnrecognizedFlagError(original_name) + flag = flag_values[original_name] + + class _FlagAlias(_flag.Flag): + """Overrides Flag class so alias value is copy of original flag value.""" + + def parse(self, argument): + flag.parse(argument) + self.present += 1 + + def _parse_from_default(self, value): + # The value was already parsed by the aliased flag, so there is no + # need to call the parser on it a second time. + # Additionally, because of how MultiFlag parses and merges values, + # it isn't possible to delegate to the aliased flag and still get + # the correct values. + return value + + @property + def value(self): + return flag.value + + @value.setter + def value(self, value): + flag.value = value + + help_msg = 'Alias for --%s.' % flag.name + # If alias_name has been used, flags.DuplicatedFlag will be raised. + return DEFINE_flag( + _FlagAlias( + flag.parser, + flag.serializer, + name, + flag.default, + help_msg, + boolean=flag.boolean), flag_values, module_name) diff --git a/venv/lib/python3.10/site-packages/absl/flags/_exceptions.py b/venv/lib/python3.10/site-packages/absl/flags/_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..b569d9460e31622da8d1c826ea31400fb2d74fcc --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/flags/_exceptions.py @@ -0,0 +1,108 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Exception classes in ABSL flags library. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. +""" + +import sys + +from absl.flags import _helpers + + +_helpers.disclaim_module_ids.add(id(sys.modules[__name__])) + + +class Error(Exception): + """The base class for all flags errors.""" + + +class CantOpenFlagFileError(Error): + """Raised when flagfile fails to open. + + E.g. the file doesn't exist, or has wrong permissions. + """ + + +class DuplicateFlagError(Error): + """Raised if there is a flag naming conflict.""" + + @classmethod + def from_flag(cls, flagname, flag_values, other_flag_values=None): + """Creates a DuplicateFlagError by providing flag name and values. + + Args: + flagname: str, the name of the flag being redefined. + flag_values: :class:`FlagValues`, the FlagValues instance containing the + first definition of flagname. + other_flag_values: :class:`FlagValues`, if it is not None, it should be + the FlagValues object where the second definition of flagname occurs. + If it is None, we assume that we're being called when attempting to + create the flag a second time, and we use the module calling this one + as the source of the second definition. + + Returns: + An instance of DuplicateFlagError. + """ + first_module = flag_values.find_module_defining_flag( + flagname, default='') + if other_flag_values is None: + second_module = _helpers.get_calling_module() + else: + second_module = other_flag_values.find_module_defining_flag( + flagname, default='') + flag_summary = flag_values[flagname].help + msg = ("The flag '%s' is defined twice. First from %s, Second from %s. " + "Description from first occurrence: %s") % ( + flagname, first_module, second_module, flag_summary) + return cls(msg) + + +class IllegalFlagValueError(Error): + """Raised when the flag command line argument is illegal.""" + + +class UnrecognizedFlagError(Error): + """Raised when a flag is unrecognized. + + Attributes: + flagname: str, the name of the unrecognized flag. + flagvalue: The value of the flag, empty if the flag is not defined. + """ + + def __init__(self, flagname, flagvalue='', suggestions=None): + self.flagname = flagname + self.flagvalue = flagvalue + if suggestions: + # Space before the question mark is intentional to not include it in the + # selection when copy-pasting the suggestion from (some) terminals. + tip = '. Did you mean: %s ?' 
% ', '.join(suggestions) + else: + tip = '' + super(UnrecognizedFlagError, self).__init__( + 'Unknown command line flag \'%s\'%s' % (flagname, tip)) + + +class UnparsedFlagAccessError(Error): + """Raised when accessing the flag value from unparsed :class:`FlagValues`.""" + + +class ValidationError(Error): + """Raised when flag validator constraint is not satisfied.""" + + +class FlagNameConflictsWithMethodError(Error): + """Raised when a flag name conflicts with :class:`FlagValues` methods.""" diff --git a/venv/lib/python3.10/site-packages/absl/flags/_flag.py b/venv/lib/python3.10/site-packages/absl/flags/_flag.py new file mode 100644 index 0000000000000000000000000000000000000000..67117880c02b36de1291c12cd37e4906d932996f --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/flags/_flag.py @@ -0,0 +1,556 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains Flag class - information about single command-line flag. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. +""" + +from collections import abc +import copy +import enum +import functools +from typing import Any, Dict, Generic, Iterable, List, Optional, Text, Type, TypeVar, Union +from xml.dom import minidom + +from absl.flags import _argument_parser +from absl.flags import _exceptions +from absl.flags import _helpers + +_T = TypeVar('_T') +_ET = TypeVar('_ET', bound=enum.Enum) + + +@functools.total_ordering +class Flag(Generic[_T]): + """Information about a command-line flag. + + Attributes: + name: the name for this flag + default: the default value for this flag + default_unparsed: the unparsed default value for this flag. + default_as_str: default value as repr'd string, e.g., "'true'" + (or None) + value: the most recent parsed value of this flag set by :meth:`parse` + help: a help string or None if no help is available + short_name: the single letter alias for this flag (or None) + boolean: if 'true', this flag does not accept arguments + present: true if this flag was parsed from command line flags + parser: an :class:`~absl.flags.ArgumentParser` object + serializer: an ArgumentSerializer object + allow_override: the flag may be redefined without raising an error, + and newly defined flag overrides the old one. + allow_override_cpp: use the flag from C++ if available the flag + definition is replaced by the C++ flag after init + allow_hide_cpp: use the Python flag despite having a C++ flag with + the same name (ignore the C++ flag) + using_default_value: the flag value has not been set by user + allow_overwrite: the flag may be parsed more than once without + raising an error, the last set value will be used + allow_using_method_names: whether this flag can be defined even if + it has a name that conflicts with a FlagValues method. + validators: list of the flag validators. 
+ + The only public method of a ``Flag`` object is :meth:`parse`, but it is + typically only called by a :class:`~absl.flags.FlagValues` object. The + :meth:`parse` method is a thin wrapper around the + :meth:`ArgumentParser.parse()` method. The + parsed value is saved in ``.value``, and the ``.present`` attribute is + updated. If this flag was already present, an Error is raised. + + :meth:`parse` is also called during ``__init__`` to parse the default value + and initialize the ``.value`` attribute. This enables other python modules to + safely use flags even if the ``__main__`` module neglects to parse the + command line arguments. The ``.present`` attribute is cleared after + ``__init__`` parsing. If the default value is set to ``None``, then the + ``__init__`` parsing step is skipped and the ``.value`` attribute is + initialized to None. + + Note: The default value is also presented to the user in the help + string, so it is important that it be a legal value for this flag. + """ + + # NOTE: pytype doesn't find defaults without this. + default: Optional[_T] + default_as_str: Optional[Text] + default_unparsed: Union[Optional[_T], Text] + + def __init__( + self, + parser: _argument_parser.ArgumentParser[_T], + serializer: Optional[_argument_parser.ArgumentSerializer[_T]], + name: Text, + default: Union[Optional[_T], Text], + help_string: Optional[Text], + short_name: Optional[Text] = None, + boolean: bool = False, + allow_override: bool = False, + allow_override_cpp: bool = False, + allow_hide_cpp: bool = False, + allow_overwrite: bool = True, + allow_using_method_names: bool = False, + ) -> None: + self.name = name + + if not help_string: + help_string = '(no help available)' + + self.help = help_string + self.short_name = short_name + self.boolean = boolean + self.present = 0 + self.parser = parser + self.serializer = serializer + self.allow_override = allow_override + self.allow_override_cpp = allow_override_cpp + self.allow_hide_cpp = allow_hide_cpp + self.allow_overwrite = allow_overwrite + self.allow_using_method_names = allow_using_method_names + + self.using_default_value = True + self._value = None + self.validators = [] + if self.allow_hide_cpp and self.allow_override_cpp: + raise _exceptions.Error( + "Can't have both allow_hide_cpp (means use Python flag) and " + 'allow_override_cpp (means use C++ flag after InitGoogle)') + + self._set_default(default) + + @property + def value(self) -> Optional[_T]: + return self._value + + @value.setter + def value(self, value: Optional[_T]): + self._value = value + + def __hash__(self): + return hash(id(self)) + + def __eq__(self, other): + return self is other + + def __lt__(self, other): + if isinstance(other, Flag): + return id(self) < id(other) + return NotImplemented + + def __bool__(self): + raise TypeError('A Flag instance would always be True. ' + 'Did you mean to test the `.value` attribute?') + + def __getstate__(self): + raise TypeError("can't pickle Flag objects") + + def __copy__(self): + raise TypeError('%s does not support shallow copies. ' + 'Use copy.deepcopy instead.' 
% type(self).__name__) + + def __deepcopy__(self, memo: Dict[int, Any]) -> 'Flag[_T]': + result = object.__new__(type(self)) + result.__dict__ = copy.deepcopy(self.__dict__, memo) + return result + + def _get_parsed_value_as_string(self, value: Optional[_T]) -> Optional[Text]: + """Returns parsed flag value as string.""" + if value is None: + return None + if self.serializer: + return repr(self.serializer.serialize(value)) + if self.boolean: + if value: + return repr('true') + else: + return repr('false') + return repr(str(value)) + + def parse(self, argument: Union[Text, Optional[_T]]) -> None: + """Parses string and sets flag value. + + Args: + argument: str or the correct flag value type, argument to be parsed. + """ + if self.present and not self.allow_overwrite: + raise _exceptions.IllegalFlagValueError( + 'flag --%s=%s: already defined as %s' % ( + self.name, argument, self.value)) + self.value = self._parse(argument) + self.present += 1 + + def _parse(self, argument: Union[Text, _T]) -> Optional[_T]: + """Internal parse function. + + It returns the parsed value, and does not modify class states. + + Args: + argument: str or the correct flag value type, argument to be parsed. + + Returns: + The parsed value. + """ + try: + return self.parser.parse(argument) + except (TypeError, ValueError) as e: # Recast as IllegalFlagValueError. + raise _exceptions.IllegalFlagValueError( + 'flag --%s=%s: %s' % (self.name, argument, e)) + + def unparse(self) -> None: + self.value = self.default + self.using_default_value = True + self.present = 0 + + def serialize(self) -> Text: + """Serializes the flag.""" + return self._serialize(self.value) + + def _serialize(self, value: Optional[_T]) -> Text: + """Internal serialize function.""" + if value is None: + return '' + if self.boolean: + if value: + return '--%s' % self.name + else: + return '--no%s' % self.name + else: + if not self.serializer: + raise _exceptions.Error( + 'Serializer not present for flag %s' % self.name) + return '--%s=%s' % (self.name, self.serializer.serialize(value)) + + def _set_default(self, value: Union[Optional[_T], Text]) -> None: + """Changes the default value (and current value too) for this Flag.""" + self.default_unparsed = value + if value is None: + self.default = None + else: + self.default = self._parse_from_default(value) + self.default_as_str = self._get_parsed_value_as_string(self.default) + if self.using_default_value: + self.value = self.default + + # This is split out so that aliases can skip regular parsing of the default + # value. + def _parse_from_default(self, value: Union[Text, _T]) -> Optional[_T]: + return self._parse(value) + + def flag_type(self) -> Text: + """Returns a str that describes the type of the flag. + + NOTE: we use strings, and not the types.*Type constants because + our flags can have more exotic types, e.g., 'comma separated list + of strings', 'whitespace separated list of strings', etc. + """ + return self.parser.flag_type() + + def _create_xml_dom_element( + self, doc: minidom.Document, module_name: str, is_key: bool = False + ) -> minidom.Element: + """Returns an XML element that contains this flag's information. + + This is information that is relevant to all flags (e.g., name, + meaning, etc.). If you defined a flag that has some other pieces of + info, then please override _ExtraXMLInfo. + + Please do NOT override this method. + + Args: + doc: minidom.Document, the DOM document it should create nodes from. + module_name: str,, the name of the module that defines this flag. 
+ is_key: boolean, True iff this flag is key for main module. + + Returns: + A minidom.Element instance. + """ + element = doc.createElement('flag') + if is_key: + element.appendChild(_helpers.create_xml_dom_element(doc, 'key', 'yes')) + element.appendChild(_helpers.create_xml_dom_element( + doc, 'file', module_name)) + # Adds flag features that are relevant for all flags. + element.appendChild(_helpers.create_xml_dom_element(doc, 'name', self.name)) + if self.short_name: + element.appendChild(_helpers.create_xml_dom_element( + doc, 'short_name', self.short_name)) + if self.help: + element.appendChild(_helpers.create_xml_dom_element( + doc, 'meaning', self.help)) + # The default flag value can either be represented as a string like on the + # command line, or as a Python object. We serialize this value in the + # latter case in order to remain consistent. + if self.serializer and not isinstance(self.default, str): + if self.default is not None: + default_serialized = self.serializer.serialize(self.default) + else: + default_serialized = '' + else: + default_serialized = self.default + element.appendChild(_helpers.create_xml_dom_element( + doc, 'default', default_serialized)) + value_serialized = self._serialize_value_for_xml(self.value) + element.appendChild(_helpers.create_xml_dom_element( + doc, 'current', value_serialized)) + element.appendChild(_helpers.create_xml_dom_element( + doc, 'type', self.flag_type())) + # Adds extra flag features this flag may have. + for e in self._extra_xml_dom_elements(doc): + element.appendChild(e) + return element + + def _serialize_value_for_xml(self, value: Optional[_T]) -> Any: + """Returns the serialized value, for use in an XML help text.""" + return value + + def _extra_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + """Returns extra info about this flag in XML. + + "Extra" means "not already included by _create_xml_dom_element above." + + Args: + doc: minidom.Document, the DOM document it should create nodes from. + + Returns: + A list of minidom.Element. + """ + # Usually, the parser knows the extra details about the flag, so + # we just forward the call to it. + return self.parser._custom_xml_dom_elements(doc) # pylint: disable=protected-access + + +class BooleanFlag(Flag[bool]): + """Basic boolean flag. + + Boolean flags do not take any arguments, and their value is either + ``True`` (1) or ``False`` (0). The false value is specified on the command + line by prepending the word ``'no'`` to either the long or the short flag + name. + + For example, if a Boolean flag was created whose long name was + ``'update'`` and whose short name was ``'x'``, then this flag could be + explicitly unset through either ``--noupdate`` or ``--nox``. 
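+
+  A minimal definition sketch (illustrative; it reuses the ``'update'`` name
+  from the example above and assumes ``from absl import flags``)::
+
+    flags.DEFINE_boolean('update', False, 'Run in update mode.')
+    # Passing --update yields True; passing --noupdate yields False.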
+ """ + + def __init__( + self, + name: Text, + default: Union[Optional[bool], Text], + help: Optional[Text], # pylint: disable=redefined-builtin + short_name: Optional[Text] = None, + **args + ) -> None: + p = _argument_parser.BooleanParser() + super(BooleanFlag, self).__init__( + p, None, name, default, help, short_name, True, **args + ) + + +class EnumFlag(Flag[Text]): + """Basic enum flag; its value can be any string from list of enum_values.""" + + def __init__( + self, + name: Text, + default: Optional[Text], + help: Optional[Text], # pylint: disable=redefined-builtin + enum_values: Iterable[Text], + short_name: Optional[Text] = None, + case_sensitive: bool = True, + **args + ): + p = _argument_parser.EnumParser(enum_values, case_sensitive) + g = _argument_parser.ArgumentSerializer() + super(EnumFlag, self).__init__( + p, g, name, default, help, short_name, **args) + # NOTE: parser should be typed EnumParser but the constructor + # restricts the available interface to ArgumentParser[str]. + self.parser = p + self.help = '<%s>: %s' % ('|'.join(p.enum_values), self.help) + + def _extra_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = [] + for enum_value in self.parser.enum_values: + elements.append(_helpers.create_xml_dom_element( + doc, 'enum_value', enum_value)) + return elements + + +class EnumClassFlag(Flag[_ET]): + """Basic enum flag; its value is an enum class's member.""" + + def __init__( + self, + name: Text, + default: Union[Optional[_ET], Text], + help: Optional[Text], # pylint: disable=redefined-builtin + enum_class: Type[_ET], + short_name: Optional[Text] = None, + case_sensitive: bool = False, + **args + ): + p = _argument_parser.EnumClassParser( + enum_class, case_sensitive=case_sensitive) + g = _argument_parser.EnumClassSerializer(lowercase=not case_sensitive) + super(EnumClassFlag, self).__init__( + p, g, name, default, help, short_name, **args) + # NOTE: parser should be typed EnumClassParser[_ET] but the constructor + # restricts the available interface to ArgumentParser[_ET]. + self.parser = p + self.help = '<%s>: %s' % ('|'.join(p.member_names), self.help) + + def _extra_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = [] + for enum_value in self.parser.enum_class.__members__.keys(): + elements.append(_helpers.create_xml_dom_element( + doc, 'enum_value', enum_value)) + return elements + + +class MultiFlag(Generic[_T], Flag[List[_T]]): + """A flag that can appear multiple time on the command-line. + + The value of such a flag is a list that contains the individual values + from all the appearances of that flag on the command-line. + + See the __doc__ for Flag for most behavior of this class. Only + differences in behavior are described here: + + * The default value may be either a single value or an iterable of values. + A single value is transformed into a single-item list of that value. + + * The value of the flag is always a list, even if the option was + only supplied once, and even if the default value is a single + value + """ + + def __init__(self, *args, **kwargs): + super(MultiFlag, self).__init__(*args, **kwargs) + self.help += ';\n repeat this option to specify a list of values' + + def parse(self, arguments: Union[Text, _T, Iterable[_T]]): # pylint: disable=arguments-renamed + """Parses one or more arguments with the installed parser. 
+ + Args: + arguments: a single argument or a list of arguments (typically a + list of default values); a single argument is converted + internally into a list containing one item. + """ + new_values = self._parse(arguments) + if self.present: + self.value.extend(new_values) + else: + self.value = new_values + self.present += len(new_values) + + def _parse(self, arguments: Union[Text, Optional[Iterable[_T]]]) -> List[_T]: # pylint: disable=arguments-renamed + if (isinstance(arguments, abc.Iterable) and + not isinstance(arguments, str)): + arguments = list(arguments) + + if not isinstance(arguments, list): + # Default value may be a list of values. Most other arguments + # will not be, so convert them into a single-item list to make + # processing simpler below. + arguments = [arguments] + + return [super(MultiFlag, self)._parse(item) for item in arguments] + + def _serialize(self, value: Optional[List[_T]]) -> Text: + """See base class.""" + if not self.serializer: + raise _exceptions.Error( + 'Serializer not present for flag %s' % self.name) + if value is None: + return '' + + serialized_items = [ + super(MultiFlag, self)._serialize(value_item) for value_item in value + ] + + return '\n'.join(serialized_items) + + def flag_type(self): + """See base class.""" + return 'multi ' + self.parser.flag_type() + + def _extra_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = [] + if hasattr(self.parser, 'enum_values'): + for enum_value in self.parser.enum_values: # pytype: disable=attribute-error + elements.append(_helpers.create_xml_dom_element( + doc, 'enum_value', enum_value)) + return elements + + +class MultiEnumClassFlag(MultiFlag[_ET]): # pytype: disable=not-indexable + """A multi_enum_class flag. + + See the __doc__ for MultiFlag for most behaviors of this class. In addition, + this class knows how to handle enum.Enum instances as values for this flag + type. + """ + + def __init__( + self, + name: str, + default: Union[None, Iterable[_ET], _ET, Iterable[Text], Text], + help_string: str, + enum_class: Type[_ET], + case_sensitive: bool = False, + **args + ): + p = _argument_parser.EnumClassParser( + enum_class, case_sensitive=case_sensitive) + g = _argument_parser.EnumClassListSerializer( + list_sep=',', lowercase=not case_sensitive) + super(MultiEnumClassFlag, self).__init__( + p, g, name, default, help_string, **args) + # NOTE: parser should be typed EnumClassParser[_ET] but the constructor + # restricts the available interface to ArgumentParser[str]. + self.parser = p + # NOTE: serializer should be non-Optional but this isn't inferred. 
+ self.serializer = g + self.help = ( + '<%s>: %s;\n repeat this option to specify a list of values' % + ('|'.join(p.member_names), help_string or '(no help available)')) + + def _extra_xml_dom_elements( + self, doc: minidom.Document + ) -> List[minidom.Element]: + elements = [] + for enum_value in self.parser.enum_class.__members__.keys(): # pytype: disable=attribute-error + elements.append(_helpers.create_xml_dom_element( + doc, 'enum_value', enum_value)) + return elements + + def _serialize_value_for_xml(self, value): + """See base class.""" + if value is not None: + if not self.serializer: + raise _exceptions.Error( + 'Serializer not present for flag %s' % self.name + ) + value_serialized = self.serializer.serialize(value) + else: + value_serialized = '' + return value_serialized diff --git a/venv/lib/python3.10/site-packages/absl/flags/_flagvalues.py b/venv/lib/python3.10/site-packages/absl/flags/_flagvalues.py new file mode 100644 index 0000000000000000000000000000000000000000..e25f1d3e100b80aafb51b3a5b5a25772d2b6119c --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/flags/_flagvalues.py @@ -0,0 +1,1480 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Defines the FlagValues class - registry of 'Flag' objects. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. +""" + +import copy +import itertools +import logging +import os +import sys +from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Sequence, Text, TextIO, Generic, TypeVar, Union, Tuple +from xml.dom import minidom + +from absl.flags import _exceptions +from absl.flags import _flag +from absl.flags import _helpers +from absl.flags import _validators_classes +from absl.flags._flag import Flag + +# Add flagvalues module to disclaimed module ids. +_helpers.disclaim_module_ids.add(id(sys.modules[__name__])) + +_T = TypeVar('_T') + + +class FlagValues: + """Registry of :class:`~absl.flags.Flag` objects. + + A :class:`FlagValues` can then scan command line arguments, passing flag + arguments through to the 'Flag' objects that it owns. It also + provides easy access to the flag values. Typically only one + :class:`FlagValues` object is needed by an application: + :const:`FLAGS`. + + This class is heavily overloaded: + + :class:`Flag` objects are registered via ``__setitem__``:: + + FLAGS['longname'] = x # register a new flag + + The ``.value`` attribute of the registered :class:`~absl.flags.Flag` objects + can be accessed as attributes of this :class:`FlagValues` object, through + ``__getattr__``. Both the long and short name of the original + :class:`~absl.flags.Flag` objects can be used to access its value:: + + FLAGS.longname # parsed flag value + FLAGS.x # parsed flag value (short name) + + Command line arguments are scanned and passed to the registered + :class:`~absl.flags.Flag` objects through the ``__call__`` method. Unparsed + arguments, including ``argv[0]`` (e.g. 
the program name) are returned:: + + argv = FLAGS(sys.argv) # scan command line arguments + + The original registered :class:`~absl.flags.Flag` objects can be retrieved + through the use of the dictionary-like operator, ``__getitem__``:: + + x = FLAGS['longname'] # access the registered Flag object + + The ``str()`` operator of a :class:`absl.flags.FlagValues` object provides + help for all of the registered :class:`~absl.flags.Flag` objects. + """ + + _HAS_DYNAMIC_ATTRIBUTES = True + + # A note on collections.abc.Mapping: + # FlagValues defines __getitem__, __iter__, and __len__. It makes perfect + # sense to let it be a collections.abc.Mapping class. However, we are not + # able to do so. The mixin methods, e.g. keys, values, are not uncommon flag + # names. Those flag values would not be accessible via the FLAGS.xxx form. + + __dict__: Dict[str, Any] + + def __init__(self): + # Since everything in this class is so heavily overloaded, the only + # way of defining and using fields is to access __dict__ directly. + + # Dictionary: flag name (string) -> Flag object. + self.__dict__['__flags'] = {} + + # Set: name of hidden flag (string). + # Holds flags that should not be directly accessible from Python. + self.__dict__['__hiddenflags'] = set() + + # Dictionary: module name (string) -> list of Flag objects that are defined + # by that module. + self.__dict__['__flags_by_module'] = {} + # Dictionary: module id (int) -> list of Flag objects that are defined by + # that module. + self.__dict__['__flags_by_module_id'] = {} + # Dictionary: module name (string) -> list of Flag objects that are + # key for that module. + self.__dict__['__key_flags_by_module'] = {} + + # Bool: True if flags were parsed. + self.__dict__['__flags_parsed'] = False + + # Bool: True if unparse_flags() was called. + self.__dict__['__unparse_flags_called'] = False + + # None or Method(name, value) to call from __setattr__ for an unknown flag. + self.__dict__['__set_unknown'] = None + + # A set of banned flag names. This is to prevent users from accidentally + # defining a flag that has the same name as a method on this class. + # Users can still allow defining the flag by passing + # allow_using_method_names=True in DEFINE_xxx functions. + self.__dict__['__banned_flag_names'] = frozenset(dir(FlagValues)) + + # Bool: Whether to use GNU style scanning. + self.__dict__['__use_gnu_getopt'] = True + + # Bool: Whether use_gnu_getopt has been explicitly set by the user. + self.__dict__['__use_gnu_getopt_explicitly_set'] = False + + # Function: Takes a flag name as parameter, returns a tuple + # (is_retired, type_is_bool). + self.__dict__['__is_retired_flag_func'] = None + + def set_gnu_getopt(self, gnu_getopt: bool = True) -> None: + """Sets whether or not to use GNU style scanning. + + GNU style allows mixing of flag and non-flag arguments. See + http://docs.python.org/library/getopt.html#getopt.gnu_getopt + + Args: + gnu_getopt: bool, whether or not to use GNU style scanning. + """ + self.__dict__['__use_gnu_getopt'] = gnu_getopt + self.__dict__['__use_gnu_getopt_explicitly_set'] = True + + def is_gnu_getopt(self) -> bool: + return self.__dict__['__use_gnu_getopt'] + + def _flags(self) -> Dict[Text, Flag]: + return self.__dict__['__flags'] + + def flags_by_module_dict(self) -> Dict[Text, List[Flag]]: + """Returns the dictionary of module_name -> list of defined flags. + + Returns: + A dictionary. Its keys are module names (strings). Its values + are lists of Flag objects. 
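+
+    Example (illustrative)::
+
+      for module_name, module_flags in FLAGS.flags_by_module_dict().items():
+        print(module_name, [f.name for f in module_flags])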
+ """ + return self.__dict__['__flags_by_module'] + + def flags_by_module_id_dict(self) -> Dict[int, List[Flag]]: + """Returns the dictionary of module_id -> list of defined flags. + + Returns: + A dictionary. Its keys are module IDs (ints). Its values + are lists of Flag objects. + """ + return self.__dict__['__flags_by_module_id'] + + def key_flags_by_module_dict(self) -> Dict[Text, List[Flag]]: + """Returns the dictionary of module_name -> list of key flags. + + Returns: + A dictionary. Its keys are module names (strings). Its values + are lists of Flag objects. + """ + return self.__dict__['__key_flags_by_module'] + + def register_flag_by_module(self, module_name: Text, flag: Flag) -> None: + """Records the module that defines a specific flag. + + We keep track of which flag is defined by which module so that we + can later sort the flags by module. + + Args: + module_name: str, the name of a Python module. + flag: Flag, the Flag instance that is key to the module. + """ + flags_by_module = self.flags_by_module_dict() + flags_by_module.setdefault(module_name, []).append(flag) + + def register_flag_by_module_id(self, module_id: int, flag: Flag) -> None: + """Records the module that defines a specific flag. + + Args: + module_id: int, the ID of the Python module. + flag: Flag, the Flag instance that is key to the module. + """ + flags_by_module_id = self.flags_by_module_id_dict() + flags_by_module_id.setdefault(module_id, []).append(flag) + + def register_key_flag_for_module(self, module_name: Text, flag: Flag) -> None: + """Specifies that a flag is a key flag for a module. + + Args: + module_name: str, the name of a Python module. + flag: Flag, the Flag instance that is key to the module. + """ + key_flags_by_module = self.key_flags_by_module_dict() + # The list of key flags for the module named module_name. + key_flags = key_flags_by_module.setdefault(module_name, []) + # Add flag, but avoid duplicates. + if flag not in key_flags: + key_flags.append(flag) + + def _flag_is_registered(self, flag_obj: Flag) -> bool: + """Checks whether a Flag object is registered under long name or short name. + + Args: + flag_obj: Flag, the Flag instance to check for. + + Returns: + bool, True iff flag_obj is registered under long name or short name. + """ + flag_dict = self._flags() + # Check whether flag_obj is registered under its long name. + name = flag_obj.name + if flag_dict.get(name, None) == flag_obj: + return True + # Check whether flag_obj is registered under its short name. + short_name = flag_obj.short_name + if (short_name is not None and flag_dict.get(short_name, None) == flag_obj): + return True + return False + + def _cleanup_unregistered_flag_from_module_dicts( + self, flag_obj: Flag + ) -> None: + """Cleans up unregistered flags from all module -> [flags] dictionaries. + + If flag_obj is registered under either its long name or short name, it + won't be removed from the dictionaries. + + Args: + flag_obj: Flag, the Flag instance to clean up for. + """ + if self._flag_is_registered(flag_obj): + return + for flags_by_module_dict in (self.flags_by_module_dict(), + self.flags_by_module_id_dict(), + self.key_flags_by_module_dict()): + for flags_in_module in flags_by_module_dict.values(): + # While (as opposed to if) takes care of multiple occurrences of a + # flag in the list for the same module. 
+ while flag_obj in flags_in_module: + flags_in_module.remove(flag_obj) + + def get_flags_for_module(self, module: Union[Text, Any]) -> List[Flag]: + """Returns the list of flags defined by a module. + + Args: + module: module|str, the module to get flags from. + + Returns: + [Flag], a new list of Flag instances. Caller may update this list as + desired: none of those changes will affect the internals of this + FlagValue instance. + """ + if not isinstance(module, str): + module = module.__name__ + if module == '__main__': + module = sys.argv[0] + + return list(self.flags_by_module_dict().get(module, [])) + + def get_key_flags_for_module(self, module: Union[Text, Any]) -> List[Flag]: + """Returns the list of key flags for a module. + + Args: + module: module|str, the module to get key flags from. + + Returns: + [Flag], a new list of Flag instances. Caller may update this list as + desired: none of those changes will affect the internals of this + FlagValue instance. + """ + if not isinstance(module, str): + module = module.__name__ + if module == '__main__': + module = sys.argv[0] + + # Any flag is a key flag for the module that defined it. NOTE: + # key_flags is a fresh list: we can update it without affecting the + # internals of this FlagValues object. + key_flags = self.get_flags_for_module(module) + + # Take into account flags explicitly declared as key for a module. + for flag in self.key_flags_by_module_dict().get(module, []): + if flag not in key_flags: + key_flags.append(flag) + return key_flags + + # TODO(yileiyang): Restrict default to Optional[Text]. + def find_module_defining_flag( + self, flagname: Text, default: Optional[_T] = None + ) -> Union[str, Optional[_T]]: + """Return the name of the module defining this flag, or default. + + Args: + flagname: str, name of the flag to lookup. + default: Value to return if flagname is not defined. Defaults to None. + + Returns: + The name of the module which registered the flag with this name. + If no such module exists (i.e. no flag with this name exists), + we return default. + """ + registered_flag = self._flags().get(flagname) + if registered_flag is None: + return default + for module, flags in self.flags_by_module_dict().items(): + for flag in flags: + # It must compare the flag with the one in _flags. This is because a + # flag might be overridden only for its long name (or short name), + # and only its short name (or long name) is considered registered. + if (flag.name == registered_flag.name and + flag.short_name == registered_flag.short_name): + return module + return default + + # TODO(yileiyang): Restrict default to Optional[Text]. + def find_module_id_defining_flag( + self, flagname: Text, default: Optional[_T] = None + ) -> Union[int, Optional[_T]]: + """Return the ID of the module defining this flag, or default. + + Args: + flagname: str, name of the flag to lookup. + default: Value to return if flagname is not defined. Defaults to None. + + Returns: + The ID of the module which registered the flag with this name. + If no such module exists (i.e. no flag with this name exists), + we return default. + """ + registered_flag = self._flags().get(flagname) + if registered_flag is None: + return default + for module_id, flags in self.flags_by_module_id_dict().items(): + for flag in flags: + # It must compare the flag with the one in _flags. This is because a + # flag might be overridden only for its long name (or short name), + # and only its short name (or long name) is considered registered. 
+ if (flag.name == registered_flag.name and + flag.short_name == registered_flag.short_name): + return module_id + return default + + def _register_unknown_flag_setter( + self, setter: Callable[[str, Any], None] + ) -> None: + """Allow set default values for undefined flags. + + Args: + setter: Method(name, value) to call to __setattr__ an unknown flag. Must + raise NameError or ValueError for invalid name/value. + """ + self.__dict__['__set_unknown'] = setter + + def _set_unknown_flag(self, name: str, value: _T) -> _T: + """Returns value if setting flag |name| to |value| returned True. + + Args: + name: str, name of the flag to set. + value: Value to set. + + Returns: + Flag value on successful call. + + Raises: + UnrecognizedFlagError + IllegalFlagValueError + """ + setter = self.__dict__['__set_unknown'] + if setter: + try: + setter(name, value) + return value + except (TypeError, ValueError): # Flag value is not valid. + raise _exceptions.IllegalFlagValueError( + '"{1}" is not valid for --{0}'.format(name, value)) + except NameError: # Flag name is not valid. + pass + raise _exceptions.UnrecognizedFlagError(name, value) + + def append_flag_values(self, flag_values: 'FlagValues') -> None: + """Appends flags registered in another FlagValues instance. + + Args: + flag_values: FlagValues, the FlagValues instance from which to copy flags. + """ + for flag_name, flag in flag_values._flags().items(): # pylint: disable=protected-access + # Each flags with short_name appears here twice (once under its + # normal name, and again with its short name). To prevent + # problems (DuplicateFlagError) with double flag registration, we + # perform a check to make sure that the entry we're looking at is + # for its normal name. + if flag_name == flag.name: + try: + self[flag_name] = flag + except _exceptions.DuplicateFlagError: + raise _exceptions.DuplicateFlagError.from_flag( + flag_name, self, other_flag_values=flag_values) + + def remove_flag_values( + self, flag_values: 'Union[FlagValues, Iterable[Text]]' + ) -> None: + """Remove flags that were previously appended from another FlagValues. + + Args: + flag_values: FlagValues, the FlagValues instance containing flags to + remove. + """ + for flag_name in flag_values: + self.__delattr__(flag_name) + + def __setitem__(self, name: Text, flag: Flag) -> None: + """Registers a new flag variable.""" + fl = self._flags() + if not isinstance(flag, _flag.Flag): + raise _exceptions.IllegalFlagValueError( + f'Expect Flag instances, found type {type(flag)}. ' + "Maybe you didn't mean to use FlagValue.__setitem__?") + if not isinstance(name, str): + raise _exceptions.Error('Flag name must be a string') + if not name: + raise _exceptions.Error('Flag name cannot be empty') + if ' ' in name: + raise _exceptions.Error('Flag name cannot contain a space') + self._check_method_name_conflicts(name, flag) + if name in fl and not flag.allow_override and not fl[name].allow_override: + module, module_name = _helpers.get_calling_module_object_and_name() + if (self.find_module_defining_flag(name) == module_name and + id(module) != self.find_module_id_defining_flag(name)): + # If the flag has already been defined by a module with the same name, + # but a different ID, we can stop here because it indicates that the + # module is simply being imported a subsequent time. + return + raise _exceptions.DuplicateFlagError.from_flag(name, self) + # If a new flag overrides an old one, we need to cleanup the old flag's + # modules if it's not registered. 
+ flags_to_cleanup = set() + short_name: str = flag.short_name # pytype: disable=annotation-type-mismatch + if short_name is not None: + if (short_name in fl and not flag.allow_override and + not fl[short_name].allow_override): + raise _exceptions.DuplicateFlagError.from_flag(short_name, self) + if short_name in fl and fl[short_name] != flag: + flags_to_cleanup.add(fl[short_name]) + fl[short_name] = flag + if (name not in fl # new flag + or fl[name].using_default_value or not flag.using_default_value): + if name in fl and fl[name] != flag: + flags_to_cleanup.add(fl[name]) + fl[name] = flag + for f in flags_to_cleanup: + self._cleanup_unregistered_flag_from_module_dicts(f) + + def __dir__(self) -> List[Text]: + """Returns list of names of all defined flags. + + Useful for TAB-completion in ipython. + + Returns: + [str], a list of names of all defined flags. + """ + return sorted(self.__dict__['__flags']) + + def __getitem__(self, name: Text) -> Flag: + """Returns the Flag object for the flag --name.""" + return self._flags()[name] + + def _hide_flag(self, name): + """Marks the flag --name as hidden.""" + self.__dict__['__hiddenflags'].add(name) + + def __getattr__(self, name: Text) -> Any: + """Retrieves the 'value' attribute of the flag --name.""" + fl = self._flags() + if name not in fl: + raise AttributeError(name) + if name in self.__dict__['__hiddenflags']: + raise AttributeError(name) + + if self.__dict__['__flags_parsed'] or fl[name].present: + return fl[name].value + else: + raise _exceptions.UnparsedFlagAccessError( + 'Trying to access flag --%s before flags were parsed.' % name) + + def __setattr__(self, name: Text, value: _T) -> _T: + """Sets the 'value' attribute of the flag --name.""" + self._set_attributes(**{name: value}) + return value + + def _set_attributes(self, **attributes: Any) -> None: + """Sets multiple flag values together, triggers validators afterwards.""" + fl = self._flags() + known_flag_vals = {} + known_flag_used_defaults = {} + try: + for name, value in attributes.items(): + if name in self.__dict__['__hiddenflags']: + raise AttributeError(name) + if name in fl: + orig = fl[name].value + fl[name].value = value + known_flag_vals[name] = orig + else: + self._set_unknown_flag(name, value) + for name in known_flag_vals: + self._assert_validators(fl[name].validators) + known_flag_used_defaults[name] = fl[name].using_default_value + fl[name].using_default_value = False + except: + for name, orig in known_flag_vals.items(): + fl[name].value = orig + for name, orig in known_flag_used_defaults.items(): + fl[name].using_default_value = orig + # NOTE: We do not attempt to undo unknown flag side effects because we + # cannot reliably undo the user-configured behavior. + raise + + def validate_all_flags(self) -> None: + """Verifies whether all flags pass validation. + + Raises: + AttributeError: Raised if validators work with a non-existing flag. + IllegalFlagValueError: Raised if validation fails for at least one + validator. + """ + all_validators = set() + for flag in self._flags().values(): + all_validators.update(flag.validators) + self._assert_validators(all_validators) + + def _assert_validators( + self, validators: Iterable[_validators_classes.Validator] + ) -> None: + """Asserts if all validators in the list are satisfied. + + It asserts validators in the order they were created. + + Args: + validators: Iterable(validators.Validator), validators to be verified. + + Raises: + AttributeError: Raised if validators work with a non-existing flag. 
+ IllegalFlagValueError: Raised if validation fails for at least one + validator. + """ + messages = [] + bad_flags = set() + for validator in sorted( + validators, key=lambda validator: validator.insertion_index): + try: + if isinstance(validator, _validators_classes.SingleFlagValidator): + if validator.flag_name in bad_flags: + continue + elif isinstance(validator, _validators_classes.MultiFlagsValidator): + if bad_flags & set(validator.flag_names): + continue + validator.verify(self) + except _exceptions.ValidationError as e: + if isinstance(validator, _validators_classes.SingleFlagValidator): + bad_flags.add(validator.flag_name) + elif isinstance(validator, _validators_classes.MultiFlagsValidator): + bad_flags.update(set(validator.flag_names)) + message = validator.print_flags_with_values(self) + messages.append('%s: %s' % (message, str(e))) + if messages: + raise _exceptions.IllegalFlagValueError('\n'.join(messages)) + + def __delattr__(self, flag_name: Text) -> None: + """Deletes a previously-defined flag from a flag object. + + This method makes sure we can delete a flag by using + + del FLAGS. + + E.g., + + flags.DEFINE_integer('foo', 1, 'Integer flag.') + del flags.FLAGS.foo + + If a flag is also registered by its the other name (long name or short + name), the other name won't be deleted. + + Args: + flag_name: str, the name of the flag to be deleted. + + Raises: + AttributeError: Raised when there is no registered flag named flag_name. + """ + fl = self._flags() + if flag_name not in fl: + raise AttributeError(flag_name) + + flag_obj = fl[flag_name] + del fl[flag_name] + + self._cleanup_unregistered_flag_from_module_dicts(flag_obj) + + def set_default(self, name: Text, value: Any) -> None: + """Changes the default value of the named flag object. + + The flag's current value is also updated if the flag is currently using + the default value, i.e. not specified in the command line, and not set + by FLAGS.name = value. + + Args: + name: str, the name of the flag to modify. + value: The new default value. + + Raises: + UnrecognizedFlagError: Raised when there is no registered flag named name. + IllegalFlagValueError: Raised when value is not valid. + """ + fl = self._flags() + if name not in fl: + self._set_unknown_flag(name, value) + return + fl[name]._set_default(value) # pylint: disable=protected-access + self._assert_validators(fl[name].validators) + + def __contains__(self, name: Text) -> bool: + """Returns True if name is a value (flag) in the dict.""" + return name in self._flags() + + def __len__(self) -> int: + return len(self.__dict__['__flags']) + + def __iter__(self) -> Iterator[Text]: + return iter(self._flags()) + + def __call__( + self, argv: Sequence[Text], known_only: bool = False + ) -> List[Text]: + """Parses flags from argv; stores parsed flags into this FlagValues object. + + All unparsed arguments are returned. + + Args: + argv: a tuple/list of strings. + known_only: bool, if True, parse and remove known flags; return the rest + untouched. Unknown flags specified by --undefok are not returned. + + Returns: + The list of arguments not parsed as options, including argv[0]. + + Raises: + Error: Raised on any parsing error. + TypeError: Raised on passing wrong type of arguments. + ValueError: Raised on flag value parsing error. 
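+
+    Example (illustrative; assumes ``my_flag`` is a flag that was defined
+    elsewhere, e.g. an integer flag)::
+
+      remaining = FLAGS(['prog', '--my_flag=1', 'positional_arg'])
+      # remaining == ['prog', 'positional_arg']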
+ """ + if isinstance(argv, (str, bytes)): + raise TypeError( + 'argv should be a tuple/list of strings, not bytes or string.') + if not argv: + raise ValueError( + 'argv cannot be an empty list, and must contain the program name as ' + 'the first element.') + + # This pre parses the argv list for --flagfile=<> options. + program_name = argv[0] + args = self.read_flags_from_files(argv[1:], force_gnu=False) + + # Parse the arguments. + unknown_flags, unparsed_args = self._parse_args(args, known_only) + + # Handle unknown flags by raising UnrecognizedFlagError. + # Note some users depend on us raising this particular error. + for name, value in unknown_flags: + suggestions = _helpers.get_flag_suggestions(name, list(self)) + raise _exceptions.UnrecognizedFlagError( + name, value, suggestions=suggestions) + + self.mark_as_parsed() + self.validate_all_flags() + return [program_name] + unparsed_args + + def __getstate__(self) -> Any: + raise TypeError("can't pickle FlagValues") + + def __copy__(self) -> Any: + raise TypeError('FlagValues does not support shallow copies. ' + 'Use absl.testing.flagsaver or copy.deepcopy instead.') + + def __deepcopy__(self, memo) -> Any: + result = object.__new__(type(self)) + result.__dict__.update(copy.deepcopy(self.__dict__, memo)) + return result + + def _set_is_retired_flag_func(self, is_retired_flag_func): + """Sets a function for checking retired flags. + + Do not use it. This is a private absl API used to check retired flags + registered by the absl C++ flags library. + + Args: + is_retired_flag_func: Callable(str) -> (bool, bool), a function takes flag + name as parameter, returns a tuple (is_retired, type_is_bool). + """ + self.__dict__['__is_retired_flag_func'] = is_retired_flag_func + + def _parse_args( + self, args: List[str], known_only: bool + ) -> Tuple[List[Tuple[Optional[str], Any]], List[str]]: + """Helper function to do the main argument parsing. + + This function goes through args and does the bulk of the flag parsing. + It will find the corresponding flag in our flag dictionary, and call its + .parse() method on the flag value. + + Args: + args: [str], a list of strings with the arguments to parse. + known_only: bool, if True, parse and remove known flags; return the rest + untouched. Unknown flags specified by --undefok are not returned. + + Returns: + A tuple with the following: + unknown_flags: List of (flag name, arg) for flags we don't know about. + unparsed_args: List of arguments we did not parse. + + Raises: + Error: Raised on any parsing error. + ValueError: Raised on flag value parsing error. + """ + unparsed_names_and_args = [] # A list of (flag name or None, arg). + undefok = set() + retired_flag_func = self.__dict__['__is_retired_flag_func'] + + flag_dict = self._flags() + args = iter(args) + for arg in args: + value = None + + def get_value(): + # pylint: disable=cell-var-from-loop + try: + return next(args) if value is None else value + except StopIteration: + raise _exceptions.Error('Missing value for flag ' + arg) # pylint: disable=undefined-loop-variable + + if not arg.startswith('-'): + # A non-argument: default is break, GNU is skip. + unparsed_names_and_args.append((None, arg)) + if self.is_gnu_getopt(): + continue + else: + break + + if arg == '--': + if known_only: + unparsed_names_and_args.append((None, arg)) + break + + # At this point, arg must start with '-'. 
+ if arg.startswith('--'): + arg_without_dashes = arg[2:] + else: + arg_without_dashes = arg[1:] + + if '=' in arg_without_dashes: + name, value = arg_without_dashes.split('=', 1) + else: + name, value = arg_without_dashes, None + + if not name: + # The argument is all dashes (including one dash). + unparsed_names_and_args.append((None, arg)) + if self.is_gnu_getopt(): + continue + else: + break + + # --undefok is a special case. + if name == 'undefok': + value = get_value() + undefok.update(v.strip() for v in value.split(',')) + undefok.update('no' + v.strip() for v in value.split(',')) + continue + + flag = flag_dict.get(name) + if flag is not None: + if flag.boolean and value is None: + value = 'true' + else: + value = get_value() + elif name.startswith('no') and len(name) > 2: + # Boolean flags can take the form of --noflag, with no value. + noflag = flag_dict.get(name[2:]) + if noflag is not None and noflag.boolean: + if value is not None: + raise ValueError(arg + ' does not take an argument') + flag = noflag + value = 'false' + + if retired_flag_func and flag is None: + is_retired, is_bool = retired_flag_func(name) + + # If we didn't recognize that flag, but it starts with + # "no" then maybe it was a boolean flag specified in the + # --nofoo form. + if not is_retired and name.startswith('no'): + is_retired, is_bool = retired_flag_func(name[2:]) + is_retired = is_retired and is_bool + + if is_retired: + if not is_bool and value is None: + # This happens when a non-bool retired flag is specified + # in format of "--flag value". + get_value() + logging.error( + 'Flag "%s" is retired and should no longer be specified. See ' + 'https://abseil.io/tips/90.', + name, + ) + continue + + if flag is not None: + # LINT.IfChange + flag.parse(value) + flag.using_default_value = False + # LINT.ThenChange(../testing/flagsaver.py:flag_override_parsing) + else: + unparsed_names_and_args.append((name, arg)) + + unknown_flags = [] + unparsed_args = [] + for name, arg in unparsed_names_and_args: + if name is None: + # Positional arguments. + unparsed_args.append(arg) + elif name in undefok: + # Remove undefok flags. + continue + else: + # This is an unknown flag. + if known_only: + unparsed_args.append(arg) + else: + unknown_flags.append((name, arg)) + + unparsed_args.extend(list(args)) + return unknown_flags, unparsed_args + + def is_parsed(self) -> bool: + """Returns whether flags were parsed.""" + return self.__dict__['__flags_parsed'] + + def mark_as_parsed(self) -> None: + """Explicitly marks flags as parsed. + + Use this when the caller knows that this FlagValues has been parsed as if + a ``__call__()`` invocation has happened. This is only a public method for + use by things like appcommands which do additional command like parsing. + """ + self.__dict__['__flags_parsed'] = True + + def unparse_flags(self) -> None: + """Unparses all flags to the point before any FLAGS(argv) was called.""" + for f in self._flags().values(): + f.unparse() + # We log this message before marking flags as unparsed to avoid a + # problem when the logging library causes flags access. 
+ logging.info('unparse_flags() called; flags access will now raise errors.') + self.__dict__['__flags_parsed'] = False + self.__dict__['__unparse_flags_called'] = True + + def flag_values_dict(self) -> Dict[Text, Any]: + """Returns a dictionary that maps flag names to flag values.""" + return {name: flag.value for name, flag in self._flags().items()} + + def __str__(self): + """Returns a help string for all known flags.""" + return self.get_help() + + def get_help( + self, prefix: Text = '', include_special_flags: bool = True + ) -> Text: + """Returns a help string for all known flags. + + Args: + prefix: str, per-line output prefix. + include_special_flags: bool, whether to include description of + SPECIAL_FLAGS, i.e. --flagfile and --undefok. + + Returns: + str, formatted help message. + """ + flags_by_module = self.flags_by_module_dict() + if flags_by_module: + modules = sorted(flags_by_module) + # Print the help for the main module first, if possible. + main_module = sys.argv[0] + if main_module in modules: + modules.remove(main_module) + modules = [main_module] + modules + return self._get_help_for_modules(modules, prefix, include_special_flags) + else: + output_lines = [] + # Just print one long list of flags. + values = self._flags().values() + if include_special_flags: + values = itertools.chain( + values, _helpers.SPECIAL_FLAGS._flags().values() # pylint: disable=protected-access # pytype: disable=attribute-error + ) + self._render_flag_list(values, output_lines, prefix) + return '\n'.join(output_lines) + + def _get_help_for_modules(self, modules, prefix, include_special_flags): + """Returns the help string for a list of modules. + + Private to absl.flags package. + + Args: + modules: List[str], a list of modules to get the help string for. + prefix: str, a string that is prepended to each generated help line. + include_special_flags: bool, whether to include description of + SPECIAL_FLAGS, i.e. --flagfile and --undefok. + """ + output_lines = [] + for module in modules: + self._render_our_module_flags(module, output_lines, prefix) + if include_special_flags: + self._render_module_flags( + 'absl.flags', + _helpers.SPECIAL_FLAGS._flags().values(), # pylint: disable=protected-access # pytype: disable=attribute-error + output_lines, + prefix, + ) + return '\n'.join(output_lines) + + def _render_module_flags(self, module, flags, output_lines, prefix=''): + """Returns a help string for a given module.""" + if not isinstance(module, str): + module = module.__name__ + output_lines.append('\n%s%s:' % (prefix, module)) + self._render_flag_list(flags, output_lines, prefix + ' ') + + def _render_our_module_flags(self, module, output_lines, prefix=''): + """Returns a help string for a given module.""" + flags = self.get_flags_for_module(module) + if flags: + self._render_module_flags(module, flags, output_lines, prefix) + + def _render_our_module_key_flags(self, module, output_lines, prefix=''): + """Returns a help string for the key flags of a given module. + + Args: + module: module|str, the module to render key flags for. + output_lines: [str], a list of strings. The generated help message lines + will be appended to this list. + prefix: str, a string that is prepended to each generated help line. + """ + key_flags = self.get_key_flags_for_module(module) + if key_flags: + self._render_module_flags(module, key_flags, output_lines, prefix) + + def module_help(self, module: Any) -> Text: + """Describes the key flags of a module. 
+ + Args: + module: module|str, the module to describe the key flags for. + + Returns: + str, describing the key flags of a module. + """ + helplist = [] + self._render_our_module_key_flags(module, helplist) + return '\n'.join(helplist) + + def main_module_help(self) -> Text: + """Describes the key flags of the main module. + + Returns: + str, describing the key flags of the main module. + """ + return self.module_help(sys.argv[0]) + + def _render_flag_list(self, flaglist, output_lines, prefix=' '): + fl = self._flags() + special_fl = _helpers.SPECIAL_FLAGS._flags() # pylint: disable=protected-access # pytype: disable=attribute-error + flaglist = [(flag.name, flag) for flag in flaglist] + flaglist.sort() + flagset = {} + for (name, flag) in flaglist: + # It's possible this flag got deleted or overridden since being + # registered in the per-module flaglist. Check now against the + # canonical source of current flag information, the _flags. + if fl.get(name, None) != flag and special_fl.get(name, None) != flag: + # a different flag is using this name now + continue + # only print help once + if flag in flagset: + continue + flagset[flag] = 1 + flaghelp = '' + if flag.short_name: + flaghelp += '-%s,' % flag.short_name + if flag.boolean: + flaghelp += '--[no]%s:' % flag.name + else: + flaghelp += '--%s:' % flag.name + flaghelp += ' ' + if flag.help: + flaghelp += flag.help + flaghelp = _helpers.text_wrap( + flaghelp, indent=prefix + ' ', firstline_indent=prefix) + if flag.default_as_str: + flaghelp += '\n' + flaghelp += _helpers.text_wrap( + '(default: %s)' % flag.default_as_str, indent=prefix + ' ') + if flag.parser.syntactic_help: + flaghelp += '\n' + flaghelp += _helpers.text_wrap( + '(%s)' % flag.parser.syntactic_help, indent=prefix + ' ') + output_lines.append(flaghelp) + + def get_flag_value(self, name: Text, default: Any) -> Any: # pylint: disable=invalid-name + """Returns the value of a flag (if not None) or a default value. + + Args: + name: str, the name of a flag. + default: Default value to use if the flag value is None. + + Returns: + Requested flag value or default. + """ + + value = self.__getattr__(name) + if value is not None: # Can't do if not value, b/c value might be '0' or "" + return value + else: + return default + + def _is_flag_file_directive(self, flag_string): + """Checks whether flag_string contain a --flagfile= directive.""" + if isinstance(flag_string, str): + if flag_string.startswith('--flagfile='): + return 1 + elif flag_string == '--flagfile': + return 1 + elif flag_string.startswith('-flagfile='): + return 1 + elif flag_string == '-flagfile': + return 1 + else: + return 0 + return 0 + + def _extract_filename(self, flagfile_str): + """Returns filename from a flagfile_str of form -[-]flagfile=filename. + + The cases of --flagfile foo and -flagfile foo shouldn't be hitting + this function, as they are dealt with in the level above this + function. + + Args: + flagfile_str: str, the flagfile string. + + Returns: + str, the filename from a flagfile_str of form -[-]flagfile=filename. + + Raises: + Error: Raised when illegal --flagfile is provided. 
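+
+    Example (illustrative)::
+
+      # '--flagfile=~/myflags.txt' -> os.path.expanduser('~/myflags.txt')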
+ """ + if flagfile_str.startswith('--flagfile='): + return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip()) + elif flagfile_str.startswith('-flagfile='): + return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip()) + else: + raise _exceptions.Error('Hit illegal --flagfile type: %s' % flagfile_str) + + def _get_flag_file_lines(self, filename, parsed_file_stack=None): + """Returns the useful (!=comments, etc) lines from a file with flags. + + Args: + filename: str, the name of the flag file. + parsed_file_stack: [str], a list of the names of the files that we have + recursively encountered at the current depth. MUTATED BY THIS FUNCTION + (but the original value is preserved upon successfully returning from + function call). + + Returns: + List of strings. See the note below. + + NOTE(springer): This function checks for a nested --flagfile= + tag and handles the lower file recursively. It returns a list of + all the lines that _could_ contain command flags. This is + EVERYTHING except whitespace lines and comments (lines starting + with '#' or '//'). + """ + # For consistency with the cpp version, ignore empty values. + if not filename: + return [] + if parsed_file_stack is None: + parsed_file_stack = [] + # We do a little safety check for reparsing a file we've already encountered + # at a previous depth. + if filename in parsed_file_stack: + sys.stderr.write('Warning: Hit circular flagfile dependency. Ignoring' + ' flagfile: %s\n' % (filename,)) + return [] + else: + parsed_file_stack.append(filename) + + line_list = [] # All line from flagfile. + flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags. + try: + file_obj = open(filename, 'r') + except IOError as e_msg: + raise _exceptions.CantOpenFlagFileError( + 'ERROR:: Unable to open flagfile: %s' % e_msg) + + with file_obj: + line_list = file_obj.readlines() + + # This is where we check each line in the file we just read. + for line in line_list: + if line.isspace(): + pass + # Checks for comment (a line that starts with '#'). + elif line.startswith('#') or line.startswith('//'): + pass + # Checks for a nested "--flagfile=" flag in the current file. + # If we find one, recursively parse down into that file. + elif self._is_flag_file_directive(line): + sub_filename = self._extract_filename(line) + included_flags = self._get_flag_file_lines( + sub_filename, parsed_file_stack=parsed_file_stack) + flag_line_list.extend(included_flags) + else: + # Any line that's not a comment or a nested flagfile should get + # copied into 2nd position. This leaves earlier arguments + # further back in the list, thus giving them higher priority. + flag_line_list.append(line.strip()) + + parsed_file_stack.pop() + return flag_line_list + + def read_flags_from_files( + self, argv: Sequence[Text], force_gnu: bool = True + ) -> List[Text]: + """Processes command line args, but also allow args to be read from file. + + Args: + argv: [str], a list of strings, usually sys.argv[1:], which may contain + one or more flagfile directives of the form --flagfile="./filename". + Note that the name of the program (sys.argv[0]) should be omitted. + force_gnu: bool, if False, --flagfile parsing obeys the + FLAGS.is_gnu_getopt() value. If True, ignore the value and always follow + gnu_getopt semantics. + + Returns: + A new list which has the original list combined with what we read + from any flagfile(s). + + Raises: + IllegalFlagValueError: Raised when --flagfile is provided with no + argument. 
+ + This function is called by FLAGS(argv). + It scans the input list for a flag that looks like: + --flagfile=. Then it opens , reads all valid key + and value pairs and inserts them into the input list in exactly the + place where the --flagfile arg is found. + + Note that your application's flags are still defined the usual way + using absl.flags DEFINE_flag() type functions. + + Notes (assuming we're getting a commandline of some sort as our input): + + * For duplicate flags, the last one we hit should "win". + * Since flags that appear later win, a flagfile's settings can be "weak" + if the --flagfile comes at the beginning of the argument sequence, + and it can be "strong" if the --flagfile comes at the end. + * A further "--flagfile=" CAN be nested in a flagfile. + It will be expanded in exactly the spot where it is found. + * In a flagfile, a line beginning with # or // is a comment. + * Entirely blank lines _should_ be ignored. + """ + rest_of_args = argv + new_argv = [] + while rest_of_args: + current_arg = rest_of_args[0] + rest_of_args = rest_of_args[1:] + if self._is_flag_file_directive(current_arg): + # This handles the case of -(-)flagfile foo. In this case the + # next arg really is part of this one. + if current_arg == '--flagfile' or current_arg == '-flagfile': + if not rest_of_args: + raise _exceptions.IllegalFlagValueError( + '--flagfile with no argument') + flag_filename = os.path.expanduser(rest_of_args[0]) + rest_of_args = rest_of_args[1:] + else: + # This handles the case of (-)-flagfile=foo. + flag_filename = self._extract_filename(current_arg) + new_argv.extend(self._get_flag_file_lines(flag_filename)) + else: + new_argv.append(current_arg) + # Stop parsing after '--', like getopt and gnu_getopt. + if current_arg == '--': + break + # Stop parsing after a non-flag, like getopt. + if not current_arg.startswith('-'): + if not force_gnu and not self.__dict__['__use_gnu_getopt']: + break + else: + if ('=' not in current_arg and rest_of_args and + not rest_of_args[0].startswith('-')): + # If this is an occurrence of a legitimate --x y, skip the value + # so that it won't be mistaken for a standalone arg. + fl = self._flags() + name = current_arg.lstrip('-') + if name in fl and not fl[name].boolean: + current_arg = rest_of_args[0] + rest_of_args = rest_of_args[1:] + new_argv.append(current_arg) + + if rest_of_args: + new_argv.extend(rest_of_args) + + return new_argv + + def flags_into_string(self) -> Text: + """Returns a string with the flags assignments from this FlagValues object. + + This function ignores flags whose value is None. Each flag + assignment is separated by a newline. + + NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString + from https://github.com/gflags/gflags. + + Returns: + str, the string with the flags assignments from this FlagValues object. + The flags are ordered by (module_name, flag_name). + """ + module_flags = sorted(self.flags_by_module_dict().items()) + s = '' + for unused_module_name, flags in module_flags: + flags = sorted(flags, key=lambda f: f.name) + for flag in flags: + if flag.value is not None: + s += flag.serialize() + '\n' + return s + + def append_flags_into_file(self, filename: Text) -> None: + """Appends all flags assignments from this FlagInfo object to a file. + + Output will be in the format of a flagfile. + + NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile + from https://github.com/gflags/gflags. + + Args: + filename: str, name of the file. 
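+
+    Example (illustrative; the path is hypothetical)::
+
+      FLAGS.append_flags_into_file('/tmp/saved_flags.txt')
+      # A later run can replay these values with:
+      #   python main.py --flagfile=/tmp/saved_flags.txt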
+ """ + with open(filename, 'a') as out_file: + out_file.write(self.flags_into_string()) + + def write_help_in_xml_format(self, outfile: Optional[TextIO] = None) -> None: + """Outputs flag documentation in XML format. + + NOTE: We use element names that are consistent with those used by + the C++ command-line flag library, from + https://github.com/gflags/gflags. + We also use a few new elements (e.g., ), but we do not + interfere / overlap with existing XML elements used by the C++ + library. Please maintain this consistency. + + Args: + outfile: File object we write to. Default None means sys.stdout. + """ + doc = minidom.Document() + all_flag = doc.createElement('AllFlags') + doc.appendChild(all_flag) + + all_flag.appendChild( + _helpers.create_xml_dom_element(doc, 'program', + os.path.basename(sys.argv[0]))) + + usage_doc = sys.modules['__main__'].__doc__ + if not usage_doc: + usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0] + else: + usage_doc = usage_doc.replace('%s', sys.argv[0]) + all_flag.appendChild( + _helpers.create_xml_dom_element(doc, 'usage', usage_doc)) + + # Get list of key flags for the main module. + key_flags = self.get_key_flags_for_module(sys.argv[0]) + + # Sort flags by declaring module name and next by flag name. + flags_by_module = self.flags_by_module_dict() + all_module_names = list(flags_by_module.keys()) + all_module_names.sort() + for module_name in all_module_names: + flag_list = [(f.name, f) for f in flags_by_module[module_name]] + flag_list.sort() + for unused_flag_name, flag in flag_list: + is_key = flag in key_flags + all_flag.appendChild( + flag._create_xml_dom_element( # pylint: disable=protected-access + doc, + module_name, + is_key=is_key)) + + outfile = outfile or sys.stdout + outfile.write( + doc.toprettyxml(indent=' ', encoding='utf-8').decode('utf-8')) + outfile.flush() + + def _check_method_name_conflicts(self, name: str, flag: Flag): + if flag.allow_using_method_names: + return + short_name = flag.short_name + flag_names = {name} if short_name is None else {name, short_name} + for flag_name in flag_names: + if flag_name in self.__dict__['__banned_flag_names']: + raise _exceptions.FlagNameConflictsWithMethodError( + 'Cannot define a flag named "{name}". It conflicts with a method ' + 'on class "{class_name}". To allow defining it, use ' + 'allow_using_method_names and access the flag value with ' + "FLAGS['{name}'].value. FLAGS.{name} returns the method, " + 'not the flag value.'.format( + name=flag_name, class_name=type(self).__name__)) + + +FLAGS = FlagValues() + + +class FlagHolder(Generic[_T]): + """Holds a defined flag. + + This facilitates a cleaner api around global state. Instead of:: + + flags.DEFINE_integer('foo', ...) + flags.DEFINE_integer('bar', ...) + + def method(): + # prints parsed value of 'bar' flag + print(flags.FLAGS.foo) + # runtime error due to typo or possibly bad coding style. + print(flags.FLAGS.baz) + + it encourages code like:: + + _FOO_FLAG = flags.DEFINE_integer('foo', ...) + _BAR_FLAG = flags.DEFINE_integer('bar', ...) + + def method(): + print(_FOO_FLAG.value) + print(_BAR_FLAG.value) + + since the name of the flag appears only once in the source code. + """ + + value: _T + + def __init__( + self, + flag_values: FlagValues, + flag: Flag[_T], + ensure_non_none_value: bool = False, + ): + """Constructs a FlagHolder instance providing typesafe access to flag. + + Args: + flag_values: The container the flag is registered to. + flag: The flag object for this flag. 
+ ensure_non_none_value: Is the value of the flag allowed to be None. + """ + self._flagvalues = flag_values + # We take the entire flag object, but only keep the name. Why? + # - We want FlagHolder[T] to be generic container + # - flag_values contains all flags, so has no reference to T. + # - typecheckers don't like to see a generic class where none of the ctor + # arguments refer to the generic type. + self._name = flag.name + # We intentionally do NOT check if the default value is None. + # This allows future use of this for "required flags with None default" + self._ensure_non_none_value = ensure_non_none_value + + def __eq__(self, other): + raise TypeError( + "unsupported operand type(s) for ==: '{0}' and '{1}' " + "(did you mean to use '{0}.value' instead?)".format( + type(self).__name__, type(other).__name__)) + + def __bool__(self): + raise TypeError( + "bool() not supported for instances of type '{0}' " + "(did you mean to use '{0}.value' instead?)".format( + type(self).__name__)) + + __nonzero__ = __bool__ + + @property + def name(self) -> Text: + return self._name + + @property + def value(self) -> _T: + """Returns the value of the flag. + + If ``_ensure_non_none_value`` is ``True``, then return value is not + ``None``. + + Raises: + UnparsedFlagAccessError: if flag parsing has not finished. + IllegalFlagValueError: if value is None unexpectedly. + """ + val = getattr(self._flagvalues, self._name) + if self._ensure_non_none_value and val is None: + raise _exceptions.IllegalFlagValueError( + 'Unexpected None value for flag %s' % self._name) + return val + + @property + def default(self) -> _T: + """Returns the default value of the flag.""" + return self._flagvalues[self._name].default + + @property + def present(self) -> bool: + """Returns True if the flag was parsed from command-line flags.""" + return bool(self._flagvalues[self._name].present) + + def serialize(self) -> Text: + """Returns a serialized representation of the flag.""" + return self._flagvalues[self._name].serialize() + + +def resolve_flag_ref( + flag_ref: Union[str, FlagHolder], flag_values: FlagValues +) -> Tuple[str, FlagValues]: + """Helper to validate and resolve a flag reference argument.""" + if isinstance(flag_ref, FlagHolder): + new_flag_values = flag_ref._flagvalues # pylint: disable=protected-access + if flag_values != FLAGS and flag_values != new_flag_values: + raise ValueError( + 'flag_values must not be customized when operating on a FlagHolder') + return flag_ref.name, new_flag_values + return flag_ref, flag_values + + +def resolve_flag_refs( + flag_refs: Sequence[Union[str, FlagHolder]], flag_values: FlagValues +) -> Tuple[List[str], FlagValues]: + """Helper to validate and resolve flag reference list arguments.""" + fv = None + names = [] + for ref in flag_refs: + if isinstance(ref, FlagHolder): + newfv = ref._flagvalues # pylint: disable=protected-access + name = ref.name + else: + newfv = flag_values + name = ref + if fv and fv != newfv: + raise ValueError( + 'multiple FlagValues instances used in invocation. 
' + 'FlagHolders must be registered to the same FlagValues instance as ' + 'do flag names, if provided.') + fv = newfv + names.append(name) + return names, fv diff --git a/venv/lib/python3.10/site-packages/absl/flags/_helpers.py b/venv/lib/python3.10/site-packages/absl/flags/_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..1ad559c0487b41d24f406c46e0da4d3240a1947f --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/flags/_helpers.py @@ -0,0 +1,421 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal helper functions for Abseil Python flags library.""" + +import os +import re +import struct +import sys +import textwrap +import types +from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Sequence, Set +from xml.dom import minidom +# pylint: disable=g-import-not-at-top +try: + import fcntl +except ImportError: + fcntl = None +try: + # Importing termios will fail on non-unix platforms. + import termios +except ImportError: + termios = None +# pylint: enable=g-import-not-at-top + + +_DEFAULT_HELP_WIDTH = 80 # Default width of help output. +# Minimal "sane" width of help output. We assume that any value below 40 is +# unreasonable. +_MIN_HELP_WIDTH = 40 + +# Define the allowed error rate in an input string to get suggestions. +# +# We lean towards a high threshold because we tend to be matching a phrase, +# and the simple algorithm used here is geared towards correcting word +# spellings. +# +# For manual testing, consider " --list" which produced a large number +# of spurious suggestions when we used "least_errors > 0.5" instead of +# "least_erros >= 0.5". +_SUGGESTION_ERROR_RATE_THRESHOLD = 0.50 + +# Characters that cannot appear or are highly discouraged in an XML 1.0 +# document. (See http://www.w3.org/TR/REC-xml/#charsets or +# https://en.wikipedia.org/wiki/Valid_characters_in_XML#XML_1.0) +_ILLEGAL_XML_CHARS_REGEX = re.compile( + u'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x84\x86-\x9f\ud800-\udfff\ufffe\uffff]') + +# This is a set of module ids for the modules that disclaim key flags. +# This module is explicitly added to this set so that we never consider it to +# define key flag. +disclaim_module_ids: Set[int] = set([id(sys.modules[__name__])]) + + +# Define special flags here so that help may be generated for them. +# NOTE: Please do NOT use SPECIAL_FLAGS from outside flags module. +# Initialized inside flagvalues.py. +# NOTE: This cannot be annotated as its actual FlagValues type since this would +# create a circular dependency. +SPECIAL_FLAGS: Any = None + + +# This points to the flags module, initialized in flags/__init__.py. +# This should only be used in adopt_module_key_flags to take SPECIAL_FLAGS into +# account. +FLAGS_MODULE: types.ModuleType = None + + +class _ModuleObjectAndName(NamedTuple): + """Module object and name. + + Fields: + - module: object, module object. + - module_name: str, module name. 
+ """ + module: types.ModuleType + module_name: str + + +def get_module_object_and_name( + globals_dict: Dict[str, Any] +) -> _ModuleObjectAndName: + """Returns the module that defines a global environment, and its name. + + Args: + globals_dict: A dictionary that should correspond to an environment + providing the values of the globals. + + Returns: + _ModuleObjectAndName - pair of module object & module name. + Returns (None, None) if the module could not be identified. + """ + name = globals_dict.get('__name__', None) + module = sys.modules.get(name, None) + # Pick a more informative name for the main module. + return _ModuleObjectAndName(module, + (sys.argv[0] if name == '__main__' else name)) + + +def get_calling_module_object_and_name() -> _ModuleObjectAndName: + """Returns the module that's calling into this module. + + We generally use this function to get the name of the module calling a + DEFINE_foo... function. + + Returns: + The module object that called into this one. + + Raises: + AssertionError: Raised when no calling module could be identified. + """ + for depth in range(1, sys.getrecursionlimit()): + # sys._getframe is the right thing to use here, as it's the best + # way to walk up the call stack. + globals_for_frame = sys._getframe(depth).f_globals # pylint: disable=protected-access + module, module_name = get_module_object_and_name(globals_for_frame) + if id(module) not in disclaim_module_ids and module_name is not None: + return _ModuleObjectAndName(module, module_name) + raise AssertionError('No module was found') + + +def get_calling_module() -> str: + """Returns the name of the module that's calling into this module.""" + return get_calling_module_object_and_name().module_name + + +def create_xml_dom_element( + doc: minidom.Document, name: str, value: Any +) -> minidom.Element: + """Returns an XML DOM element with name and text value. + + Args: + doc: minidom.Document, the DOM document it should create nodes from. + name: str, the tag of XML element. + value: object, whose string representation will be used + as the value of the XML element. Illegal or highly discouraged xml 1.0 + characters are stripped. + + Returns: + An instance of minidom.Element. + """ + s = str(value) + if isinstance(value, bool): + # Display boolean values as the C++ flag library does: no caps. + s = s.lower() + # Remove illegal xml characters. + s = _ILLEGAL_XML_CHARS_REGEX.sub(u'', s) + + e = doc.createElement(name) + e.appendChild(doc.createTextNode(s)) + return e + + +def get_help_width() -> int: + """Returns the integer width of help lines that is used in TextWrap.""" + if not sys.stdout.isatty() or termios is None or fcntl is None: + return _DEFAULT_HELP_WIDTH + try: + data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, b'1234') + columns = struct.unpack('hh', data)[1] + # Emacs mode returns 0. + # Here we assume that any value below 40 is unreasonable. + if columns >= _MIN_HELP_WIDTH: + return columns + # Returning an int as default is fine, int(int) just return the int. + return int(os.getenv('COLUMNS', _DEFAULT_HELP_WIDTH)) + + except (TypeError, IOError, struct.error): + return _DEFAULT_HELP_WIDTH + + +def get_flag_suggestions( + attempt: Optional[str], longopt_list: Sequence[str] +) -> List[str]: + """Returns helpful similar matches for an invalid flag.""" + # Don't suggest on very short strings, or if no longopts are specified. 
+ if len(attempt) <= 2 or not longopt_list: + return [] + + option_names = [v.split('=')[0] for v in longopt_list] + + # Find close approximations in flag prefixes. + # This also handles the case where the flag is spelled right but ambiguous. + distances = [(_damerau_levenshtein(attempt, option[0:len(attempt)]), option) + for option in option_names] + # t[0] is distance, and sorting by t[1] allows us to have stable output. + distances.sort() + + least_errors, _ = distances[0] + # Don't suggest excessively bad matches. + if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt): + return [] + + suggestions = [] + for errors, name in distances: + if errors == least_errors: + suggestions.append(name) + else: + break + return suggestions + + +def _damerau_levenshtein(a, b): + """Returns Damerau-Levenshtein edit distance from a to b.""" + memo = {} + + def distance(x, y): + """Recursively defined string distance with memoization.""" + if (x, y) in memo: + return memo[x, y] + if not x: + d = len(y) + elif not y: + d = len(x) + else: + d = min( + distance(x[1:], y) + 1, # correct an insertion error + distance(x, y[1:]) + 1, # correct a deletion error + distance(x[1:], y[1:]) + (x[0] != y[0])) # correct a wrong character + if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]: + # Correct a transposition. + t = distance(x[2:], y[2:]) + 1 + if d > t: + d = t + + memo[x, y] = d + return d + return distance(a, b) + + +def text_wrap( + text: str, + length: Optional[int] = None, + indent: str = '', + firstline_indent: Optional[str] = None, +) -> str: + """Wraps a given text to a maximum line length and returns it. + + It turns lines that only contain whitespace into empty lines, keeps new lines, + and expands tabs using 4 spaces. + + Args: + text: str, text to wrap. + length: int, maximum length of a line, includes indentation. + If this is None then use get_help_width() + indent: str, indent for all but first line. + firstline_indent: str, indent for first line; if None, fall back to indent. + + Returns: + str, the wrapped text. + + Raises: + ValueError: Raised if indent or firstline_indent not shorter than length. + """ + # Get defaults where callee used None + if length is None: + length = get_help_width() + if indent is None: + indent = '' + if firstline_indent is None: + firstline_indent = indent + + if len(indent) >= length: + raise ValueError('Length of indent exceeds length') + if len(firstline_indent) >= length: + raise ValueError('Length of first line indent exceeds length') + + text = text.expandtabs(4) + + result = [] + # Create one wrapper for the first paragraph and one for subsequent + # paragraphs that does not have the initial wrapping. + wrapper = textwrap.TextWrapper( + width=length, initial_indent=firstline_indent, subsequent_indent=indent) + subsequent_wrapper = textwrap.TextWrapper( + width=length, initial_indent=indent, subsequent_indent=indent) + + # textwrap does not have any special treatment for newlines. From the docs: + # "...newlines may appear in the middle of a line and cause strange output. + # For this reason, text should be split into paragraphs (using + # str.splitlines() or similar) which are wrapped separately." + for paragraph in (p.strip() for p in text.splitlines()): + if paragraph: + result.extend(wrapper.wrap(paragraph)) + else: + result.append('') # Keep empty lines. + # Replace initial wrapper with wrapper for subsequent paragraphs. 
+ wrapper = subsequent_wrapper + + return '\n'.join(result) + + +def flag_dict_to_args( + flag_map: Dict[str, Any], multi_flags: Optional[Set[str]] = None +) -> Iterable[str]: + """Convert a dict of values into process call parameters. + + This method is used to convert a dictionary into a sequence of parameters + for a binary that parses arguments using this module. + + Args: + flag_map: dict, a mapping where the keys are flag names (strings). + values are treated according to their type: + + * If value is ``None``, then only the name is emitted. + * If value is ``True``, then only the name is emitted. + * If value is ``False``, then only the name prepended with 'no' is + emitted. + * If value is a string then ``--name=value`` is emitted. + * If value is a collection, this will emit + ``--name=value1,value2,value3``, unless the flag name is in + ``multi_flags``, in which case this will emit + ``--name=value1 --name=value2 --name=value3``. + * Everything else is converted to string an passed as such. + + multi_flags: set, names (strings) of flags that should be treated as + multi-flags. + Yields: + sequence of string suitable for a subprocess execution. + """ + for key, value in flag_map.items(): + if value is None: + yield '--%s' % key + elif isinstance(value, bool): + if value: + yield '--%s' % key + else: + yield '--no%s' % key + elif isinstance(value, (bytes, type(u''))): + # We don't want strings to be handled like python collections. + yield '--%s=%s' % (key, value) + else: + # Now we attempt to deal with collections. + try: + if multi_flags and key in multi_flags: + for item in value: + yield '--%s=%s' % (key, str(item)) + else: + yield '--%s=%s' % (key, ','.join(str(item) for item in value)) + except TypeError: + # Default case. + yield '--%s=%s' % (key, value) + + +def trim_docstring(docstring: str) -> str: + """Removes indentation from triple-quoted strings. + + This is the function specified in PEP 257 to handle docstrings: + https://www.python.org/dev/peps/pep-0257/. + + Args: + docstring: str, a python docstring. + + Returns: + str, docstring with indentation removed. + """ + if not docstring: + return '' + + # If you've got a line longer than this you have other problems... + max_indent = 1 << 29 + + # Convert tabs to spaces (following the normal Python rules) + # and split into a list of lines: + lines = docstring.expandtabs().splitlines() + + # Determine minimum indentation (first line doesn't count): + indent = max_indent + for line in lines[1:]: + stripped = line.lstrip() + if stripped: + indent = min(indent, len(line) - len(stripped)) + # Remove indentation (first line is special): + trimmed = [lines[0].strip()] + if indent < max_indent: + for line in lines[1:]: + trimmed.append(line[indent:].rstrip()) + # Strip off trailing and leading blank lines: + while trimmed and not trimmed[-1]: + trimmed.pop() + while trimmed and not trimmed[0]: + trimmed.pop(0) + # Return a single string: + return '\n'.join(trimmed) + + +def doc_to_help(doc: str) -> str: + """Takes a __doc__ string and reformats it as help.""" + + # Get rid of starting and ending white space. Using lstrip() or even + # strip() could drop more than maximum of first line and right space + # of last line. + doc = doc.strip() + + # Get rid of all empty lines. + whitespace_only_line = re.compile('^[ \t]+$', re.M) + doc = whitespace_only_line.sub('', doc) + + # Cut out common space at line beginnings. + doc = trim_docstring(doc) + + # Just like this module's comment, comments tend to be aligned somehow. 
+ # In other words they all start with the same amount of white space. + # 1) keep double new lines; + # 2) keep ws after new lines if not empty line; + # 3) all other new lines shall be changed to a space; + # Solution: Match new lines between non white space and replace with space. + doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc, flags=re.M) + + return doc diff --git a/venv/lib/python3.10/site-packages/absl/flags/_validators.py b/venv/lib/python3.10/site-packages/absl/flags/_validators.py new file mode 100644 index 0000000000000000000000000000000000000000..2161284a8e284fdfbe42d0f0128e7068cb1ba85f --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/flags/_validators.py @@ -0,0 +1,352 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module to enforce different constraints on flags. + +Flags validators can be registered using following functions / decorators:: + + flags.register_validator + @flags.validator + flags.register_multi_flags_validator + @flags.multi_flags_validator + +Three convenience functions are also provided for common flag constraints:: + + flags.mark_flag_as_required + flags.mark_flags_as_required + flags.mark_flags_as_mutual_exclusive + flags.mark_bool_flags_as_mutual_exclusive + +See their docstring in this module for a usage manual. + +Do NOT import this module directly. Import the flags package and use the +aliases defined at the package level instead. +""" + +import warnings + +from absl.flags import _exceptions +from absl.flags import _flagvalues +from absl.flags import _validators_classes + + +def register_validator(flag_name, + checker, + message='Flag validation failed', + flag_values=_flagvalues.FLAGS): + """Adds a constraint, which will be enforced during program execution. + + The constraint is validated when flags are initially parsed, and after each + change of the corresponding flag's value. + + Args: + flag_name: str | FlagHolder, name or holder of the flag to be checked. + Positional-only parameter. + checker: callable, a function to validate the flag. + + * input - A single positional argument: The value of the corresponding + flag (string, boolean, etc. This value will be passed to checker + by the library). + * output - bool, True if validator constraint is satisfied. + If constraint is not satisfied, it should either ``return False`` or + ``raise flags.ValidationError(desired_error_message)``. + + message: str, error text to be shown to the user if checker returns False. + If checker raises flags.ValidationError, message from the raised + error will be shown. + flag_values: flags.FlagValues, optional FlagValues instance to validate + against. + + Raises: + AttributeError: Raised when flag_name is not registered as a valid flag + name. + ValueError: Raised when flag_values is non-default and does not match the + FlagValues of the provided FlagHolder instance. 
+ """ + flag_name, flag_values = _flagvalues.resolve_flag_ref(flag_name, flag_values) + v = _validators_classes.SingleFlagValidator(flag_name, checker, message) + _add_validator(flag_values, v) + + +def validator(flag_name, message='Flag validation failed', + flag_values=_flagvalues.FLAGS): + """A function decorator for defining a flag validator. + + Registers the decorated function as a validator for flag_name, e.g.:: + + @flags.validator('foo') + def _CheckFoo(foo): + ... + + See :func:`register_validator` for the specification of checker function. + + Args: + flag_name: str | FlagHolder, name or holder of the flag to be checked. + Positional-only parameter. + message: str, error text to be shown to the user if checker returns False. + If checker raises flags.ValidationError, message from the raised + error will be shown. + flag_values: flags.FlagValues, optional FlagValues instance to validate + against. + Returns: + A function decorator that registers its function argument as a validator. + Raises: + AttributeError: Raised when flag_name is not registered as a valid flag + name. + """ + + def decorate(function): + register_validator(flag_name, function, + message=message, + flag_values=flag_values) + return function + return decorate + + +def register_multi_flags_validator(flag_names, + multi_flags_checker, + message='Flags validation failed', + flag_values=_flagvalues.FLAGS): + """Adds a constraint to multiple flags. + + The constraint is validated when flags are initially parsed, and after each + change of the corresponding flag's value. + + Args: + flag_names: [str | FlagHolder], a list of the flag names or holders to be + checked. Positional-only parameter. + multi_flags_checker: callable, a function to validate the flag. + + * input - dict, with keys() being flag_names, and value for each key + being the value of the corresponding flag (string, boolean, etc). + * output - bool, True if validator constraint is satisfied. + If constraint is not satisfied, it should either return False or + raise flags.ValidationError. + + message: str, error text to be shown to the user if checker returns False. + If checker raises flags.ValidationError, message from the raised + error will be shown. + flag_values: flags.FlagValues, optional FlagValues instance to validate + against. + + Raises: + AttributeError: Raised when a flag is not registered as a valid flag name. + ValueError: Raised when multiple FlagValues are used in the same + invocation. This can occur when FlagHolders have different `_flagvalues` + or when str-type flag_names entries are present and the `flag_values` + argument does not match that of provided FlagHolder(s). + """ + flag_names, flag_values = _flagvalues.resolve_flag_refs( + flag_names, flag_values) + v = _validators_classes.MultiFlagsValidator( + flag_names, multi_flags_checker, message) + _add_validator(flag_values, v) + + +def multi_flags_validator(flag_names, + message='Flag validation failed', + flag_values=_flagvalues.FLAGS): + """A function decorator for defining a multi-flag validator. + + Registers the decorated function as a validator for flag_names, e.g.:: + + @flags.multi_flags_validator(['foo', 'bar']) + def _CheckFooBar(flags_dict): + ... + + See :func:`register_multi_flags_validator` for the specification of checker + function. + + Args: + flag_names: [str | FlagHolder], a list of the flag names or holders to be + checked. Positional-only parameter. + message: str, error text to be shown to the user if checker returns False. 
+ If checker raises flags.ValidationError, message from the raised + error will be shown. + flag_values: flags.FlagValues, optional FlagValues instance to validate + against. + + Returns: + A function decorator that registers its function argument as a validator. + + Raises: + AttributeError: Raised when a flag is not registered as a valid flag name. + """ + + def decorate(function): + register_multi_flags_validator(flag_names, + function, + message=message, + flag_values=flag_values) + return function + + return decorate + + +def mark_flag_as_required(flag_name, flag_values=_flagvalues.FLAGS): + """Ensures that flag is not None during program execution. + + Registers a flag validator, which will follow usual validator rules. + Important note: validator will pass for any non-``None`` value, such as + ``False``, ``0`` (zero), ``''`` (empty string) and so on. + + If your module might be imported by others, and you only wish to make the flag + required when the module is directly executed, call this method like this:: + + if __name__ == '__main__': + flags.mark_flag_as_required('your_flag_name') + app.run() + + Args: + flag_name: str | FlagHolder, name or holder of the flag. + Positional-only parameter. + flag_values: flags.FlagValues, optional :class:`~absl.flags.FlagValues` + instance where the flag is defined. + Raises: + AttributeError: Raised when flag_name is not registered as a valid flag + name. + ValueError: Raised when flag_values is non-default and does not match the + FlagValues of the provided FlagHolder instance. + """ + flag_name, flag_values = _flagvalues.resolve_flag_ref(flag_name, flag_values) + if flag_values[flag_name].default is not None: + warnings.warn( + 'Flag --%s has a non-None default value; therefore, ' + 'mark_flag_as_required will pass even if flag is not specified in the ' + 'command line!' % flag_name, + stacklevel=2) + register_validator( + flag_name, + lambda value: value is not None, + message='Flag --{} must have a value other than None.'.format(flag_name), + flag_values=flag_values) + + +def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS): + """Ensures that flags are not None during program execution. + + If your module might be imported by others, and you only wish to make the flag + required when the module is directly executed, call this method like this:: + + if __name__ == '__main__': + flags.mark_flags_as_required(['flag1', 'flag2', 'flag3']) + app.run() + + Args: + flag_names: Sequence[str | FlagHolder], names or holders of the flags. + flag_values: flags.FlagValues, optional FlagValues instance where the flags + are defined. + Raises: + AttributeError: If any of flag name has not already been defined as a flag. + """ + for flag_name in flag_names: + mark_flag_as_required(flag_name, flag_values) + + +def mark_flags_as_mutual_exclusive(flag_names, required=False, + flag_values=_flagvalues.FLAGS): + """Ensures that only one flag among flag_names is not None. + + Important note: This validator checks if flag values are ``None``, and it does + not distinguish between default and explicit values. Therefore, this validator + does not make sense when applied to flags with default values other than None, + including other false values (e.g. ``False``, ``0``, ``''``, ``[]``). That + includes multi flags with a default value of ``[]`` instead of None. + + Args: + flag_names: [str | FlagHolder], names or holders of flags. + Positional-only parameter. + required: bool. If true, exactly one of the flags must have a value other + than None. 
Otherwise, at most one of the flags can have a value other + than None, and it is valid for all of the flags to be None. + flag_values: flags.FlagValues, optional FlagValues instance where the flags + are defined. + + Raises: + ValueError: Raised when multiple FlagValues are used in the same + invocation. This can occur when FlagHolders have different `_flagvalues` + or when str-type flag_names entries are present and the `flag_values` + argument does not match that of provided FlagHolder(s). + """ + flag_names, flag_values = _flagvalues.resolve_flag_refs( + flag_names, flag_values) + for flag_name in flag_names: + if flag_values[flag_name].default is not None: + warnings.warn( + 'Flag --{} has a non-None default value. That does not make sense ' + 'with mark_flags_as_mutual_exclusive, which checks whether the ' + 'listed flags have a value other than None.'.format(flag_name), + stacklevel=2) + + def validate_mutual_exclusion(flags_dict): + flag_count = sum(1 for val in flags_dict.values() if val is not None) + if flag_count == 1 or (not required and flag_count == 0): + return True + raise _exceptions.ValidationError( + '{} one of ({}) must have a value other than None.'.format( + 'Exactly' if required else 'At most', ', '.join(flag_names))) + + register_multi_flags_validator( + flag_names, validate_mutual_exclusion, flag_values=flag_values) + + +def mark_bool_flags_as_mutual_exclusive(flag_names, required=False, + flag_values=_flagvalues.FLAGS): + """Ensures that only one flag among flag_names is True. + + Args: + flag_names: [str | FlagHolder], names or holders of flags. + Positional-only parameter. + required: bool. If true, exactly one flag must be True. Otherwise, at most + one flag can be True, and it is valid for all flags to be False. + flag_values: flags.FlagValues, optional FlagValues instance where the flags + are defined. + + Raises: + ValueError: Raised when multiple FlagValues are used in the same + invocation. This can occur when FlagHolders have different `_flagvalues` + or when str-type flag_names entries are present and the `flag_values` + argument does not match that of provided FlagHolder(s). + """ + flag_names, flag_values = _flagvalues.resolve_flag_refs( + flag_names, flag_values) + for flag_name in flag_names: + if not flag_values[flag_name].boolean: + raise _exceptions.ValidationError( + 'Flag --{} is not Boolean, which is required for flags used in ' + 'mark_bool_flags_as_mutual_exclusive.'.format(flag_name)) + + def validate_boolean_mutual_exclusion(flags_dict): + flag_count = sum(bool(val) for val in flags_dict.values()) + if flag_count == 1 or (not required and flag_count == 0): + return True + raise _exceptions.ValidationError( + '{} one of ({}) must be True.'.format( + 'Exactly' if required else 'At most', ', '.join(flag_names))) + + register_multi_flags_validator( + flag_names, validate_boolean_mutual_exclusion, flag_values=flag_values) + + +def _add_validator(fv, validator_instance): + """Register new flags validator to be checked. + + Args: + fv: flags.FlagValues, the FlagValues instance to add the validator. + validator_instance: validators.Validator, the validator to add. + Raises: + KeyError: Raised when validators work with a non-existing flag. 
+ """ + for flag_name in validator_instance.get_flags_names(): + fv[flag_name].validators.append(validator_instance) diff --git a/venv/lib/python3.10/site-packages/absl/flags/_validators_classes.py b/venv/lib/python3.10/site-packages/absl/flags/_validators_classes.py new file mode 100644 index 0000000000000000000000000000000000000000..59100c8e6dc9528634c1752a8de18983eba26676 --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/flags/_validators_classes.py @@ -0,0 +1,172 @@ +# Copyright 2021 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Defines *private* classes used for flag validators. + +Do NOT import this module. DO NOT use anything from this module. They are +private APIs. +""" + +from absl.flags import _exceptions + + +class Validator(object): + """Base class for flags validators. + + Users should NOT overload these classes, and use flags.Register... + methods instead. + """ + + # Used to assign each validator an unique insertion_index + validators_count = 0 + + def __init__(self, checker, message): + """Constructor to create all validators. + + Args: + checker: function to verify the constraint. + Input of this method varies, see SingleFlagValidator and + multi_flags_validator for a detailed description. + message: str, error message to be shown to the user. + """ + self.checker = checker + self.message = message + Validator.validators_count += 1 + # Used to assert validators in the order they were registered. + self.insertion_index = Validator.validators_count + + def verify(self, flag_values): + """Verifies that constraint is satisfied. + + flags library calls this method to verify Validator's constraint. + + Args: + flag_values: flags.FlagValues, the FlagValues instance to get flags from. + Raises: + Error: Raised if constraint is not satisfied. + """ + param = self._get_input_to_checker_function(flag_values) + if not self.checker(param): + raise _exceptions.ValidationError(self.message) + + def get_flags_names(self): + """Returns the names of the flags checked by this validator. + + Returns: + [string], names of the flags. + """ + raise NotImplementedError('This method should be overloaded') + + def print_flags_with_values(self, flag_values): + raise NotImplementedError('This method should be overloaded') + + def _get_input_to_checker_function(self, flag_values): + """Given flag values, returns the input to be given to checker. + + Args: + flag_values: flags.FlagValues, containing all flags. + Returns: + The input to be given to checker. The return type depends on the specific + validator. + """ + raise NotImplementedError('This method should be overloaded') + + +class SingleFlagValidator(Validator): + """Validator behind register_validator() method. + + Validates that a single flag passes its checker function. The checker function + takes the flag value and returns True (if value looks fine) or, if flag value + is not valid, either returns False or raises an Exception. + """ + + def __init__(self, flag_name, checker, message): + """Constructor. 
+ + Args: + flag_name: string, name of the flag. + checker: function to verify the validator. + input - value of the corresponding flag (string, boolean, etc). + output - bool, True if validator constraint is satisfied. + If constraint is not satisfied, it should either return False or + raise flags.ValidationError(desired_error_message). + message: str, error message to be shown to the user if validator's + condition is not satisfied. + """ + super(SingleFlagValidator, self).__init__(checker, message) + self.flag_name = flag_name + + def get_flags_names(self): + return [self.flag_name] + + def print_flags_with_values(self, flag_values): + return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value) + + def _get_input_to_checker_function(self, flag_values): + """Given flag values, returns the input to be given to checker. + + Args: + flag_values: flags.FlagValues, the FlagValues instance to get flags from. + Returns: + object, the input to be given to checker. + """ + return flag_values[self.flag_name].value + + +class MultiFlagsValidator(Validator): + """Validator behind register_multi_flags_validator method. + + Validates that flag values pass their common checker function. The checker + function takes flag values and returns True (if values look fine) or, + if values are not valid, either returns False or raises an Exception. + """ + + def __init__(self, flag_names, checker, message): + """Constructor. + + Args: + flag_names: [str], containing names of the flags used by checker. + checker: function to verify the validator. + input - dict, with keys() being flag_names, and value for each + key being the value of the corresponding flag (string, boolean, + etc). + output - bool, True if validator constraint is satisfied. + If constraint is not satisfied, it should either return False or + raise flags.ValidationError(desired_error_message). + message: str, error message to be shown to the user if validator's + condition is not satisfied + """ + super(MultiFlagsValidator, self).__init__(checker, message) + self.flag_names = flag_names + + def _get_input_to_checker_function(self, flag_values): + """Given flag values, returns the input to be given to checker. + + Args: + flag_values: flags.FlagValues, the FlagValues instance to get flags from. + Returns: + dict, with keys() being self.flag_names, and value for each key + being the value of the corresponding flag (string, boolean, etc). + """ + return dict([key, flag_values[key].value] for key in self.flag_names) + + def print_flags_with_values(self, flag_values): + prefix = 'flags ' + flags_with_values = [] + for key in self.flag_names: + flags_with_values.append('%s=%s' % (key, flag_values[key].value)) + return prefix + ', '.join(flags_with_values) + + def get_flags_names(self): + return self.flag_names diff --git a/venv/lib/python3.10/site-packages/absl/flags/argparse_flags.py b/venv/lib/python3.10/site-packages/absl/flags/argparse_flags.py new file mode 100644 index 0000000000000000000000000000000000000000..f05c7943ec53b5364817675ef4fc94bbd9976df4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/flags/argparse_flags.py @@ -0,0 +1,388 @@ +# Copyright 2018 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module provides argparse integration with absl.flags. + +``argparse_flags.ArgumentParser`` is a drop-in replacement for +:class:`argparse.ArgumentParser`. It takes care of collecting and defining absl +flags in :mod:`argparse`. + +Here is a simple example:: + + # Assume the following absl.flags is defined in another module: + # + # from absl import flags + # flags.DEFINE_string('echo', None, 'The echo message.') + # + parser = argparse_flags.ArgumentParser( + description='A demo of absl.flags and argparse integration.') + parser.add_argument('--header', help='Header message to print.') + + # The parser will also accept the absl flag `--echo`. + # The `header` value is available as `args.header` just like a regular + # argparse flag. The absl flag `--echo` continues to be available via + # `absl.flags.FLAGS` if you want to access it. + args = parser.parse_args() + + # Example usages: + # ./program --echo='A message.' --header='A header' + # ./program --header 'A header' --echo 'A message.' + + +Here is another example demonstrates subparsers:: + + parser = argparse_flags.ArgumentParser(description='A subcommands demo.') + parser.add_argument('--header', help='The header message to print.') + + subparsers = parser.add_subparsers(help='The command to execute.') + + roll_dice_parser = subparsers.add_parser( + 'roll_dice', help='Roll a dice.', + # By default, absl flags can also be specified after the sub-command. + # To only allow them before sub-command, pass + # `inherited_absl_flags=None`. + inherited_absl_flags=None) + roll_dice_parser.add_argument('--num_faces', type=int, default=6) + roll_dice_parser.set_defaults(command=roll_dice) + + shuffle_parser = subparsers.add_parser('shuffle', help='Shuffle inputs.') + shuffle_parser.add_argument( + 'inputs', metavar='I', nargs='+', help='Inputs to shuffle.') + shuffle_parser.set_defaults(command=shuffle) + + args = parser.parse_args(argv[1:]) + args.command(args) + + # Example usages: + # ./program --echo='A message.' roll_dice --num_faces=6 + # ./program shuffle --echo='A message.' 1 2 3 4 + + +There are several differences between :mod:`absl.flags` and +:mod:`~absl.flags.argparse_flags`: + +1. Flags defined with absl.flags are parsed differently when using the + argparse parser. Notably: + + 1) absl.flags allows both single-dash and double-dash for any flag, and + doesn't distinguish them; argparse_flags only allows double-dash for + flag's regular name, and single-dash for flag's ``short_name``. + 2) Boolean flags in absl.flags can be specified with ``--bool``, + ``--nobool``, as well as ``--bool=true/false`` (though not recommended); + in argparse_flags, it only allows ``--bool``, ``--nobool``. + +2. Help related flag differences: + + 1) absl.flags does not define help flags, absl.app does that; argparse_flags + defines help flags unless passed with ``add_help=False``. + 2) absl.app supports ``--helpxml``; argparse_flags does not. + 3) argparse_flags supports ``-h``; absl.app does not. 
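+
+As a minimal sketch of the boolean-flag difference listed above (the
+``debug`` flag here is hypothetical and would have to be defined elsewhere
+with ``flags.DEFINE_boolean``; it is shown only for illustration)::
+
+    parser = argparse_flags.ArgumentParser()
+    parser.parse_args(['--debug'])    # sets flags.FLAGS.debug to True
+    parser.parse_args(['--nodebug'])  # sets flags.FLAGS.debug to False
+    # '--debug=true' is accepted by absl.flags itself but rejected here.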
+""" + +import argparse +import sys + +from absl import flags + + +_BUILT_IN_FLAGS = frozenset({ + 'help', + 'helpshort', + 'helpfull', + 'helpxml', + 'flagfile', + 'undefok', +}) + + +class ArgumentParser(argparse.ArgumentParser): + """Custom ArgumentParser class to support special absl flags.""" + + def __init__(self, **kwargs): + """Initializes ArgumentParser. + + Args: + **kwargs: same as argparse.ArgumentParser, except: + 1. It also accepts `inherited_absl_flags`: the absl flags to inherit. + The default is the global absl.flags.FLAGS instance. Pass None to + ignore absl flags. + 2. The `prefix_chars` argument must be the default value '-'. + + Raises: + ValueError: Raised when prefix_chars is not '-'. + """ + prefix_chars = kwargs.get('prefix_chars', '-') + if prefix_chars != '-': + raise ValueError( + 'argparse_flags.ArgumentParser only supports "-" as the prefix ' + 'character, found "{}".'.format(prefix_chars)) + + # Remove inherited_absl_flags before calling super. + self._inherited_absl_flags = kwargs.pop('inherited_absl_flags', flags.FLAGS) + # Now call super to initialize argparse.ArgumentParser before calling + # add_argument in _define_absl_flags. + super(ArgumentParser, self).__init__(**kwargs) + + if self.add_help: + # -h and --help are defined in super. + # Also add the --helpshort and --helpfull flags. + self.add_argument( + # Action 'help' defines a similar flag to -h/--help. + '--helpshort', action='help', + default=argparse.SUPPRESS, help=argparse.SUPPRESS) + self.add_argument( + '--helpfull', action=_HelpFullAction, + default=argparse.SUPPRESS, help='show full help message and exit') + + if self._inherited_absl_flags is not None: + self.add_argument( + '--undefok', default=argparse.SUPPRESS, help=argparse.SUPPRESS) + self._define_absl_flags(self._inherited_absl_flags) + + def parse_known_args(self, args=None, namespace=None): + if args is None: + args = sys.argv[1:] + if self._inherited_absl_flags is not None: + # Handle --flagfile. + # Explicitly specify force_gnu=True, since argparse behaves like + # gnu_getopt: flags can be specified after positional arguments. + args = self._inherited_absl_flags.read_flags_from_files( + args, force_gnu=True) + + undefok_missing = object() + undefok = getattr(namespace, 'undefok', undefok_missing) + + namespace, args = super(ArgumentParser, self).parse_known_args( + args, namespace) + + # For Python <= 2.7.8: https://bugs.python.org/issue9351, a bug where + # sub-parsers don't preserve existing namespace attributes. + # Restore the undefok attribute if a sub-parser dropped it. + if undefok is not undefok_missing: + namespace.undefok = undefok + + if self._inherited_absl_flags is not None: + # Handle --undefok. At this point, `args` only contains unknown flags, + # so it won't strip defined flags that are also specified with --undefok. + # For Python <= 2.7.8: https://bugs.python.org/issue9351, a bug where + # sub-parsers don't preserve existing namespace attributes. The undefok + # attribute might not exist because a subparser dropped it. + if hasattr(namespace, 'undefok'): + args = _strip_undefok_args(namespace.undefok, args) + # absl flags are not exposed in the Namespace object. See Namespace: + # https://docs.python.org/3/library/argparse.html#argparse.Namespace. 
+ del namespace.undefok + self._inherited_absl_flags.mark_as_parsed() + try: + self._inherited_absl_flags.validate_all_flags() + except flags.IllegalFlagValueError as e: + self.error(str(e)) + + return namespace, args + + def _define_absl_flags(self, absl_flags): + """Defines flags from absl_flags.""" + key_flags = set(absl_flags.get_key_flags_for_module(sys.argv[0])) + for name in absl_flags: + if name in _BUILT_IN_FLAGS: + # Do not inherit built-in flags. + continue + flag_instance = absl_flags[name] + # Each flags with short_name appears in FLAGS twice, so only define + # when the dictionary key is equal to the regular name. + if name == flag_instance.name: + # Suppress the flag in the help short message if it's not a main + # module's key flag. + suppress = flag_instance not in key_flags + self._define_absl_flag(flag_instance, suppress) + + def _define_absl_flag(self, flag_instance, suppress): + """Defines a flag from the flag_instance.""" + flag_name = flag_instance.name + short_name = flag_instance.short_name + argument_names = ['--' + flag_name] + if short_name: + argument_names.insert(0, '-' + short_name) + if suppress: + helptext = argparse.SUPPRESS + else: + # argparse help string uses %-formatting. Escape the literal %'s. + helptext = flag_instance.help.replace('%', '%%') + if flag_instance.boolean: + # Only add the `no` form to the long name. + argument_names.append('--no' + flag_name) + self.add_argument( + *argument_names, action=_BooleanFlagAction, help=helptext, + metavar=flag_instance.name.upper(), + flag_instance=flag_instance) + else: + self.add_argument( + *argument_names, action=_FlagAction, help=helptext, + metavar=flag_instance.name.upper(), + flag_instance=flag_instance) + + +class _FlagAction(argparse.Action): + """Action class for Abseil non-boolean flags.""" + + def __init__( + self, + option_strings, + dest, + help, # pylint: disable=redefined-builtin + metavar, + flag_instance, + default=argparse.SUPPRESS): + """Initializes _FlagAction. + + Args: + option_strings: See argparse.Action. + dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS. + help: See argparse.Action. + metavar: See argparse.Action. + flag_instance: absl.flags.Flag, the absl flag instance. + default: Ignored. The flag always uses dest=argparse.SUPPRESS so it + doesn't affect the parsing result. + """ + del dest + self._flag_instance = flag_instance + super(_FlagAction, self).__init__( + option_strings=option_strings, + dest=argparse.SUPPRESS, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + """See https://docs.python.org/3/library/argparse.html#action-classes.""" + self._flag_instance.parse(values) + self._flag_instance.using_default_value = False + + +class _BooleanFlagAction(argparse.Action): + """Action class for Abseil boolean flags.""" + + def __init__( + self, + option_strings, + dest, + help, # pylint: disable=redefined-builtin + metavar, + flag_instance, + default=argparse.SUPPRESS): + """Initializes _BooleanFlagAction. + + Args: + option_strings: See argparse.Action. + dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS. + help: See argparse.Action. + metavar: See argparse.Action. + flag_instance: absl.flags.Flag, the absl flag instance. + default: Ignored. The flag always uses dest=argparse.SUPPRESS so it + doesn't affect the parsing result. 
+ """ + del dest, default + self._flag_instance = flag_instance + flag_names = [self._flag_instance.name] + if self._flag_instance.short_name: + flag_names.append(self._flag_instance.short_name) + self._flag_names = frozenset(flag_names) + super(_BooleanFlagAction, self).__init__( + option_strings=option_strings, + dest=argparse.SUPPRESS, + nargs=0, # Does not accept values, only `--bool` or `--nobool`. + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + """See https://docs.python.org/3/library/argparse.html#action-classes.""" + if not isinstance(values, list) or values: + raise ValueError('values must be an empty list.') + if option_string.startswith('--'): + option = option_string[2:] + else: + option = option_string[1:] + if option in self._flag_names: + self._flag_instance.parse('true') + else: + if not option.startswith('no') or option[2:] not in self._flag_names: + raise ValueError('invalid option_string: ' + option_string) + self._flag_instance.parse('false') + self._flag_instance.using_default_value = False + + +class _HelpFullAction(argparse.Action): + """Action class for --helpfull flag.""" + + def __init__(self, option_strings, dest, default, help): # pylint: disable=redefined-builtin + """Initializes _HelpFullAction. + + Args: + option_strings: See argparse.Action. + dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS. + default: Ignored. + help: See argparse.Action. + """ + del dest, default + super(_HelpFullAction, self).__init__( + option_strings=option_strings, + dest=argparse.SUPPRESS, + default=argparse.SUPPRESS, + nargs=0, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + """See https://docs.python.org/3/library/argparse.html#action-classes.""" + # This only prints flags when help is not argparse.SUPPRESS. + # It includes user defined argparse flags, as well as main module's + # key absl flags. Other absl flags use argparse.SUPPRESS, so they aren't + # printed here. + parser.print_help() + + absl_flags = parser._inherited_absl_flags # pylint: disable=protected-access + if absl_flags is not None: + modules = sorted(absl_flags.flags_by_module_dict()) + main_module = sys.argv[0] + if main_module in modules: + # The main module flags are already printed in parser.print_help(). + modules.remove(main_module) + print(absl_flags._get_help_for_modules( # pylint: disable=protected-access + modules, prefix='', include_special_flags=True)) + parser.exit() + + +def _strip_undefok_args(undefok, args): + """Returns a new list of args after removing flags in --undefok.""" + if undefok: + undefok_names = set(name.strip() for name in undefok.split(',')) + undefok_names |= set('no' + name for name in undefok_names) + # Remove undefok flags. 
+ args = [arg for arg in args if not _is_undefok(arg, undefok_names)] + return args + + +def _is_undefok(arg, undefok_names): + """Returns whether we can ignore arg based on a set of undefok flag names.""" + if not arg.startswith('-'): + return False + if arg.startswith('--'): + arg_without_dash = arg[2:] + else: + arg_without_dash = arg[1:] + if '=' in arg_without_dash: + name, _ = arg_without_dash.split('=', 1) + else: + name = arg_without_dash + if name in undefok_names: + return True + return False diff --git a/venv/lib/python3.10/site-packages/absl/logging/__init__.py b/venv/lib/python3.10/site-packages/absl/logging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..42166cd892f99c34f637dd661576897196a357f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/logging/__init__.py @@ -0,0 +1,1281 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Abseil Python logging module implemented on top of standard logging. + +Simple usage:: + + from absl import logging + + logging.info('Interesting Stuff') + logging.info('Interesting Stuff with Arguments: %d', 42) + + logging.set_verbosity(logging.INFO) + logging.log(logging.DEBUG, 'This will *not* be printed') + logging.set_verbosity(logging.DEBUG) + logging.log(logging.DEBUG, 'This will be printed') + + logging.warning('Worrying Stuff') + logging.error('Alarming Stuff') + logging.fatal('AAAAHHHHH!!!!') # Process exits. + +Usage note: Do not pre-format the strings in your program code. +Instead, let the logging module perform argument interpolation. +This saves cycles because strings that don't need to be printed +are never formatted. Note that this module does not attempt to +interpolate arguments when no arguments are given. In other words:: + + logging.info('Interesting Stuff: %s') + +does not raise an exception because logging.info() has only one +argument, the message string. + +"Lazy" evaluation for debugging +------------------------------- + +If you do something like this:: + + logging.debug('Thing: %s', thing.ExpensiveOp()) + +then the ExpensiveOp will be evaluated even if nothing +is printed to the log. To avoid this, use the level_debug() function:: + + if logging.level_debug(): + logging.debug('Thing: %s', thing.ExpensiveOp()) + +Per file level logging is supported by logging.vlog() and +logging.vlog_is_on(). For example:: + + if logging.vlog_is_on(2): + logging.vlog(2, very_expensive_debug_message()) + +Notes on Unicode +---------------- + +The log output is encoded as UTF-8. Don't pass data in other encodings in +bytes() instances -- instead pass unicode string instances when you need to +(for both the format string and arguments). + +Note on critical and fatal: +Standard logging module defines fatal as an alias to critical, but it's not +documented, and it does NOT actually terminate the program. +This module only defines fatal but not critical, and it DOES terminate the +program. + +The differences in behavior are historical and unfortunate. 
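+
+A small illustration of that difference (a sketch only, not part of this
+module's API)::
+
+    import logging as std_logging
+    from absl import logging
+
+    std_logging.critical('standard logging: execution continues after this')
+    logging.fatal('absl logging: the process terminates here')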
+""" + +import collections +from collections import abc +import getpass +import io +import itertools +import logging +import os +import socket +import struct +import sys +import tempfile +import threading +import time +import timeit +import traceback +import types +import warnings + +from absl import flags +from absl.logging import converter + +# pylint: disable=g-import-not-at-top +try: + from typing import NoReturn +except ImportError: + pass + +# pylint: enable=g-import-not-at-top + +FLAGS = flags.FLAGS + + +# Logging levels. +FATAL = converter.ABSL_FATAL +ERROR = converter.ABSL_ERROR +WARNING = converter.ABSL_WARNING +WARN = converter.ABSL_WARNING # Deprecated name. +INFO = converter.ABSL_INFO +DEBUG = converter.ABSL_DEBUG + +# Regex to match/parse log line prefixes. +ABSL_LOGGING_PREFIX_REGEX = ( + r'^(?P[IWEF])' + r'(?P\d\d)(?P\d\d) ' + r'(?P\d\d):(?P\d\d):(?P\d\d)' + r'\.(?P\d\d\d\d\d\d) +' + r'(?P-?\d+) ' + r'(?P[a-zA-Z<][\w._<>-]+):(?P\d+)') + + +# Mask to convert integer thread ids to unsigned quantities for logging purposes +_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1 + +# Extra property set on the LogRecord created by ABSLLogger when its level is +# CRITICAL/FATAL. +_ABSL_LOG_FATAL = '_absl_log_fatal' +# Extra prefix added to the log message when a non-absl logger logs a +# CRITICAL/FATAL message. +_CRITICAL_PREFIX = 'CRITICAL - ' + +# Used by findCaller to skip callers from */logging/__init__.py. +_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.') + +# The ABSL logger instance, initialized in _initialize(). +_absl_logger = None +# The ABSL handler instance, initialized in _initialize(). +_absl_handler = None + + +_CPP_NAME_TO_LEVELS = { + 'debug': '0', # Abseil C++ has no DEBUG level, mapping it to INFO here. + 'info': '0', + 'warning': '1', + 'warn': '1', + 'error': '2', + 'fatal': '3' +} + +_CPP_LEVEL_TO_NAMES = { + '0': 'info', + '1': 'warning', + '2': 'error', + '3': 'fatal', +} + + +class _VerbosityFlag(flags.Flag): + """Flag class for -v/--verbosity.""" + + def __init__(self, *args, **kwargs): + super(_VerbosityFlag, self).__init__( + flags.IntegerParser(), + flags.ArgumentSerializer(), + *args, **kwargs) + + @property + def value(self): + return self._value + + @value.setter + def value(self, v): + self._value = v + self._update_logging_levels() + + def _update_logging_levels(self): + """Updates absl logging levels to the current verbosity. + + Visibility: module-private + """ + if not _absl_logger: + return + + if self._value <= converter.ABSL_DEBUG: + standard_verbosity = converter.absl_to_standard(self._value) + else: + # --verbosity is set to higher than 1 for vlog. + standard_verbosity = logging.DEBUG - (self._value - 1) + + # Also update root level when absl_handler is used. + if _absl_handler in logging.root.handlers: + # Make absl logger inherit from the root logger. absl logger might have + # a non-NOTSET value if logging.set_verbosity() is called at import time. + _absl_logger.setLevel(logging.NOTSET) + logging.root.setLevel(standard_verbosity) + else: + _absl_logger.setLevel(standard_verbosity) + + +class _LoggerLevelsFlag(flags.Flag): + """Flag class for --logger_levels.""" + + def __init__(self, *args, **kwargs): + super(_LoggerLevelsFlag, self).__init__( + _LoggerLevelsParser(), + _LoggerLevelsSerializer(), + *args, **kwargs) + + @property + def value(self): + # For lack of an immutable type, be defensive and return a copy. + # Modifications to the dict aren't supported and won't have any affect. 
+ # While Py3 could use MappingProxyType, that isn't deepcopy friendly, so + # just return a copy. + return self._value.copy() + + @value.setter + def value(self, v): + self._value = {} if v is None else v + self._update_logger_levels() + + def _update_logger_levels(self): + # Visibility: module-private. + # This is called by absl.app.run() during initialization. + for name, level in self._value.items(): + logging.getLogger(name).setLevel(level) + + +class _LoggerLevelsParser(flags.ArgumentParser): + """Parser for --logger_levels flag.""" + + def parse(self, value): + if isinstance(value, abc.Mapping): + return value + + pairs = [pair.strip() for pair in value.split(',') if pair.strip()] + + # Preserve the order so that serialization is deterministic. + levels = collections.OrderedDict() + for name_level in pairs: + name, level = name_level.split(':', 1) + name = name.strip() + level = level.strip() + levels[name] = level + return levels + + +class _LoggerLevelsSerializer(object): + """Serializer for --logger_levels flag.""" + + def serialize(self, value): + if isinstance(value, str): + return value + return ','.join( + '{}:{}'.format(name, level) for name, level in value.items()) + + +class _StderrthresholdFlag(flags.Flag): + """Flag class for --stderrthreshold.""" + + def __init__(self, *args, **kwargs): + super(_StderrthresholdFlag, self).__init__( + flags.ArgumentParser(), + flags.ArgumentSerializer(), + *args, **kwargs) + + @property + def value(self): + return self._value + + @value.setter + def value(self, v): + if v in _CPP_LEVEL_TO_NAMES: + # --stderrthreshold also accepts numeric strings whose values are + # Abseil C++ log levels. + cpp_value = int(v) + v = _CPP_LEVEL_TO_NAMES[v] # Normalize to strings. + elif v.lower() in _CPP_NAME_TO_LEVELS: + v = v.lower() + if v == 'warn': + v = 'warning' # Use 'warning' as the canonical name. + cpp_value = int(_CPP_NAME_TO_LEVELS[v]) + else: + raise ValueError( + '--stderrthreshold must be one of (case-insensitive) ' + "'debug', 'info', 'warning', 'error', 'fatal', " + "or '0', '1', '2', '3', not '%s'" % v) + + self._value = v + + +LOGTOSTDERR = flags.DEFINE_boolean( + 'logtostderr', + False, + 'Should only log to stderr?', + allow_override_cpp=True, +) +ALSOLOGTOSTDERR = flags.DEFINE_boolean( + 'alsologtostderr', + False, + 'also log to stderr?', + allow_override_cpp=True, +) +LOG_DIR = flags.DEFINE_string( + 'log_dir', + os.getenv('TEST_TMPDIR', ''), + 'directory to write logfiles into', + allow_override_cpp=True, +) +VERBOSITY = flags.DEFINE_flag( + _VerbosityFlag( + 'verbosity', + -1, + ( + 'Logging verbosity level. Messages logged at this level or lower' + ' will be included. Set to 1 for debug logging. If the flag was not' + ' set or supplied, the value will be changed from the default of -1' + ' (warning) to 0 (info) after flags are parsed.' + ), + short_name='v', + allow_hide_cpp=True, + ) +) +LOGGER_LEVELS = flags.DEFINE_flag( + _LoggerLevelsFlag( + 'logger_levels', + {}, + ( + 'Specify log level of loggers. The format is a CSV list of ' + '`name:level`. Where `name` is the logger name used with ' + '`logging.getLogger()`, and `level` is a level name (INFO, DEBUG, ' + 'etc). e.g. `myapp.foo:INFO,other.logger:DEBUG`' + ), + ) +) +STDERRTHRESHOLD = flags.DEFINE_flag( + _StderrthresholdFlag( + 'stderrthreshold', + 'fatal', + ( + 'log messages at this level, or more severe, to stderr in ' + 'addition to the logfile. Possible values are ' + "'debug', 'info', 'warning', 'error', and 'fatal'. " + 'Obsoletes --alsologtostderr. 
Using --alsologtostderr ' + 'cancels the effect of this flag. Please also note that ' + 'this flag is subject to --verbosity and requires logfile ' + 'not be stderr.' + ), + allow_hide_cpp=True, + ) +) +SHOWPREFIXFORINFO = flags.DEFINE_boolean( + 'showprefixforinfo', + True, + ( + 'If False, do not prepend prefix to info messages ' + "when it's logged to stderr, " + '--verbosity is set to INFO level, ' + 'and python logging is used.' + ), +) + + +def get_verbosity(): + """Returns the logging verbosity.""" + return FLAGS['verbosity'].value + + +def set_verbosity(v): + """Sets the logging verbosity. + + Causes all messages of level <= v to be logged, + and all messages of level > v to be silently discarded. + + Args: + v: int|str, the verbosity level as an integer or string. Legal string values + are those that can be coerced to an integer as well as case-insensitive + 'debug', 'info', 'warning', 'error', and 'fatal'. + """ + try: + new_level = int(v) + except ValueError: + new_level = converter.ABSL_NAMES[v.upper()] + FLAGS.verbosity = new_level + + +def set_stderrthreshold(s): + """Sets the stderr threshold to the value passed in. + + Args: + s: str|int, valid strings values are case-insensitive 'debug', + 'info', 'warning', 'error', and 'fatal'; valid integer values are + logging.DEBUG|INFO|WARNING|ERROR|FATAL. + + Raises: + ValueError: Raised when s is an invalid value. + """ + if s in converter.ABSL_LEVELS: + FLAGS.stderrthreshold = converter.ABSL_LEVELS[s] + elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES: + FLAGS.stderrthreshold = s + else: + raise ValueError( + 'set_stderrthreshold only accepts integer absl logging level ' + 'from -3 to 1, or case-insensitive string values ' + "'debug', 'info', 'warning', 'error', and 'fatal'. " + 'But found "{}" ({}).'.format(s, type(s))) + + +def fatal(msg, *args, **kwargs): + # type: (Any, Any, Any) -> NoReturn + """Logs a fatal message.""" + log(FATAL, msg, *args, **kwargs) + + +def error(msg, *args, **kwargs): + """Logs an error message.""" + log(ERROR, msg, *args, **kwargs) + + +def warning(msg, *args, **kwargs): + """Logs a warning message.""" + log(WARNING, msg, *args, **kwargs) + + +def warn(msg, *args, **kwargs): + """Deprecated, use 'warning' instead.""" + warnings.warn("The 'warn' function is deprecated, use 'warning' instead", + DeprecationWarning, 2) + log(WARNING, msg, *args, **kwargs) + + +def info(msg, *args, **kwargs): + """Logs an info message.""" + log(INFO, msg, *args, **kwargs) + + +def debug(msg, *args, **kwargs): + """Logs a debug message.""" + log(DEBUG, msg, *args, **kwargs) + + +def exception(msg, *args, exc_info=True, **kwargs): + """Logs an exception, with traceback and message.""" + error(msg, *args, exc_info=exc_info, **kwargs) + + +# Counter to keep track of number of log entries per token. +_log_counter_per_token = {} + + +def _get_next_log_count_per_token(token): + """Wrapper for _log_counter_per_token. Thread-safe. + + Args: + token: The token for which to look up the count. + + Returns: + The number of times this function has been called with + *token* as an argument (starting at 0). + """ + # Can't use a defaultdict because defaultdict isn't atomic, whereas + # setdefault is. + return next(_log_counter_per_token.setdefault(token, itertools.count())) + + +def log_every_n(level, msg, n, *args): + """Logs ``msg % args`` at level 'level' once per 'n' times. + + Logs the 1st call, (N+1)st call, (2N+1)st call, etc. + Not threadsafe. + + Args: + level: int, the absl logging level at which to log. 
+ msg: str, the message to be logged. + n: int, the number of times this should be called before it is logged. + *args: The args to be substituted into the msg. + """ + count = _get_next_log_count_per_token(get_absl_logger().findCaller()) + log_if(level, msg, not (count % n), *args) + + +# Keeps track of the last log time of the given token. +# Note: must be a dict since set/get is atomic in CPython. +# Note: entries are never released as their number is expected to be low. +_log_timer_per_token = {} + + +def _seconds_have_elapsed(token, num_seconds): + """Tests if 'num_seconds' have passed since 'token' was requested. + + Not strictly thread-safe - may log with the wrong frequency if called + concurrently from multiple threads. Accuracy depends on resolution of + 'timeit.default_timer()'. + + Always returns True on the first call for a given 'token'. + + Args: + token: The token for which to look up the count. + num_seconds: The number of seconds to test for. + + Returns: + Whether it has been >= 'num_seconds' since 'token' was last requested. + """ + now = timeit.default_timer() + then = _log_timer_per_token.get(token, None) + if then is None or (now - then) >= num_seconds: + _log_timer_per_token[token] = now + return True + else: + return False + + +def log_every_n_seconds(level, msg, n_seconds, *args): + """Logs ``msg % args`` at level ``level`` iff ``n_seconds`` elapsed since last call. + + Logs the first call, logs subsequent calls if 'n' seconds have elapsed since + the last logging call from the same call site (file + line). Not thread-safe. + + Args: + level: int, the absl logging level at which to log. + msg: str, the message to be logged. + n_seconds: float or int, seconds which should elapse before logging again. + *args: The args to be substituted into the msg. + """ + should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds) + log_if(level, msg, should_log, *args) + + +def log_first_n(level, msg, n, *args): + """Logs ``msg % args`` at level ``level`` only first ``n`` times. + + Not threadsafe. + + Args: + level: int, the absl logging level at which to log. + msg: str, the message to be logged. + n: int, the maximal number of times the message is logged. + *args: The args to be substituted into the msg. + """ + count = _get_next_log_count_per_token(get_absl_logger().findCaller()) + log_if(level, msg, count < n, *args) + + +def log_if(level, msg, condition, *args): + """Logs ``msg % args`` at level ``level`` only if condition is fulfilled.""" + if condition: + log(level, msg, *args) + + +def log(level, msg, *args, **kwargs): + """Logs ``msg % args`` at absl logging level ``level``. + + If no args are given just print msg, ignoring any interpolation specifiers. + + Args: + level: int, the absl logging level at which to log the message + (logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging + level constants are also supported, callers should prefer explicit + logging.vlog() calls for such purpose. + + msg: str, the message to be logged. + *args: The args to be substituted into the msg. + **kwargs: May contain exc_info to add exception traceback to message. + """ + if level > converter.ABSL_DEBUG: + # Even though this function supports level that is greater than 1, users + # should use logging.vlog instead for such cases. + # Treat this as vlog, 1 is equivalent to DEBUG. 
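A worked example of the arithmetic on the next line, assuming the stock standard-library value `logging.DEBUG == 10`:

```python
import logging

# vlog level 1 is DEBUG itself; each higher vlog level lands one step below it.
assert logging.DEBUG - (1 - 1) == 10  # vlog(1, ...) -> DEBUG
assert logging.DEBUG - (2 - 1) == 9   # vlog(2, ...) -> just below DEBUG
assert logging.DEBUG - (3 - 1) == 8   # vlog(3, ...) -> and so on
```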
+ standard_level = converter.STANDARD_DEBUG - (level - 1) + else: + if level < converter.ABSL_FATAL: + level = converter.ABSL_FATAL + standard_level = converter.absl_to_standard(level) + + # Match standard logging's behavior. Before use_absl_handler() and + # logging is configured, there is no handler attached on _absl_logger nor + # logging.root. So logs go no where. + if not logging.root.handlers: + logging.basicConfig() + + _absl_logger.log(standard_level, msg, *args, **kwargs) + + +def vlog(level, msg, *args, **kwargs): + """Log ``msg % args`` at C++ vlog level ``level``. + + Args: + level: int, the C++ verbose logging level at which to log the message, + e.g. 1, 2, 3, 4... While absl level constants are also supported, + callers should prefer logging.log|debug|info|... calls for such purpose. + msg: str, the message to be logged. + *args: The args to be substituted into the msg. + **kwargs: May contain exc_info to add exception traceback to message. + """ + log(level, msg, *args, **kwargs) + + +def vlog_is_on(level): + """Checks if vlog is enabled for the given level in caller's source file. + + Args: + level: int, the C++ verbose logging level at which to log the message, + e.g. 1, 2, 3, 4... While absl level constants are also supported, + callers should prefer level_debug|level_info|... calls for + checking those. + + Returns: + True if logging is turned on for that level. + """ + + if level > converter.ABSL_DEBUG: + # Even though this function supports level that is greater than 1, users + # should use logging.vlog instead for such cases. + # Treat this as vlog, 1 is equivalent to DEBUG. + standard_level = converter.STANDARD_DEBUG - (level - 1) + else: + if level < converter.ABSL_FATAL: + level = converter.ABSL_FATAL + standard_level = converter.absl_to_standard(level) + return _absl_logger.isEnabledFor(standard_level) + + +def flush(): + """Flushes all log files.""" + get_absl_handler().flush() + + +def level_debug(): + """Returns True if debug logging is turned on.""" + return get_verbosity() >= DEBUG + + +def level_info(): + """Returns True if info logging is turned on.""" + return get_verbosity() >= INFO + + +def level_warning(): + """Returns True if warning logging is turned on.""" + return get_verbosity() >= WARNING + + +level_warn = level_warning # Deprecated function. + + +def level_error(): + """Returns True if error logging is turned on.""" + return get_verbosity() >= ERROR + + +def get_log_file_name(level=INFO): + """Returns the name of the log file. + + For Python logging, only one file is used and level is ignored. And it returns + empty string if it logs to stderr/stdout or the log stream has no `name` + attribute. + + Args: + level: int, the absl.logging level. + + Raises: + ValueError: Raised when `level` has an invalid value. + """ + if level not in converter.ABSL_LEVELS: + raise ValueError('Invalid absl.logging level {}'.format(level)) + stream = get_absl_handler().python_handler.stream + if (stream == sys.stderr or stream == sys.stdout or + not hasattr(stream, 'name')): + return '' + else: + return stream.name + + +def find_log_dir_and_names(program_name=None, log_dir=None): + """Computes the directory and filename prefix for log file. + + Args: + program_name: str|None, the filename part of the path to the program that + is running without its extension. 
e.g: if your program is called + ``usr/bin/foobar.py`` this method should probably be called with + ``program_name='foobar`` However, this is just a convention, you can + pass in any string you want, and it will be used as part of the + log filename. If you don't pass in anything, the default behavior + is as described in the example. In python standard logging mode, + the program_name will be prepended with ``py_`` if it is the + ``program_name`` argument is omitted. + log_dir: str|None, the desired log directory. + + Returns: + (log_dir, file_prefix, symlink_prefix) + + Raises: + FileNotFoundError: raised in Python 3 when it cannot find a log directory. + OSError: raised in Python 2 when it cannot find a log directory. + """ + if not program_name: + # Strip the extension (foobar.par becomes foobar, and + # fubar.py becomes fubar). We do this so that the log + # file names are similar to C++ log file names. + program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0] + + # Prepend py_ to files so that python code gets a unique file, and + # so that C++ libraries do not try to write to the same log files as us. + program_name = 'py_%s' % program_name + + actual_log_dir = find_log_dir(log_dir=log_dir) + + try: + username = getpass.getuser() + except KeyError: + # This can happen, e.g. when running under docker w/o passwd file. + if hasattr(os, 'getuid'): + # Windows doesn't have os.getuid + username = str(os.getuid()) + else: + username = 'unknown' + hostname = socket.gethostname() + file_prefix = '%s.%s.%s.log' % (program_name, hostname, username) + + return actual_log_dir, file_prefix, program_name + + +def find_log_dir(log_dir=None): + """Returns the most suitable directory to put log files into. + + Args: + log_dir: str|None, if specified, the logfile(s) will be created in that + directory. Otherwise if the --log_dir command-line flag is provided, + the logfile will be created in that directory. Otherwise the logfile + will be created in a standard location. + + Raises: + FileNotFoundError: raised in Python 3 when it cannot find a log directory. + OSError: raised in Python 2 when it cannot find a log directory. + """ + # Get a list of possible log dirs (will try to use them in order). + # NOTE: Google's internal implementation has a special handling for Google + # machines, which uses a list of directories. Hence the following uses `dirs` + # instead of a single directory. + if log_dir: + # log_dir was explicitly specified as an arg, so use it and it alone. + dirs = [log_dir] + elif FLAGS['log_dir'].value: + # log_dir flag was provided, so use it and it alone (this mimics the + # behavior of the same flag in logging.cc). + dirs = [FLAGS['log_dir'].value] + else: + dirs = [tempfile.gettempdir()] + + # Find the first usable log dir. + for d in dirs: + if os.path.isdir(d) and os.access(d, os.W_OK): + return d + raise FileNotFoundError( + "Can't find a writable directory for logs, tried %s" % dirs) + + +def get_absl_log_prefix(record): + """Returns the absl log prefix for the log record. + + Args: + record: logging.LogRecord, the record to get prefix for. + """ + created_tuple = time.localtime(record.created) + created_microsecond = int(record.created % 1.0 * 1e6) + + critical_prefix = '' + level = record.levelno + if _is_non_absl_fatal_record(record): + # When the level is FATAL, but not logged from absl, lower the level so + # it's treated as ERROR. 
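For orientation, the prefix assembled by the format string just below has the following shape (an illustrative sketch; the timestamp, thread id, and source location are made up):

```python
# Severity initial, MMDD HH:MM:SS.microseconds, thread id, file:line, '] '.
prefix = '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
    'I', 5, 23, 13, 8, 21, 123456, 140234, 'example.py', 42, '')
assert prefix == 'I0523 13:08:21.123456 140234 example.py:42] '
```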
+ level = logging.ERROR + critical_prefix = _CRITICAL_PREFIX + severity = converter.get_initial_for_level(level) + + return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % ( + severity, + created_tuple.tm_mon, + created_tuple.tm_mday, + created_tuple.tm_hour, + created_tuple.tm_min, + created_tuple.tm_sec, + created_microsecond, + _get_thread_id(), + record.filename, + record.lineno, + critical_prefix) + + +def skip_log_prefix(func): + """Skips reporting the prefix of a given function or name by :class:`~absl.logging.ABSLLogger`. + + This is a convenience wrapper function / decorator for + :meth:`~absl.logging.ABSLLogger.register_frame_to_skip`. + + If a callable function is provided, only that function will be skipped. + If a function name is provided, all functions with the same name in the + file that this is called in will be skipped. + + This can be used as a decorator of the intended function to be skipped. + + Args: + func: Callable function or its name as a string. + + Returns: + func (the input, unchanged). + + Raises: + ValueError: The input is callable but does not have a function code object. + TypeError: The input is neither callable nor a string. + """ + if callable(func): + func_code = getattr(func, '__code__', None) + if func_code is None: + raise ValueError('Input callable does not have a function code object.') + file_name = func_code.co_filename + func_name = func_code.co_name + func_lineno = func_code.co_firstlineno + elif isinstance(func, str): + file_name = get_absl_logger().findCaller()[0] + func_name = func + func_lineno = None + else: + raise TypeError('Input is neither callable nor a string.') + ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno) + return func + + +def _is_non_absl_fatal_record(log_record): + return (log_record.levelno >= logging.FATAL and + not log_record.__dict__.get(_ABSL_LOG_FATAL, False)) + + +def _is_absl_fatal_record(log_record): + return (log_record.levelno >= logging.FATAL and + log_record.__dict__.get(_ABSL_LOG_FATAL, False)) + + +# Indicates if we still need to warn about pre-init logs going to stderr. +_warn_preinit_stderr = True + + +class PythonHandler(logging.StreamHandler): + """The handler class used by Abseil Python logging implementation.""" + + def __init__(self, stream=None, formatter=None): + super(PythonHandler, self).__init__(stream) + self.setFormatter(formatter or PythonFormatter()) + + def start_logging_to_file(self, program_name=None, log_dir=None): + """Starts logging messages to files instead of standard error.""" + FLAGS.logtostderr = False + + actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names( + program_name=program_name, log_dir=log_dir) + + basename = '%s.INFO.%s.%d' % ( + file_prefix, + time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())), + os.getpid()) + filename = os.path.join(actual_log_dir, basename) + + self.stream = open(filename, 'a', encoding='utf-8') + + # os.symlink is not available on Windows Python 2. + if getattr(os, 'symlink', None): + # Create a symlink to the log file with a canonical name. + symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO') + try: + if os.path.islink(symlink): + os.unlink(symlink) + os.symlink(os.path.basename(filename), symlink) + except EnvironmentError: + # If it fails, we're sad but it's no error. 
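As an illustration of the log file naming built a few lines above (program, host, user, pid, and timestamp are all made up):

```python
import time

file_prefix = 'py_foobar.buildbox.alice.log'
basename = '%s.INFO.%s.%d' % (
    file_prefix, time.strftime('%Y%m%d-%H%M%S', time.localtime()), 4242)
# e.g. 'py_foobar.buildbox.alice.log.INFO.20240523-130812.4242'; the stable
# symlink 'py_foobar.INFO' is then pointed at whatever file is current.
```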
Commonly, this + # fails because the symlink was created by another user and so + # we can't modify it + pass + + def use_absl_log_file(self, program_name=None, log_dir=None): + """Conditionally logs to files, based on --logtostderr.""" + if FLAGS['logtostderr'].value: + self.stream = sys.stderr + else: + self.start_logging_to_file(program_name=program_name, log_dir=log_dir) + + def flush(self): + """Flushes all log files.""" + self.acquire() + try: + if self.stream and hasattr(self.stream, 'flush'): + self.stream.flush() + except (EnvironmentError, ValueError): + # A ValueError is thrown if we try to flush a closed file. + pass + finally: + self.release() + + def _log_to_stderr(self, record): + """Emits the record to stderr. + + This temporarily sets the handler stream to stderr, calls + StreamHandler.emit, then reverts the stream back. + + Args: + record: logging.LogRecord, the record to log. + """ + # emit() is protected by a lock in logging.Handler, so we don't need to + # protect here again. + old_stream = self.stream + self.stream = sys.stderr + try: + super(PythonHandler, self).emit(record) + finally: + self.stream = old_stream + + def emit(self, record): + """Prints a record out to some streams. + + 1. If ``FLAGS.logtostderr`` is set, it will print to ``sys.stderr`` ONLY. + 2. If ``FLAGS.alsologtostderr`` is set, it will print to ``sys.stderr``. + 3. If ``FLAGS.logtostderr`` is not set, it will log to the stream + associated with the current thread. + + Args: + record: :class:`logging.LogRecord`, the record to emit. + """ + # People occasionally call logging functions at import time before + # our flags may have even been defined yet, let alone even parsed, as we + # rely on the C++ side to define some flags for us and app init to + # deal with parsing. Match the C++ library behavior of notify and emit + # such messages to stderr. It encourages people to clean-up and does + # not hide the message. + level = record.levelno + if not FLAGS.is_parsed(): # Also implies "before flag has been defined". + global _warn_preinit_stderr + if _warn_preinit_stderr: + sys.stderr.write( + 'WARNING: Logging before flag parsing goes to stderr.\n') + _warn_preinit_stderr = False + self._log_to_stderr(record) + elif FLAGS['logtostderr'].value: + self._log_to_stderr(record) + else: + super(PythonHandler, self).emit(record) + stderr_threshold = converter.string_to_standard( + FLAGS['stderrthreshold'].value) + if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and + self.stream != sys.stderr): + self._log_to_stderr(record) + # Die when the record is created from ABSLLogger and level is FATAL. + if _is_absl_fatal_record(record): + self.flush() # Flush the log before dying. + + # In threaded python, sys.exit() from a non-main thread only + # exits the thread in question. + os.abort() + + def close(self): + """Closes the stream to which we are writing.""" + self.acquire() + try: + self.flush() + try: + # Do not close the stream if it's sys.stderr|stdout. They may be + # redirected or overridden to files, which should be managed by users + # explicitly. + user_managed = sys.stderr, sys.stdout, sys.__stderr__, sys.__stdout__ + if self.stream not in user_managed and ( + not hasattr(self.stream, 'isatty') or not self.stream.isatty()): + self.stream.close() + except ValueError: + # A ValueError is thrown if we try to run isatty() on a closed file. 
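A hedged sketch of how the routing implemented in `emit()` above is typically exercised; the program name 'demo' and the /tmp log directory are arbitrary illustrations:

```python
from absl import app
from absl import logging

def main(argv):
    del argv
    # Write log files under /tmp; --logtostderr skips files entirely,
    # --alsologtostderr mirrors everything to stderr, and --stderrthreshold
    # controls which severities are copied to stderr in addition.
    logging.get_absl_handler().use_absl_log_file('demo', '/tmp')
    logging.info('recorded in the log file')
    logging.error('also copied to stderr once it meets --stderrthreshold')

if __name__ == '__main__':
    app.run(main)
```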
+ pass + super(PythonHandler, self).close() + finally: + self.release() + + +class ABSLHandler(logging.Handler): + """Abseil Python logging module's log handler.""" + + def __init__(self, python_logging_formatter): + super(ABSLHandler, self).__init__() + + self._python_handler = PythonHandler(formatter=python_logging_formatter) + self.activate_python_handler() + + def format(self, record): + return self._current_handler.format(record) + + def setFormatter(self, fmt): + self._current_handler.setFormatter(fmt) + + def emit(self, record): + self._current_handler.emit(record) + + def flush(self): + self._current_handler.flush() + + def close(self): + super(ABSLHandler, self).close() + self._current_handler.close() + + def handle(self, record): + rv = self.filter(record) + if rv: + return self._current_handler.handle(record) + return rv + + @property + def python_handler(self): + return self._python_handler + + def activate_python_handler(self): + """Uses the Python logging handler as the current logging handler.""" + self._current_handler = self._python_handler + + def use_absl_log_file(self, program_name=None, log_dir=None): + self._current_handler.use_absl_log_file(program_name, log_dir) + + def start_logging_to_file(self, program_name=None, log_dir=None): + self._current_handler.start_logging_to_file(program_name, log_dir) + + +class PythonFormatter(logging.Formatter): + """Formatter class used by :class:`~absl.logging.PythonHandler`.""" + + def format(self, record): + """Appends the message from the record to the results of the prefix. + + Args: + record: logging.LogRecord, the record to be formatted. + + Returns: + The formatted string representing the record. + """ + if (not FLAGS['showprefixforinfo'].value and + FLAGS['verbosity'].value == converter.ABSL_INFO and + record.levelno == logging.INFO and + _absl_handler.python_handler.stream == sys.stderr): + prefix = '' + else: + prefix = get_absl_log_prefix(record) + return prefix + super(PythonFormatter, self).format(record) + + +class ABSLLogger(logging.getLoggerClass()): + """A logger that will create LogRecords while skipping some stack frames. + + This class maintains an internal list of filenames and method names + for use when determining who called the currently executing stack + frame. Any method names from specific source files are skipped when + walking backwards through the stack. + + Client code should use the register_frame_to_skip method to let the + ABSLLogger know which method from which file should be + excluded from the walk backwards through the stack. + """ + _frames_to_skip = set() + + def findCaller(self, stack_info=False, stacklevel=1): + """Finds the frame of the calling method on the stack. + + This method skips any frames registered with the + ABSLLogger and any methods from this file, and whatever + method is currently being used to generate the prefix for the log + line. Then it returns the file name, line number, and method name + of the calling method. An optional fourth item may be returned, + callers who only need things from the first three are advised to + always slice or index the result rather than using direct unpacking + assignment. + + Args: + stack_info: bool, when True, include the stack trace as a fourth item + returned. On Python 3 there are always four items returned - the + fourth will be None when this is False. On Python 2 the stdlib + base class API only returns three items. We do the same when this + new parameter is unspecified or False for compatibility. 
+ + Returns: + (filename, lineno, methodname[, sinfo]) of the calling method. + """ + f_to_skip = ABSLLogger._frames_to_skip + # Use sys._getframe(2) instead of logging.currentframe(), it's slightly + # faster because there is one less frame to traverse. + frame = sys._getframe(2) # pylint: disable=protected-access + + while frame: + code = frame.f_code + if (_LOGGING_FILE_PREFIX not in code.co_filename and + (code.co_filename, code.co_name, + code.co_firstlineno) not in f_to_skip and + (code.co_filename, code.co_name) not in f_to_skip): + sinfo = None + if stack_info: + out = io.StringIO() + out.write(u'Stack (most recent call last):\n') + traceback.print_stack(frame, file=out) + sinfo = out.getvalue().rstrip(u'\n') + return (code.co_filename, frame.f_lineno, code.co_name, sinfo) + frame = frame.f_back + + def critical(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``CRITICAL``.""" + self.log(logging.CRITICAL, msg, *args, **kwargs) + + def fatal(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``FATAL``.""" + self.log(logging.FATAL, msg, *args, **kwargs) + + def error(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``ERROR``.""" + self.log(logging.ERROR, msg, *args, **kwargs) + + def warn(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``WARN``.""" + warnings.warn("The 'warn' method is deprecated, use 'warning' instead", + DeprecationWarning, 2) + self.log(logging.WARN, msg, *args, **kwargs) + + def warning(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``WARNING``.""" + self.log(logging.WARNING, msg, *args, **kwargs) + + def info(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``INFO``.""" + self.log(logging.INFO, msg, *args, **kwargs) + + def debug(self, msg, *args, **kwargs): + """Logs ``msg % args`` with severity ``DEBUG``.""" + self.log(logging.DEBUG, msg, *args, **kwargs) + + def log(self, level, msg, *args, **kwargs): + """Logs a message at a cetain level substituting in the supplied arguments. + + This method behaves differently in python and c++ modes. + + Args: + level: int, the standard logging level at which to log the message. + msg: str, the text of the message to log. + *args: The arguments to substitute in the message. + **kwargs: The keyword arguments to substitute in the message. + """ + if level >= logging.FATAL: + # Add property to the LogRecord created by this logger. + # This will be used by the ABSLHandler to determine whether it should + # treat CRITICAL/FATAL logs as really FATAL. + extra = kwargs.setdefault('extra', {}) + extra[_ABSL_LOG_FATAL] = True + super(ABSLLogger, self).log(level, msg, *args, **kwargs) + + def handle(self, record): + """Calls handlers without checking ``Logger.disabled``. + + Non-root loggers are set to disabled after setup with :func:`logging.config` + if it's not explicitly specified. Historically, absl logging will not be + disabled by that. To maintaining this behavior, this function skips + checking the ``Logger.disabled`` bit. + + This logger can still be disabled by adding a filter that filters out + everything. + + Args: + record: logging.LogRecord, the record to handle. + """ + if self.filter(record): + self.callHandlers(record) + + @classmethod + def register_frame_to_skip(cls, file_name, function_name, line_number=None): + """Registers a function name to skip when walking the stack. 
+ + The :class:`~absl.logging.ABSLLogger` sometimes skips method calls on the + stack to make the log messages meaningful in their appropriate context. + This method registers a function from a particular file as one + which should be skipped. + + Args: + file_name: str, the name of the file that contains the function. + function_name: str, the name of the function to skip. + line_number: int, if provided, only the function with this starting line + number will be skipped. Otherwise, all functions with the same name + in the file will be skipped. + """ + if line_number is not None: + cls._frames_to_skip.add((file_name, function_name, line_number)) + else: + cls._frames_to_skip.add((file_name, function_name)) + + +def _get_thread_id(): + """Gets id of current thread, suitable for logging as an unsigned quantity. + + If pywrapbase is linked, returns GetTID() for the thread ID to be + consistent with C++ logging. Otherwise, returns the numeric thread id. + The quantities are made unsigned by masking with 2*sys.maxint + 1. + + Returns: + Thread ID unique to this process (unsigned) + """ + thread_id = threading.get_ident() + return thread_id & _THREAD_ID_MASK + + +def get_absl_logger(): + """Returns the absl logger instance.""" + assert _absl_logger is not None + return _absl_logger + + +def get_absl_handler(): + """Returns the absl handler instance.""" + assert _absl_handler is not None + return _absl_handler + + +def use_python_logging(quiet=False): + """Uses the python implementation of the logging code. + + Args: + quiet: No logging message about switching logging type. + """ + get_absl_handler().activate_python_handler() + if not quiet: + info('Restoring pure python logging') + + +_attempted_to_remove_stderr_stream_handlers = False + + +def use_absl_handler(): + """Uses the ABSL logging handler for logging. + + This method is called in :func:`app.run()` so the absl handler + is used in absl apps. + """ + global _attempted_to_remove_stderr_stream_handlers + if not _attempted_to_remove_stderr_stream_handlers: + # The absl handler logs to stderr by default. To prevent double logging to + # stderr, the following code tries its best to remove other handlers that + # emit to stderr. Those handlers are most commonly added when + # logging.info/debug is called before calling use_absl_handler(). 
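For context, a sketch of installing the handler outside of `app.run()` (which normally does this for you); parsing FLAGS manually, the `--verbosity=0` value, and the 'myapp' logger name are arbitrary choices for illustration:

```python
import logging as std_logging

from absl import flags
from absl import logging

# Outside of app.run(), flags must be parsed before the handler is installed.
flags.FLAGS(['prog', '--verbosity=0'])
logging.use_absl_handler()
std_logging.getLogger('myapp').info('routed through the absl handler')
```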
+ handlers = [ + h for h in logging.root.handlers + if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr] + for h in handlers: + logging.root.removeHandler(h) + _attempted_to_remove_stderr_stream_handlers = True + + absl_handler = get_absl_handler() + if absl_handler not in logging.root.handlers: + logging.root.addHandler(absl_handler) + FLAGS['verbosity']._update_logging_levels() # pylint: disable=protected-access + FLAGS['logger_levels']._update_logger_levels() # pylint: disable=protected-access + + +def _initialize(): + """Initializes loggers and handlers.""" + global _absl_logger, _absl_handler + + if _absl_logger: + return + + original_logger_class = logging.getLoggerClass() + logging.setLoggerClass(ABSLLogger) + _absl_logger = logging.getLogger('absl') + logging.setLoggerClass(original_logger_class) + + python_logging_formatter = PythonFormatter() + _absl_handler = ABSLHandler(python_logging_formatter) + + +_initialize() diff --git a/venv/lib/python3.10/site-packages/absl/logging/__init__.pyi b/venv/lib/python3.10/site-packages/absl/logging/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5d5bb69d59d8e0c8fdaafaab7b975361031be898 --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/logging/__init__.pyi @@ -0,0 +1,290 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Any, Callable, Dict, NoReturn, Optional, Tuple, TypeVar, Union + +from absl import flags + +# Logging levels. +FATAL: int +ERROR: int +WARNING: int +WARN: int # Deprecated name. +INFO: int +DEBUG: int + +ABSL_LOGGING_PREFIX_REGEX: str + +LOGTOSTDERR: flags.FlagHolder[bool] +ALSOLOGTOSTDERR: flags.FlagHolder[bool] +LOG_DIR: flags.FlagHolder[str] +VERBOSITY: flags.FlagHolder[int] +LOGGER_LEVELS: flags.FlagHolder[Dict[str, str]] +STDERRTHRESHOLD: flags.FlagHolder[str] +SHOWPREFIXFORINFO: flags.FlagHolder[bool] + + +def get_verbosity() -> int: + ... + + +def set_verbosity(v: Union[int, str]) -> None: + ... + + +def set_stderrthreshold(s: Union[int, str]) -> None: + ... + + +# TODO(b/277607978): Provide actual args+kwargs shadowing stdlib's logging functions. +def fatal(msg: Any, *args: Any, **kwargs: Any) -> NoReturn: + ... + + +def error(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def warning(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def warn(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def info(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def debug(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def exception(msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def log_every_n(level: int, msg: Any, n: int, *args: Any) -> None: + ... + + +def log_every_n_seconds( + level: int, msg: Any, n_seconds: float, *args: Any +) -> None: + ... + + +def log_first_n(level: int, msg: Any, n: int, *args: Any) -> None: + ... + + +def log_if(level: int, msg: Any, condition: Any, *args: Any) -> None: + ... 
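A hedged usage sketch for the rate-limiting helpers declared above (messages and counts are arbitrary):

```python
from absl import logging

for i in range(10):
    # Logs calls 1, 4, 7, 10 (i.e. i == 0, 3, 6, 9).
    logging.log_every_n(logging.INFO, 'processed %d items', 3, i)
    # Logs only the first two iterations.
    logging.log_first_n(logging.WARNING, 'slow path taken (i=%d)', 2, i)
```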
+ + +def log(level: int, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def vlog(level: int, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + +def vlog_is_on(level: int) -> bool: + ... + + +def flush() -> None: + ... + + +def level_debug() -> bool: + ... + + +def level_info() -> bool: + ... + + +def level_warning() -> bool: + ... + + +level_warn = level_warning # Deprecated function. + + +def level_error() -> bool: + ... + + +def get_log_file_name(level: int = ...) -> str: + ... + + +def find_log_dir_and_names( + program_name: Optional[str] = ..., log_dir: Optional[str] = ... +) -> Tuple[str, str, str]: + ... + + +def find_log_dir(log_dir: Optional[str] = ...) -> str: + ... + + +def get_absl_log_prefix(record: logging.LogRecord) -> str: + ... + + +_SkipLogT = TypeVar('_SkipLogT', str, Callable[..., Any]) + +def skip_log_prefix(func: _SkipLogT) -> _SkipLogT: + ... + + +_StreamT = TypeVar("_StreamT") + + +class PythonHandler(logging.StreamHandler[_StreamT]): + + def __init__( + self, + stream: Optional[_StreamT] = ..., + formatter: Optional[logging.Formatter] = ..., + ) -> None: + ... + + def start_logging_to_file( + self, program_name: Optional[str] = ..., log_dir: Optional[str] = ... + ) -> None: + ... + + def use_absl_log_file( + self, program_name: Optional[str] = ..., log_dir: Optional[str] = ... + ) -> None: + ... + + def flush(self) -> None: + ... + + def emit(self, record: logging.LogRecord) -> None: + ... + + def close(self) -> None: + ... + + +class ABSLHandler(logging.Handler): + + def __init__(self, python_logging_formatter: PythonFormatter) -> None: + ... + + def format(self, record: logging.LogRecord) -> str: + ... + + def setFormatter(self, fmt) -> None: + ... + + def emit(self, record: logging.LogRecord) -> None: + ... + + def flush(self) -> None: + ... + + def close(self) -> None: + ... + + def handle(self, record: logging.LogRecord) -> bool: + ... + + @property + def python_handler(self) -> PythonHandler: + ... + + def activate_python_handler(self) -> None: + ... + + def use_absl_log_file( + self, program_name: Optional[str] = ..., log_dir: Optional[str] = ... + ) -> None: + ... + + def start_logging_to_file(self, program_name=None, log_dir=None) -> None: + ... + + +class PythonFormatter(logging.Formatter): + + def format(self, record: logging.LogRecord) -> str: + ... + + +class ABSLLogger(logging.Logger): + + def findCaller( + self, stack_info: bool = ..., stacklevel: int = ... + ) -> Tuple[str, int, str, Optional[str]]: + ... + + def critical(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def fatal(self, msg: Any, *args: Any, **kwargs: Any) -> NoReturn: + ... + + def error(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def warn(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def warning(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def info(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def debug(self, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def log(self, level: int, msg: Any, *args: Any, **kwargs: Any) -> None: + ... + + def handle(self, record: logging.LogRecord) -> None: + ... + + @classmethod + def register_frame_to_skip( + cls, file_name: str, function_name: str, line_number: Optional[int] = ... + ) -> None: + ... + + +# NOTE: Returns None before _initialize called but shouldn't occur after import. +def get_absl_logger() -> ABSLLogger: + ... + + +# NOTE: Returns None before _initialize called but shouldn't occur after import. 
+def get_absl_handler() -> ABSLHandler: + ... + + +def use_python_logging(quiet: bool = ...) -> None: + ... + + +def use_absl_handler() -> None: + ... diff --git a/venv/lib/python3.10/site-packages/absl/logging/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/absl/logging/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ca36f10d4a2bb9aaa7486cab69943496ccde4bd Binary files /dev/null and b/venv/lib/python3.10/site-packages/absl/logging/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/absl/logging/__pycache__/converter.cpython-310.pyc b/venv/lib/python3.10/site-packages/absl/logging/__pycache__/converter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea51bc5d50b74c754f7eddc1bdd5ca9eab5b2bb1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/absl/logging/__pycache__/converter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/absl/logging/converter.py b/venv/lib/python3.10/site-packages/absl/logging/converter.py new file mode 100644 index 0000000000000000000000000000000000000000..0239ab4556458b995f9cbca796281cc44acaf476 --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/logging/converter.py @@ -0,0 +1,214 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module to convert log levels between Abseil Python, C++, and Python standard. + +This converter has to convert (best effort) between three different +logging level schemes: + + * **cpp**: The C++ logging level scheme used in Abseil C++. + * **absl**: The absl.logging level scheme used in Abseil Python. + * **standard**: The python standard library logging level scheme. + +Here is a handy ascii chart for easy mental mapping:: + + LEVEL | cpp | absl | standard | + ---------+-----+--------+----------+ + DEBUG | 0 | 1 | 10 | + INFO | 0 | 0 | 20 | + WARNING | 1 | -1 | 30 | + ERROR | 2 | -2 | 40 | + CRITICAL | 3 | -3 | 50 | + FATAL | 3 | -3 | 50 | + +Note: standard logging ``CRITICAL`` is mapped to absl/cpp ``FATAL``. +However, only ``CRITICAL`` logs from the absl logger (or absl.logging.fatal) +will terminate the program. ``CRITICAL`` logs from non-absl loggers are treated +as error logs with a message prefix ``"CRITICAL - "``. + +Converting from standard to absl or cpp is a lossy conversion. +Converting back to standard will lose granularity. For this reason, +users should always try to convert to standard, the richest +representation, before manipulating the levels, and then only to cpp +or absl if those level schemes are absolutely necessary. +""" + +import logging + +STANDARD_CRITICAL = logging.CRITICAL +STANDARD_ERROR = logging.ERROR +STANDARD_WARNING = logging.WARNING +STANDARD_INFO = logging.INFO +STANDARD_DEBUG = logging.DEBUG + +# These levels are also used to define the constants +# FATAL, ERROR, WARNING, INFO, and DEBUG in the +# absl.logging module. 
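Worked spot-checks of the chart above, using the conversion helpers defined later in this module (a sketch; the asserts assume the stock standard-library level values):

```python
import logging

from absl.logging import converter

assert converter.absl_to_standard(converter.ABSL_WARNING) == logging.WARNING  # -1 -> 30
assert converter.standard_to_absl(logging.DEBUG) == converter.ABSL_DEBUG      # 10 -> 1
assert converter.absl_to_cpp(converter.ABSL_ERROR) == 2                       # -2 -> 2
assert converter.string_to_standard('fatal') == logging.CRITICAL              # -> 50
```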
+ABSL_FATAL = -3 +ABSL_ERROR = -2 +ABSL_WARNING = -1 +ABSL_WARN = -1 # Deprecated name. +ABSL_INFO = 0 +ABSL_DEBUG = 1 + +ABSL_LEVELS = {ABSL_FATAL: 'FATAL', + ABSL_ERROR: 'ERROR', + ABSL_WARNING: 'WARNING', + ABSL_INFO: 'INFO', + ABSL_DEBUG: 'DEBUG'} + +# Inverts the ABSL_LEVELS dictionary +ABSL_NAMES = {'FATAL': ABSL_FATAL, + 'ERROR': ABSL_ERROR, + 'WARNING': ABSL_WARNING, + 'WARN': ABSL_WARNING, # Deprecated name. + 'INFO': ABSL_INFO, + 'DEBUG': ABSL_DEBUG} + +ABSL_TO_STANDARD = {ABSL_FATAL: STANDARD_CRITICAL, + ABSL_ERROR: STANDARD_ERROR, + ABSL_WARNING: STANDARD_WARNING, + ABSL_INFO: STANDARD_INFO, + ABSL_DEBUG: STANDARD_DEBUG} + +# Inverts the ABSL_TO_STANDARD +STANDARD_TO_ABSL = dict((v, k) for (k, v) in ABSL_TO_STANDARD.items()) + + +def get_initial_for_level(level): + """Gets the initial that should start the log line for the given level. + + It returns: + + * ``'I'`` when: ``level < STANDARD_WARNING``. + * ``'W'`` when: ``STANDARD_WARNING <= level < STANDARD_ERROR``. + * ``'E'`` when: ``STANDARD_ERROR <= level < STANDARD_CRITICAL``. + * ``'F'`` when: ``level >= STANDARD_CRITICAL``. + + Args: + level: int, a Python standard logging level. + + Returns: + The first initial as it would be logged by the C++ logging module. + """ + if level < STANDARD_WARNING: + return 'I' + elif level < STANDARD_ERROR: + return 'W' + elif level < STANDARD_CRITICAL: + return 'E' + else: + return 'F' + + +def absl_to_cpp(level): + """Converts an absl log level to a cpp log level. + + Args: + level: int, an absl.logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in Abseil C++. + """ + if not isinstance(level, int): + raise TypeError('Expect an int level, found {}'.format(type(level))) + if level >= 0: + # C++ log levels must be >= 0 + return 0 + else: + return -level + + +def absl_to_standard(level): + """Converts an integer level from the absl value to the standard value. + + Args: + level: int, an absl.logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in standard logging. + """ + if not isinstance(level, int): + raise TypeError('Expect an int level, found {}'.format(type(level))) + if level < ABSL_FATAL: + level = ABSL_FATAL + if level <= ABSL_DEBUG: + return ABSL_TO_STANDARD[level] + # Maps to vlog levels. + return STANDARD_DEBUG - level + 1 + + +def string_to_standard(level): + """Converts a string level to standard logging level value. + + Args: + level: str, case-insensitive ``'debug'``, ``'info'``, ``'warning'``, + ``'error'``, ``'fatal'``. + + Returns: + The corresponding integer level for use in standard logging. + """ + return absl_to_standard(ABSL_NAMES.get(level.upper())) + + +def standard_to_absl(level): + """Converts an integer level from the standard value to the absl value. + + Args: + level: int, a Python standard logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in absl logging. + """ + if not isinstance(level, int): + raise TypeError('Expect an int level, found {}'.format(type(level))) + if level < 0: + level = 0 + if level < STANDARD_DEBUG: + # Maps to vlog levels. 
+ return STANDARD_DEBUG - level + 1 + elif level < STANDARD_INFO: + return ABSL_DEBUG + elif level < STANDARD_WARNING: + return ABSL_INFO + elif level < STANDARD_ERROR: + return ABSL_WARNING + elif level < STANDARD_CRITICAL: + return ABSL_ERROR + else: + return ABSL_FATAL + + +def standard_to_cpp(level): + """Converts an integer level from the standard value to the cpp value. + + Args: + level: int, a Python standard logging level. + + Raises: + TypeError: Raised when level is not an integer. + + Returns: + The corresponding integer level for use in cpp logging. + """ + return absl_to_cpp(standard_to_absl(level)) diff --git a/venv/lib/python3.10/site-packages/absl/testing/__init__.py b/venv/lib/python3.10/site-packages/absl/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3bd1cd51810385ca0e5e9fed3fb9a804febf27e --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/testing/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/venv/lib/python3.10/site-packages/absl/testing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ab13ce17cb010995ba7cf0c8d3b5f25dec640da Binary files /dev/null and b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/absl/testing/__pycache__/_bazelize_command.cpython-310.pyc b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/_bazelize_command.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8c6b340c62a1f8b64eeef50ce7d47ee54e82f5a Binary files /dev/null and b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/_bazelize_command.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/absl/testing/__pycache__/_pretty_print_reporter.cpython-310.pyc b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/_pretty_print_reporter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbf7fa78acaa57bdcb7b328f65cd571c824165cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/_pretty_print_reporter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/absl/testing/__pycache__/absltest.cpython-310.pyc b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/absltest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d207493869487facd75cfd66c4b82934a6a482a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/absltest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/absl/testing/__pycache__/flagsaver.cpython-310.pyc b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/flagsaver.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..502df97aad932bce161629a09cb157ab627c65e1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/flagsaver.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/absl/testing/__pycache__/parameterized.cpython-310.pyc b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/parameterized.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..318987444d34b5a7681065928b7f435cf7b0f282 Binary files /dev/null and b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/parameterized.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/absl/testing/__pycache__/xml_reporter.cpython-310.pyc b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/xml_reporter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7110e80bcfb2335f06fa7cb779469faf288f582 Binary files /dev/null and b/venv/lib/python3.10/site-packages/absl/testing/__pycache__/xml_reporter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/absl/testing/_bazelize_command.py b/venv/lib/python3.10/site-packages/absl/testing/_bazelize_command.py new file mode 100644 index 0000000000000000000000000000000000000000..9380d27427859371e323e12e0759ff83157223bc --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/testing/_bazelize_command.py @@ -0,0 +1,68 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal helper for running tests on Windows Bazel.""" + +import os + +from absl import flags + +FLAGS = flags.FLAGS + + +def get_executable_path(py_binary_name): + """Returns the executable path of a py_binary. + + This returns the executable path of a py_binary that is in another Bazel + target's data dependencies. + + On Linux/macOS, the path and __file__ has the same root directory. + On Windows, bazel builds an .exe file and we need to use the MANIFEST file + the location the actual binary. + + Args: + py_binary_name: string, the name of a py_binary that is in another Bazel + target's data dependencies. + + Raises: + RuntimeError: Raised when it cannot locate the executable path. + """ + + if os.name == 'nt': + py_binary_name += '.exe' + manifest_file = os.path.join(FLAGS.test_srcdir, 'MANIFEST') + workspace_name = os.environ['TEST_WORKSPACE'] + manifest_entry = '{}/{}'.format(workspace_name, py_binary_name) + with open(manifest_file, 'r') as manifest_fd: + for line in manifest_fd: + tokens = line.strip().split(' ') + if len(tokens) != 2: + continue + if manifest_entry == tokens[0]: + return tokens[1] + raise RuntimeError( + 'Cannot locate executable path for {}, MANIFEST file: {}.'.format( + py_binary_name, manifest_file)) + else: + # NOTE: __file__ may be .py or .pyc, depending on how the module was + # loaded and executed. + path = __file__ + + # Use the package name to find the root directory: every dot is + # a directory, plus one for ourselves. 
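A small sketch of the dirname walk performed just below: one hop per dot in the module name, plus one for the file itself (the site-packages path is made up):

```python
import os

module_name = 'absl.testing._bazelize_command'
path = '/site-packages/absl/testing/_bazelize_command.py'
for _ in range(module_name.count('.') + 1):  # two dots -> three hops
    path = os.path.dirname(path)
assert path == '/site-packages'
```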
+ for _ in range(__name__.count('.') + 1): + path = os.path.dirname(path) + + root_directory = path + return os.path.join(root_directory, py_binary_name) diff --git a/venv/lib/python3.10/site-packages/absl/testing/_pretty_print_reporter.py b/venv/lib/python3.10/site-packages/absl/testing/_pretty_print_reporter.py new file mode 100644 index 0000000000000000000000000000000000000000..b0dde07e4f5a98abedf1b3c5f6ac72fe256fa716 --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/testing/_pretty_print_reporter.py @@ -0,0 +1,91 @@ +# Copyright 2018 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""TestResult implementing default output for test execution status.""" + +import unittest + + +class TextTestResult(unittest.TextTestResult): + """TestResult class that provides the default text result formatting.""" + + def __init__(self, stream, descriptions, verbosity): + # Disable the verbose per-test output from the superclass, since it would + # conflict with our customized output. + super(TextTestResult, self).__init__(stream, descriptions, 0) + self._per_test_output = verbosity > 0 + + def _print_status(self, tag, test): + if self._per_test_output: + test_id = test.id() + if test_id.startswith('__main__.'): + test_id = test_id[len('__main__.'):] + print('[%s] %s' % (tag, test_id), file=self.stream) + self.stream.flush() + + def startTest(self, test): + super(TextTestResult, self).startTest(test) + self._print_status(' RUN ', test) + + def addSuccess(self, test): + super(TextTestResult, self).addSuccess(test) + self._print_status(' OK ', test) + + def addError(self, test, err): + super(TextTestResult, self).addError(test, err) + self._print_status(' FAILED ', test) + + def addFailure(self, test, err): + super(TextTestResult, self).addFailure(test, err) + self._print_status(' FAILED ', test) + + def addSkip(self, test, reason): + super(TextTestResult, self).addSkip(test, reason) + self._print_status(' SKIPPED ', test) + + def addExpectedFailure(self, test, err): + super(TextTestResult, self).addExpectedFailure(test, err) + self._print_status(' OK ', test) + + def addUnexpectedSuccess(self, test): + super(TextTestResult, self).addUnexpectedSuccess(test) + self._print_status(' FAILED ', test) + + +class TextTestRunner(unittest.TextTestRunner): + """A test runner that produces formatted text results.""" + + _TEST_RESULT_CLASS = TextTestResult + + # Set this to true at the class or instance level to run tests using a + # debug-friendly method (e.g, one that doesn't catch exceptions and interacts + # better with debuggers). + # Usually this is set using --pdb_post_mortem. + run_for_debugging = False + + def run(self, test): + # type: (TestCase) -> TestResult + if self.run_for_debugging: + return self._run_debug(test) + else: + return super(TextTestRunner, self).run(test) + + def _run_debug(self, test): + # type: (TestCase) -> TestResult + test.debug() + # Return an empty result to indicate success. 
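A hedged sketch of driving this runner directly (absltest normally wires it up through flags; `_DemoTest` and the verbosity value are arbitrary, and `_pretty_print_reporter` is a private module):

```python
import unittest

from absl.testing import _pretty_print_reporter

class _DemoTest(unittest.TestCase):
    def test_addition(self):
        self.assertEqual(1 + 1, 2)

suite = unittest.defaultTestLoader.loadTestsFromTestCase(_DemoTest)
_pretty_print_reporter.TextTestRunner(verbosity=2).run(suite)
# Emits per-test status lines such as '[ RUN      ] _DemoTest.test_addition'
# followed by '[       OK ] _DemoTest.test_addition'.
```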
+ return self._makeResult() + + def _makeResult(self): + return TextTestResult(self.stream, self.descriptions, self.verbosity) diff --git a/venv/lib/python3.10/site-packages/absl/testing/absltest.py b/venv/lib/python3.10/site-packages/absl/testing/absltest.py new file mode 100644 index 0000000000000000000000000000000000000000..e43cb820654f80ca93eb1d71ee2c5f4b1748795d --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/testing/absltest.py @@ -0,0 +1,2713 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base functionality for Abseil Python tests. + +This module contains base classes and high-level functions for Abseil-style +tests. +""" + +from collections import abc +import contextlib +import dataclasses +import difflib +import enum +import errno +import faulthandler +import getpass +import inspect +import io +import itertools +import json +import os +import random +import re +import shlex +import shutil +import signal +import stat +import subprocess +import sys +import tempfile +import textwrap +import typing +from typing import Any, AnyStr, BinaryIO, Callable, ContextManager, IO, Iterator, List, Mapping, MutableMapping, MutableSequence, NoReturn, Optional, Sequence, Text, TextIO, Tuple, Type, Union +import unittest +from unittest import mock # pylint: disable=unused-import Allow absltest.mock. +from urllib import parse + +from absl import app # pylint: disable=g-import-not-at-top +from absl import flags +from absl import logging +from absl.testing import _pretty_print_reporter +from absl.testing import xml_reporter + +# Use an if-type-checking block to prevent leakage of type-checking only +# symbols. We don't want people relying on these at runtime. +if typing.TYPE_CHECKING: + # Unbounded TypeVar for general usage + _T = typing.TypeVar('_T') + + import unittest.case # pylint: disable=g-import-not-at-top,g-bad-import-order + + _OutcomeType = unittest.case._Outcome # pytype: disable=module-attr + + +# pylint: enable=g-import-not-at-top + +# Re-export a bunch of unittest functions we support so that people don't +# have to import unittest to get them +# pylint: disable=invalid-name +skip = unittest.skip +skipIf = unittest.skipIf +skipUnless = unittest.skipUnless +SkipTest = unittest.SkipTest +expectedFailure = unittest.expectedFailure +# pylint: enable=invalid-name + +# End unittest re-exports + +FLAGS = flags.FLAGS + +_TEXT_OR_BINARY_TYPES = (str, bytes) + +# Suppress surplus entries in AssertionError stack traces. +__unittest = True # pylint: disable=invalid-name + + +def expectedFailureIf(condition, reason): # pylint: disable=invalid-name + """Expects the test to fail if the run condition is True. + + Example usage:: + + @expectedFailureIf(sys.version.major == 2, "Not yet working in py2") + def test_foo(self): + ... + + Args: + condition: bool, whether to expect failure or not. + reason: Text, the reason to expect failure. 
+ Returns: + Decorator function + """ + del reason # Unused + if condition: + return unittest.expectedFailure + else: + return lambda f: f + + +class TempFileCleanup(enum.Enum): + # Always cleanup temp files when the test completes. + ALWAYS = 'always' + # Only cleanup temp file if the test passes. This allows easier inspection + # of tempfile contents on test failure. absltest.TEST_TMPDIR.value determines + # where tempfiles are created. + SUCCESS = 'success' + # Never cleanup temp files. + OFF = 'never' + + +# Many of the methods in this module have names like assertSameElements. +# This kind of name does not comply with PEP8 style, +# but it is consistent with the naming of methods in unittest.py. +# pylint: disable=invalid-name + + +def _get_default_test_random_seed(): + # type: () -> int + random_seed = 301 + value = os.environ.get('TEST_RANDOM_SEED', '') + try: + random_seed = int(value) + except ValueError: + pass + return random_seed + + +def get_default_test_srcdir(): + # type: () -> Text + """Returns default test source dir.""" + return os.environ.get('TEST_SRCDIR', '') + + +def get_default_test_tmpdir(): + # type: () -> Text + """Returns default test temp dir.""" + tmpdir = os.environ.get('TEST_TMPDIR', '') + if not tmpdir: + tmpdir = os.path.join(tempfile.gettempdir(), 'absl_testing') + + return tmpdir + + +def _get_default_randomize_ordering_seed(): + # type: () -> int + """Returns default seed to use for randomizing test order. + + This function first checks the --test_randomize_ordering_seed flag, and then + the TEST_RANDOMIZE_ORDERING_SEED environment variable. If the first value + we find is: + * (not set): disable test randomization + * 0: disable test randomization + * 'random': choose a random seed in [1, 4294967295] for test order + randomization + * positive integer: use this seed for test order randomization + + (The values used are patterned after + https://docs.python.org/3/using/cmdline.html#envvar-PYTHONHASHSEED). + + In principle, it would be simpler to return None if no override is provided; + however, the python random module has no `get_seed()`, only `getstate()`, + which returns far more data than we want to pass via an environment variable + or flag. + + Returns: + A default value for test case randomization (int). 0 means do not randomize. + + Raises: + ValueError: Raised when the flag or env value is not one of the options + above. + """ + if FLAGS['test_randomize_ordering_seed'].present: + randomize = FLAGS.test_randomize_ordering_seed + elif 'TEST_RANDOMIZE_ORDERING_SEED' in os.environ: + randomize = os.environ['TEST_RANDOMIZE_ORDERING_SEED'] + else: + randomize = '' + if not randomize: + return 0 + if randomize == 'random': + return random.Random().randint(1, 4294967295) + if randomize == '0': + return 0 + try: + seed = int(randomize) + if seed > 0: + return seed + except ValueError: + pass + raise ValueError( + 'Unknown test randomization seed value: {}'.format(randomize)) + + +TEST_SRCDIR = flags.DEFINE_string( + 'test_srcdir', + get_default_test_srcdir(), + 'Root of directory tree where source files live', + allow_override_cpp=True) +TEST_TMPDIR = flags.DEFINE_string( + 'test_tmpdir', + get_default_test_tmpdir(), + 'Directory for temporary testing files', + allow_override_cpp=True) + +flags.DEFINE_integer( + 'test_random_seed', + _get_default_test_random_seed(), + 'Random seed for testing. 
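`expectedFailureIf`, defined above, returns `unittest.expectedFailure` when the condition holds and a no-op decorator otherwise. A minimal usage sketch, with an illustrative condition and test name:

```python
import math
import sys

from absl.testing import absltest


class VersionGatedTest(absltest.TestCase):

  # On interpreters without math.prod the error is reported as an expected
  # failure; on newer interpreters the test runs normally.
  @absltest.expectedFailureIf(sys.version_info < (3, 8), 'needs math.prod')
  def test_prod(self):
    self.assertEqual(24, math.prod([2, 3, 4]))


if __name__ == '__main__':
  absltest.main()
```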
Some test frameworks may ' + 'change the default value of this flag between runs, so ' + 'it is not appropriate for seeding probabilistic tests.', + allow_override_cpp=True) +flags.DEFINE_string( + 'test_randomize_ordering_seed', + '', + 'If positive, use this as a seed to randomize the ' + 'execution order for test cases. If "random", pick a ' + 'random seed to use. If 0 or not set, do not randomize ' + 'test case execution order. This flag also overrides ' + 'the TEST_RANDOMIZE_ORDERING_SEED environment variable.', + allow_override_cpp=True) +flags.DEFINE_string('xml_output_file', '', 'File to store XML test results') + + +# We might need to monkey-patch TestResult so that it stops considering an +# unexpected pass as a as a "successful result". For details, see +# http://bugs.python.org/issue20165 +def _monkey_patch_test_result_for_unexpected_passes(): + # type: () -> None + """Workaround for .""" + + def wasSuccessful(self): + # type: () -> bool + """Tells whether or not this result was a success. + + Any unexpected pass is to be counted as a non-success. + + Args: + self: The TestResult instance. + + Returns: + Whether or not this result was a success. + """ + return (len(self.failures) == len(self.errors) == + len(self.unexpectedSuccesses) == 0) + + test_result = unittest.TestResult() + test_result.addUnexpectedSuccess(unittest.FunctionTestCase(lambda: None)) + if test_result.wasSuccessful(): # The bug is present. + unittest.TestResult.wasSuccessful = wasSuccessful + if test_result.wasSuccessful(): # Warn the user if our hot-fix failed. + sys.stderr.write('unittest.result.TestResult monkey patch to report' + ' unexpected passes as failures did not work.\n') + + +_monkey_patch_test_result_for_unexpected_passes() + + +def _open(filepath, mode, _open_func=open): + # type: (Text, Text, Callable[..., IO]) -> IO + """Opens a file. + + Like open(), but ensure that we can open real files even if tests stub out + open(). + + Args: + filepath: A filepath. + mode: A mode. + _open_func: A built-in open() function. + + Returns: + The opened file object. + """ + return _open_func(filepath, mode, encoding='utf-8') + + +class _TempDir(object): + """Represents a temporary directory for tests. + + Creation of this class is internal. Using its public methods is OK. + + This class implements the `os.PathLike` interface (specifically, + `os.PathLike[str]`). This means, in Python 3, it can be directly passed + to e.g. `os.path.join()`. + """ + + def __init__(self, path): + # type: (Text) -> None + """Module-private: do not instantiate outside module.""" + self._path = path + + @property + def full_path(self): + # type: () -> Text + """Returns the path, as a string, for the directory. + + TIP: Instead of e.g. `os.path.join(temp_dir.full_path)`, you can simply + do `os.path.join(temp_dir)` because `__fspath__()` is implemented. + """ + return self._path + + def __fspath__(self): + # type: () -> Text + """See os.PathLike.""" + return self.full_path + + def create_file(self, file_path=None, content=None, mode='w', encoding='utf8', + errors='strict'): + # type: (Optional[Text], Optional[AnyStr], Text, Text, Text) -> _TempFile + """Create a file in the directory. + + NOTE: If the file already exists, it will be made writable and overwritten. + + Args: + file_path: Optional file path for the temp file. If not given, a unique + file name will be generated and used. Slashes are allowed in the name; + any missing intermediate directories will be created. 
NOTE: This path + is the path that will be cleaned up, including any directories in the + path, e.g., 'foo/bar/baz.txt' will `rm -r foo` + content: Optional string or bytes to initially write to the file. If not + specified, then an empty file is created. + mode: Mode string to use when writing content. Only used if `content` is + non-empty. + encoding: Encoding to use when writing string content. Only used if + `content` is text. + errors: How to handle text to bytes encoding errors. Only used if + `content` is text. + + Returns: + A _TempFile representing the created file. + """ + tf, _ = _TempFile._create(self._path, file_path, content, mode, encoding, + errors) + return tf + + def mkdir(self, dir_path=None): + # type: (Optional[Text]) -> _TempDir + """Create a directory in the directory. + + Args: + dir_path: Optional path to the directory to create. If not given, + a unique name will be generated and used. + + Returns: + A _TempDir representing the created directory. + """ + if dir_path: + path = os.path.join(self._path, dir_path) + else: + path = tempfile.mkdtemp(dir=self._path) + + # Note: there's no need to clear the directory since the containing + # dir was cleared by the tempdir() function. + os.makedirs(path, exist_ok=True) + return _TempDir(path) + + +class _TempFile(object): + """Represents a tempfile for tests. + + Creation of this class is internal. Using its public methods is OK. + + This class implements the `os.PathLike` interface (specifically, + `os.PathLike[str]`). This means, in Python 3, it can be directly passed + to e.g. `os.path.join()`. + """ + + def __init__(self, path): + # type: (Text) -> None + """Private: use _create instead.""" + self._path = path + + # pylint: disable=line-too-long + @classmethod + def _create(cls, base_path, file_path, content, mode, encoding, errors): + # type: (Text, Optional[Text], AnyStr, Text, Text, Text) -> Tuple[_TempFile, Text] + # pylint: enable=line-too-long + """Module-private: create a tempfile instance.""" + if file_path: + cleanup_path = os.path.join(base_path, _get_first_part(file_path)) + path = os.path.join(base_path, file_path) + os.makedirs(os.path.dirname(path), exist_ok=True) + # The file may already exist, in which case, ensure it's writable so that + # it can be truncated. + if os.path.exists(path) and not os.access(path, os.W_OK): + stat_info = os.stat(path) + os.chmod(path, stat_info.st_mode | stat.S_IWUSR) + else: + os.makedirs(base_path, exist_ok=True) + fd, path = tempfile.mkstemp(dir=str(base_path)) + os.close(fd) + cleanup_path = path + + tf = cls(path) + + if content: + if isinstance(content, str): + tf.write_text(content, mode=mode, encoding=encoding, errors=errors) + else: + tf.write_bytes(content, mode) + + else: + tf.write_bytes(b'') + + return tf, cleanup_path + + @property + def full_path(self): + # type: () -> Text + """Returns the path, as a string, for the file. + + TIP: Instead of e.g. `os.path.join(temp_file.full_path)`, you can simply + do `os.path.join(temp_file)` because `__fspath__()` is implemented. 
+ """ + return self._path + + def __fspath__(self): + # type: () -> Text + """See os.PathLike.""" + return self.full_path + + def read_text(self, encoding='utf8', errors='strict'): + # type: (Text, Text) -> Text + """Return the contents of the file as text.""" + with self.open_text(encoding=encoding, errors=errors) as fp: + return fp.read() + + def read_bytes(self): + # type: () -> bytes + """Return the content of the file as bytes.""" + with self.open_bytes() as fp: + return fp.read() + + def write_text(self, text, mode='w', encoding='utf8', errors='strict'): + # type: (Text, Text, Text, Text) -> None + """Write text to the file. + + Args: + text: Text to write. In Python 2, it can be bytes, which will be + decoded using the `encoding` arg (this is as an aid for code that + is 2 and 3 compatible). + mode: The mode to open the file for writing. + encoding: The encoding to use when writing the text to the file. + errors: The error handling strategy to use when converting text to bytes. + """ + with self.open_text(mode, encoding=encoding, errors=errors) as fp: + fp.write(text) + + def write_bytes(self, data, mode='wb'): + # type: (bytes, Text) -> None + """Write bytes to the file. + + Args: + data: bytes to write. + mode: Mode to open the file for writing. The "b" flag is implicit if + not already present. It must not have the "t" flag. + """ + with self.open_bytes(mode) as fp: + fp.write(data) + + def open_text(self, mode='rt', encoding='utf8', errors='strict'): + # type: (Text, Text, Text) -> ContextManager[TextIO] + """Return a context manager for opening the file in text mode. + + Args: + mode: The mode to open the file in. The "t" flag is implicit if not + already present. It must not have the "b" flag. + encoding: The encoding to use when opening the file. + errors: How to handle decoding errors. + + Returns: + Context manager that yields an open file. + + Raises: + ValueError: if invalid inputs are provided. + """ + if 'b' in mode: + raise ValueError('Invalid mode {!r}: "b" flag not allowed when opening ' + 'file in text mode'.format(mode)) + if 't' not in mode: + mode += 't' + cm = self._open(mode, encoding, errors) + return cm + + def open_bytes(self, mode='rb'): + # type: (Text) -> ContextManager[BinaryIO] + """Return a context manager for opening the file in binary mode. + + Args: + mode: The mode to open the file in. The "b" mode is implicit if not + already present. It must not have the "t" flag. + + Returns: + Context manager that yields an open file. + + Raises: + ValueError: if invalid inputs are provided. + """ + if 't' in mode: + raise ValueError('Invalid mode {!r}: "t" flag not allowed when opening ' + 'file in binary mode'.format(mode)) + if 'b' not in mode: + mode += 'b' + cm = self._open(mode, encoding=None, errors=None) + return cm + + # TODO(b/123775699): Once pytype supports typing.Literal, use overload and + # Literal to express more precise return types. The contained type is + # currently `Any` to avoid [bad-return-type] errors in the open_* methods. + @contextlib.contextmanager + def _open( + self, + mode: str, + encoding: Optional[str] = 'utf8', + errors: Optional[str] = 'strict', + ) -> Iterator[Any]: + with io.open( + self.full_path, mode=mode, encoding=encoding, errors=errors) as fp: + yield fp + + +class _method(object): + """A decorator that supports both instance and classmethod invocations. + + Using similar semantics to the @property builtin, this decorator can augment + an instance method to support conditional logic when invoked on a class + object. 
This breaks support for invoking an instance method via the class + (e.g. Cls.method(self, ...)) but is still situationally useful. + """ + + def __init__(self, finstancemethod): + # type: (Callable[..., Any]) -> None + self._finstancemethod = finstancemethod + self._fclassmethod = None + + def classmethod(self, fclassmethod): + # type: (Callable[..., Any]) -> _method + self._fclassmethod = classmethod(fclassmethod) + return self + + def __doc__(self): + # type: () -> str + if getattr(self._finstancemethod, '__doc__'): + return self._finstancemethod.__doc__ + elif getattr(self._fclassmethod, '__doc__'): + return self._fclassmethod.__doc__ + return '' + + def __get__(self, obj, type_): + # type: (Optional[Any], Optional[Type[Any]]) -> Callable[..., Any] + func = self._fclassmethod if obj is None else self._finstancemethod + return func.__get__(obj, type_) # pytype: disable=attribute-error + + +class TestCase(unittest.TestCase): + """Extension of unittest.TestCase providing more power.""" + + # When to cleanup files/directories created by our `create_tempfile()` and + # `create_tempdir()` methods after each test case completes. This does *not* + # affect e.g., files created outside of those methods, e.g., using the stdlib + # tempfile module. This can be overridden at the class level, instance level, + # or with the `cleanup` arg of `create_tempfile()` and `create_tempdir()`. See + # `TempFileCleanup` for details on the different values. + # TODO(b/70517332): Remove the type comment and the disable once pytype has + # better support for enums. + tempfile_cleanup = TempFileCleanup.ALWAYS # type: TempFileCleanup # pytype: disable=annotation-type-mismatch + + maxDiff = 80 * 20 + longMessage = True + + # Exit stacks for per-test and per-class scopes. + if sys.version_info < (3, 11): + _exit_stack = None + _cls_exit_stack = None + + def __init__(self, *args, **kwargs): + super(TestCase, self).__init__(*args, **kwargs) + # This is to work around missing type stubs in unittest.pyi + self._outcome = getattr(self, '_outcome') # type: Optional[_OutcomeType] + + def setUp(self): + super(TestCase, self).setUp() + # NOTE: Only Python 3 contextlib has ExitStack and + # Python 3.11+ already has enterContext. + if hasattr(contextlib, 'ExitStack') and sys.version_info < (3, 11): + self._exit_stack = contextlib.ExitStack() + self.addCleanup(self._exit_stack.close) + + @classmethod + def setUpClass(cls): + super(TestCase, cls).setUpClass() + # NOTE: Only Python 3 contextlib has ExitStack, only Python 3.8+ has + # addClassCleanup and Python 3.11+ already has enterClassContext. + if ( + hasattr(contextlib, 'ExitStack') + and hasattr(cls, 'addClassCleanup') + and sys.version_info < (3, 11) + ): + cls._cls_exit_stack = contextlib.ExitStack() + cls.addClassCleanup(cls._cls_exit_stack.close) + + def create_tempdir(self, name=None, cleanup=None): + # type: (Optional[Text], Optional[TempFileCleanup]) -> _TempDir + """Create a temporary directory specific to the test. + + NOTE: The directory and its contents will be recursively cleared before + creation. This ensures that there is no pre-existing state. + + This creates a named directory on disk that is isolated to this test, and + will be properly cleaned up by the test. This avoids several pitfalls of + creating temporary directories for test purposes, as well as makes it easier + to setup directories and verify their contents. 
For example:: + + def test_foo(self): + out_dir = self.create_tempdir() + out_log = out_dir.create_file('output.log') + expected_outputs = [ + os.path.join(out_dir, 'data-0.txt'), + os.path.join(out_dir, 'data-1.txt'), + ] + code_under_test(out_dir) + self.assertTrue(os.path.exists(expected_paths[0])) + self.assertTrue(os.path.exists(expected_paths[1])) + self.assertEqual('foo', out_log.read_text()) + + See also: :meth:`create_tempfile` for creating temporary files. + + Args: + name: Optional name of the directory. If not given, a unique + name will be generated and used. + cleanup: Optional cleanup policy on when/if to remove the directory (and + all its contents) at the end of the test. If None, then uses + :attr:`tempfile_cleanup`. + + Returns: + A _TempDir representing the created directory; see _TempDir class docs + for usage. + """ + test_path = self._get_tempdir_path_test() + + if name: + path = os.path.join(test_path, name) + cleanup_path = os.path.join(test_path, _get_first_part(name)) + else: + os.makedirs(test_path, exist_ok=True) + path = tempfile.mkdtemp(dir=test_path) + cleanup_path = path + + _rmtree_ignore_errors(cleanup_path) + os.makedirs(path, exist_ok=True) + + self._maybe_add_temp_path_cleanup(cleanup_path, cleanup) + + return _TempDir(path) + + # pylint: disable=line-too-long + def create_tempfile(self, file_path=None, content=None, mode='w', + encoding='utf8', errors='strict', cleanup=None): + # type: (Optional[Text], Optional[AnyStr], Text, Text, Text, Optional[TempFileCleanup]) -> _TempFile + # pylint: enable=line-too-long + """Create a temporary file specific to the test. + + This creates a named file on disk that is isolated to this test, and will + be properly cleaned up by the test. This avoids several pitfalls of + creating temporary files for test purposes, as well as makes it easier + to setup files, their data, read them back, and inspect them when + a test fails. For example:: + + def test_foo(self): + output = self.create_tempfile() + code_under_test(output) + self.assertGreater(os.path.getsize(output), 0) + self.assertEqual('foo', output.read_text()) + + NOTE: This will zero-out the file. This ensures there is no pre-existing + state. + NOTE: If the file already exists, it will be made writable and overwritten. + + See also: :meth:`create_tempdir` for creating temporary directories, and + ``_TempDir.create_file`` for creating files within a temporary directory. + + Args: + file_path: Optional file path for the temp file. If not given, a unique + file name will be generated and used. Slashes are allowed in the name; + any missing intermediate directories will be created. NOTE: This path is + the path that will be cleaned up, including any directories in the path, + e.g., ``'foo/bar/baz.txt'`` will ``rm -r foo``. + content: Optional string or + bytes to initially write to the file. If not + specified, then an empty file is created. + mode: Mode string to use when writing content. Only used if `content` is + non-empty. + encoding: Encoding to use when writing string content. Only used if + `content` is text. + errors: How to handle text to bytes encoding errors. Only used if + `content` is text. + cleanup: Optional cleanup policy on when/if to remove the directory (and + all its contents) at the end of the test. If None, then uses + :attr:`tempfile_cleanup`. + + Returns: + A _TempFile representing the created file; see _TempFile class docs for + usage. 
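`create_tempdir()` and `_TempDir.create_file()` shown above provide per-test directories that are cleared before creation and cleaned up according to `tempfile_cleanup`. A minimal sketch (test and file names are illustrative):

```python
import os

from absl.testing import absltest


class TempDirTest(absltest.TestCase):

  def test_writes_expected_files(self):
    out_dir = self.create_tempdir()  # isolated to this test method
    # Intermediate directories in the relative path are created automatically.
    cfg = out_dir.create_file('conf/settings.ini', content='[core]\n')
    self.assertTrue(os.path.exists(cfg.full_path))
    self.assertEqual('[core]\n', cfg.read_text())


if __name__ == '__main__':
  absltest.main()
```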
+ """ + test_path = self._get_tempdir_path_test() + tf, cleanup_path = _TempFile._create(test_path, file_path, content=content, + mode=mode, encoding=encoding, + errors=errors) + self._maybe_add_temp_path_cleanup(cleanup_path, cleanup) + return tf + + @_method + def enter_context(self, manager): + # type: (ContextManager[_T]) -> _T + """Returns the CM's value after registering it with the exit stack. + + Entering a context pushes it onto a stack of contexts. When `enter_context` + is called on the test instance (e.g. `self.enter_context`), the context is + exited after the test case's tearDown call. When called on the test class + (e.g. `TestCase.enter_context`), the context is exited after the test + class's tearDownClass call. + + Contexts are exited in the reverse order of entering. They will always + be exited, regardless of test failure/success. + + This is useful to eliminate per-test boilerplate when context managers + are used. For example, instead of decorating every test with `@mock.patch`, + simply do `self.foo = self.enter_context(mock.patch(...))' in `setUp()`. + + NOTE: The context managers will always be exited without any error + information. This is an unfortunate implementation detail due to some + internals of how unittest runs tests. + + Args: + manager: The context manager to enter. + """ + if sys.version_info >= (3, 11): + return self.enterContext(manager) + + if not self._exit_stack: + raise AssertionError( + 'self._exit_stack is not set: enter_context is Py3-only; also make ' + 'sure that AbslTest.setUp() is called.') + return self._exit_stack.enter_context(manager) + + @enter_context.classmethod + def enter_context(cls, manager): # pylint: disable=no-self-argument + # type: (ContextManager[_T]) -> _T + if sys.version_info >= (3, 11): + return cls.enterClassContext(manager) + + if not cls._cls_exit_stack: + raise AssertionError( + 'cls._cls_exit_stack is not set: cls.enter_context requires ' + 'Python 3.8+; also make sure that AbslTest.setUpClass() is called.') + return cls._cls_exit_stack.enter_context(manager) + + @classmethod + def _get_tempdir_path_cls(cls): + # type: () -> Text + return os.path.join(TEST_TMPDIR.value, + cls.__qualname__.replace('__main__.', '')) + + def _get_tempdir_path_test(self): + # type: () -> Text + return os.path.join(self._get_tempdir_path_cls(), self._testMethodName) + + def _get_tempfile_cleanup(self, override): + # type: (Optional[TempFileCleanup]) -> TempFileCleanup + if override is not None: + return override + return self.tempfile_cleanup + + def _maybe_add_temp_path_cleanup(self, path, cleanup): + # type: (Text, Optional[TempFileCleanup]) -> None + cleanup = self._get_tempfile_cleanup(cleanup) + if cleanup == TempFileCleanup.OFF: + return + elif cleanup == TempFileCleanup.ALWAYS: + self.addCleanup(_rmtree_ignore_errors, path) + elif cleanup == TempFileCleanup.SUCCESS: + self._internal_add_cleanup_on_success(_rmtree_ignore_errors, path) + else: + raise AssertionError('Unexpected cleanup value: {}'.format(cleanup)) + + def _internal_add_cleanup_on_success( + self, + function: Callable[..., Any], + *args: Any, + **kwargs: Any, + ) -> None: + """Adds `function` as cleanup when the test case succeeds.""" + outcome = self._outcome + assert outcome is not None + previous_failure_count = ( + len(outcome.result.failures) + + len(outcome.result.errors) + + len(outcome.result.unexpectedSuccesses) + ) + def _call_cleaner_on_success(*args, **kwargs): + if not self._internal_ran_and_passed_when_called_during_cleanup( + previous_failure_count): 
+ return + function(*args, **kwargs) + self.addCleanup(_call_cleaner_on_success, *args, **kwargs) + + def _internal_ran_and_passed_when_called_during_cleanup( + self, + previous_failure_count: int, + ) -> bool: + """Returns whether test is passed. Expected to be called during cleanup.""" + outcome = self._outcome + if sys.version_info[:2] >= (3, 11): + assert outcome is not None + current_failure_count = ( + len(outcome.result.failures) + + len(outcome.result.errors) + + len(outcome.result.unexpectedSuccesses) + ) + return current_failure_count == previous_failure_count + else: + # Before Python 3.11 https://github.com/python/cpython/pull/28180, errors + # were bufferred in _Outcome before calling cleanup. + result = self.defaultTestResult() + self._feedErrorsToResult(result, outcome.errors) # pytype: disable=attribute-error + return result.wasSuccessful() + + def shortDescription(self): + # type: () -> Text + """Formats both the test method name and the first line of its docstring. + + If no docstring is given, only returns the method name. + + This method overrides unittest.TestCase.shortDescription(), which + only returns the first line of the docstring, obscuring the name + of the test upon failure. + + Returns: + desc: A short description of a test method. + """ + desc = self.id() + + # Omit the main name so that test name can be directly copy/pasted to + # the command line. + if desc.startswith('__main__.'): + desc = desc[len('__main__.'):] + + # NOTE: super() is used here instead of directly invoking + # unittest.TestCase.shortDescription(self), because of the + # following line that occurs later on: + # unittest.TestCase = TestCase + # Because of this, direct invocation of what we think is the + # superclass will actually cause infinite recursion. + doc_first_line = super(TestCase, self).shortDescription() + if doc_first_line is not None: + desc = '\n'.join((desc, doc_first_line)) + return desc + + def assertStartsWith(self, actual, expected_start, msg=None): + """Asserts that actual.startswith(expected_start) is True. + + Args: + actual: str + expected_start: str + msg: Optional message to report on failure. + """ + if not actual.startswith(expected_start): + self.fail('%r does not start with %r' % (actual, expected_start), msg) + + def assertNotStartsWith(self, actual, unexpected_start, msg=None): + """Asserts that actual.startswith(unexpected_start) is False. + + Args: + actual: str + unexpected_start: str + msg: Optional message to report on failure. + """ + if actual.startswith(unexpected_start): + self.fail('%r does start with %r' % (actual, unexpected_start), msg) + + def assertEndsWith(self, actual, expected_end, msg=None): + """Asserts that actual.endswith(expected_end) is True. + + Args: + actual: str + expected_end: str + msg: Optional message to report on failure. + """ + if not actual.endswith(expected_end): + self.fail('%r does not end with %r' % (actual, expected_end), msg) + + def assertNotEndsWith(self, actual, unexpected_end, msg=None): + """Asserts that actual.endswith(unexpected_end) is False. + + Args: + actual: str + unexpected_end: str + msg: Optional message to report on failure. + """ + if actual.endswith(unexpected_end): + self.fail('%r does end with %r' % (actual, unexpected_end), msg) + + def assertSequenceStartsWith(self, prefix, whole, msg=None): + """An equality assertion for the beginning of ordered sequences. + + If prefix is an empty sequence, it will raise an error unless whole is also + an empty sequence. 
+ + If prefix is not a sequence, it will raise an error if the first element of + whole does not match. + + Args: + prefix: A sequence expected at the beginning of the whole parameter. + whole: The sequence in which to look for prefix. + msg: Optional message to report on failure. + """ + try: + prefix_len = len(prefix) + except (TypeError, NotImplementedError): + prefix = [prefix] + prefix_len = 1 + + if isinstance(whole, abc.Mapping) or isinstance(whole, abc.Set): + self.fail( + 'For whole: Mapping or Set objects are not supported, found type: %s' + % type(whole), + msg, + ) + try: + whole_len = len(whole) + except (TypeError, NotImplementedError): + self.fail('For whole: len(%s) is not supported, it appears to be type: ' + '%s' % (whole, type(whole)), msg) + + assert prefix_len <= whole_len, self._formatMessage( + msg, + 'Prefix length (%d) is longer than whole length (%d).' % + (prefix_len, whole_len) + ) + + if not prefix_len and whole_len: + self.fail('Prefix length is 0 but whole length is %d: %s' % + (len(whole), whole), msg) + + try: + self.assertSequenceEqual(prefix, whole[:prefix_len], msg) + except AssertionError: + self.fail('prefix: %s not found at start of whole: %s.' % + (prefix, whole), msg) + + def assertEmpty(self, container, msg=None): + """Asserts that an object has zero length. + + Args: + container: Anything that implements the collections.abc.Sized interface. + msg: Optional message to report on failure. + """ + if not isinstance(container, abc.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + + # explicitly check the length since some Sized objects (e.g. numpy.ndarray) + # have strange __nonzero__/__bool__ behavior. + if len(container): # pylint: disable=g-explicit-length-test + self.fail('{!r} has length of {}.'.format(container, len(container)), msg) + + def assertNotEmpty(self, container, msg=None): + """Asserts that an object has non-zero length. + + Args: + container: Anything that implements the collections.abc.Sized interface. + msg: Optional message to report on failure. + """ + if not isinstance(container, abc.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + + # explicitly check the length since some Sized objects (e.g. numpy.ndarray) + # have strange __nonzero__/__bool__ behavior. + if not len(container): # pylint: disable=g-explicit-length-test + self.fail('{!r} has length of 0.'.format(container), msg) + + def assertLen(self, container, expected_len, msg=None): + """Asserts that an object has the expected length. + + Args: + container: Anything that implements the collections.abc.Sized interface. + expected_len: The expected length of the container. + msg: Optional message to report on failure. + """ + if not isinstance(container, abc.Sized): + self.fail('Expected a Sized object, got: ' + '{!r}'.format(type(container).__name__), msg) + if len(container) != expected_len: + container_repr = unittest.util.safe_repr(container) # pytype: disable=module-attr + self.fail('{} has length of {}, expected {}.'.format( + container_repr, len(container), expected_len), msg) + + def assertSequenceAlmostEqual(self, expected_seq, actual_seq, places=None, + msg=None, delta=None): + """An approximate equality assertion for ordered sequences. 
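The size and prefix assertions above (`assertEmpty`, `assertNotEmpty`, `assertLen`, `assertStartsWith`, `assertSequenceStartsWith`) check `len()` explicitly rather than truthiness, which matters for objects such as numpy arrays with unusual `__bool__`. A minimal sketch with illustrative values:

```python
from absl.testing import absltest


class SizeAssertionsTest(absltest.TestCase):

  def test_container_sizes(self):
    self.assertEmpty([])
    self.assertNotEmpty({'key': 'value'})
    self.assertLen(('a', 'b', 'c'), 3)
    self.assertStartsWith('absl testing', 'absl')
    self.assertSequenceStartsWith([1, 2], [1, 2, 3])


if __name__ == '__main__':
  absltest.main()
```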
+ + Fail if the two sequences are unequal as determined by their value + differences rounded to the given number of decimal places (default 7) and + comparing to zero, or by comparing that the difference between each value + in the two sequences is more than the given delta. + + Note that decimal places (from zero) are usually not the same as significant + digits (measured from the most significant digit). + + If the two sequences compare equal then they will automatically compare + almost equal. + + Args: + expected_seq: A sequence containing elements we are expecting. + actual_seq: The sequence that we are testing. + places: The number of decimal places to compare. + msg: The message to be printed if the test fails. + delta: The OK difference between compared values. + """ + if len(expected_seq) != len(actual_seq): + self.fail('Sequence size mismatch: {} vs {}'.format( + len(expected_seq), len(actual_seq)), msg) + + err_list = [] + for idx, (exp_elem, act_elem) in enumerate(zip(expected_seq, actual_seq)): + try: + # assertAlmostEqual should be called with at most one of `places` and + # `delta`. However, it's okay for assertSequenceAlmostEqual to pass + # both because we want the latter to fail if the former does. + # pytype: disable=wrong-keyword-args + self.assertAlmostEqual(exp_elem, act_elem, places=places, msg=msg, + delta=delta) + # pytype: enable=wrong-keyword-args + except self.failureException as err: + err_list.append('At index {}: {}'.format(idx, err)) + + if err_list: + if len(err_list) > 30: + err_list = err_list[:30] + ['...'] + msg = self._formatMessage(msg, '\n'.join(err_list)) + self.fail(msg) + + def assertContainsSubset(self, expected_subset, actual_set, msg=None): + """Checks whether actual iterable is a superset of expected iterable.""" + missing = set(expected_subset) - set(actual_set) + if not missing: + return + + self.fail('Missing elements %s\nExpected: %s\nActual: %s' % ( + missing, expected_subset, actual_set), msg) + + def assertNoCommonElements(self, expected_seq, actual_seq, msg=None): + """Checks whether actual iterable and expected iterable are disjoint.""" + common = set(expected_seq) & set(actual_seq) + if not common: + return + + self.fail('Common elements %s\nExpected: %s\nActual: %s' % ( + common, expected_seq, actual_seq), msg) + + def assertItemsEqual(self, expected_seq, actual_seq, msg=None): + """Deprecated, please use assertCountEqual instead. + + This is equivalent to assertCountEqual. + + Args: + expected_seq: A sequence containing elements we are expecting. + actual_seq: The sequence that we are testing. + msg: The message to be printed if the test fails. + """ + super().assertCountEqual(expected_seq, actual_seq, msg) + + def assertSameElements(self, expected_seq, actual_seq, msg=None): + """Asserts that two sequences have the same elements (in any order). + + This method, unlike assertCountEqual, doesn't care about any + duplicates in the expected and actual sequences:: + + # Doesn't raise an AssertionError + assertSameElements([1, 1, 1, 0, 0, 0], [0, 1]) + + If possible, you should use assertCountEqual instead of + assertSameElements. + + Args: + expected_seq: A sequence containing elements we are expecting. + actual_seq: The sequence that we are testing. + msg: The message to be printed if the test fails. + """ + # `unittest2.TestCase` used to have assertSameElements, but it was + # removed in favor of assertItemsEqual. As there's a unit test + # that explicitly checks this behavior, I am leaving this method + # alone. 
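A minimal sketch exercising `assertSequenceAlmostEqual`, `assertContainsSubset` and `assertNoCommonElements` as defined above; the numbers are illustrative:

```python
from absl.testing import absltest


class CollectionAssertionsTest(absltest.TestCase):

  def test_almost_equal_and_set_relations(self):
    # Element-wise comparison with a tolerance, like assertAlmostEqual per item.
    self.assertSequenceAlmostEqual(
        [0.1, 0.2], [0.10000001, 0.20000001], places=6)
    # Every expected element must appear in the actual iterable.
    self.assertContainsSubset({'a', 'b'}, {'a', 'b', 'c'})
    # The two iterables must be disjoint.
    self.assertNoCommonElements([1, 2], [3, 4])


if __name__ == '__main__':
  absltest.main()
```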
+ # Fail on strings: empirically, passing strings to this test method + # is almost always a bug. If comparing the character sets of two strings + # is desired, cast the inputs to sets or lists explicitly. + if (isinstance(expected_seq, _TEXT_OR_BINARY_TYPES) or + isinstance(actual_seq, _TEXT_OR_BINARY_TYPES)): + self.fail('Passing string/bytes to assertSameElements is usually a bug. ' + 'Did you mean to use assertEqual?\n' + 'Expected: %s\nActual: %s' % (expected_seq, actual_seq)) + try: + expected = dict([(element, None) for element in expected_seq]) + actual = dict([(element, None) for element in actual_seq]) + missing = [element for element in expected if element not in actual] + unexpected = [element for element in actual if element not in expected] + missing.sort() + unexpected.sort() + except TypeError: + # Fall back to slower list-compare if any of the objects are + # not hashable. + expected = list(expected_seq) + actual = list(actual_seq) + expected.sort() + actual.sort() + missing, unexpected = _sorted_list_difference(expected, actual) + errors = [] + if msg: + errors.extend((msg, ':\n')) + if missing: + errors.append('Expected, but missing:\n %r\n' % missing) + if unexpected: + errors.append('Unexpected, but present:\n %r\n' % unexpected) + if missing or unexpected: + self.fail(''.join(errors)) + + # unittest.TestCase.assertMultiLineEqual works very similarly, but it + # has a different error format. However, I find this slightly more readable. + def assertMultiLineEqual(self, first, second, msg=None, **kwargs): + """Asserts that two multi-line strings are equal.""" + assert isinstance(first, + str), ('First argument is not a string: %r' % (first,)) + assert isinstance(second, + str), ('Second argument is not a string: %r' % (second,)) + line_limit = kwargs.pop('line_limit', 0) + if kwargs: + raise TypeError('Unexpected keyword args {}'.format(tuple(kwargs))) + + if first == second: + return + if msg: + failure_message = [msg + ':\n'] + else: + failure_message = ['\n'] + if line_limit: + line_limit += len(failure_message) + for line in difflib.ndiff(first.splitlines(True), second.splitlines(True)): + failure_message.append(line) + if not line.endswith('\n'): + failure_message.append('\n') + if line_limit and len(failure_message) > line_limit: + n_omitted = len(failure_message) - line_limit + failure_message = failure_message[:line_limit] + failure_message.append( + '(... and {} more delta lines omitted for brevity.)\n'.format( + n_omitted)) + + raise self.failureException(''.join(failure_message)) + + def assertBetween(self, value, minv, maxv, msg=None): + """Asserts that value is between minv and maxv (inclusive).""" + msg = self._formatMessage(msg, + '"%r" unexpectedly not between "%r" and "%r"' % + (value, minv, maxv)) + self.assertTrue(minv <= value, msg) + self.assertTrue(maxv >= value, msg) + + def assertRegexMatch(self, actual_str, regexes, message=None): + r"""Asserts that at least one regex in regexes matches str. + + If possible you should use `assertRegex`, which is a simpler + version of this method. `assertRegex` takes a single regular + expression (a string or re compiled object) instead of a list. + + Notes: + + 1. This function uses substring matching, i.e. the matching + succeeds if *any* substring of the error message matches *any* + regex in the list. This is more convenient for the user than + full-string matching. + + 2. If regexes is the empty list, the matching will always fail. + + 3. Use regexes=[''] for a regex that will always pass. + + 4. '.' 
matches any single character *except* the newline. To + match any character, use '(.|\n)'. + + 5. '^' matches the beginning of each line, not just the beginning + of the string. Similarly, '$' matches the end of each line. + + 6. An exception will be thrown if regexes contains an invalid + regex. + + Args: + actual_str: The string we try to match with the items in regexes. + regexes: The regular expressions we want to match against str. + See "Notes" above for detailed notes on how this is interpreted. + message: The message to be printed if the test fails. + """ + if isinstance(regexes, _TEXT_OR_BINARY_TYPES): + self.fail('regexes is string or bytes; use assertRegex instead.', + message) + if not regexes: + self.fail('No regexes specified.', message) + + regex_type = type(regexes[0]) + for regex in regexes[1:]: + if type(regex) is not regex_type: # pylint: disable=unidiomatic-typecheck + self.fail('regexes list must all be the same type.', message) + + if regex_type is bytes and isinstance(actual_str, str): + regexes = [regex.decode('utf-8') for regex in regexes] + regex_type = str + elif regex_type is str and isinstance(actual_str, bytes): + regexes = [regex.encode('utf-8') for regex in regexes] + regex_type = bytes + + if regex_type is str: + regex = u'(?:%s)' % u')|(?:'.join(regexes) + elif regex_type is bytes: + regex = b'(?:' + (b')|(?:'.join(regexes)) + b')' + else: + self.fail('Only know how to deal with unicode str or bytes regexes.', + message) + + if not re.search(regex, actual_str, re.MULTILINE): + self.fail('"%s" does not contain any of these regexes: %s.' % + (actual_str, regexes), message) + + def assertCommandSucceeds(self, command, regexes=(b'',), env=None, + close_fds=True, msg=None): + """Asserts that a shell command succeeds (i.e. exits with code 0). + + Args: + command: List or string representing the command to run. + regexes: List of regular expression byte strings that match success. + env: Dictionary of environment variable settings. If None, no environment + variables will be set for the child process. This is to make tests + more hermetic. NOTE: this behavior is different than the standard + subprocess module. + close_fds: Whether or not to close all open fd's in the child after + forking. + msg: Optional message to report on failure. + """ + (ret_code, err) = get_command_stderr(command, env, close_fds) + + # We need bytes regexes here because `err` is bytes. + # Accommodate code which listed their output regexes w/o the b'' prefix by + # converting them to bytes for the user. + if isinstance(regexes[0], str): + regexes = [regex.encode('utf-8') for regex in regexes] + + command_string = get_command_string(command) + self.assertEqual( + ret_code, 0, + self._formatMessage(msg, + 'Running command\n' + '%s failed with error code %s and message\n' + '%s' % (_quote_long_string(command_string), + ret_code, + _quote_long_string(err))) + ) + self.assertRegexMatch( + err, + regexes, + message=self._formatMessage( + msg, + 'Running command\n' + '%s failed with error code %s and message\n' + '%s which matches no regex in %s' % ( + _quote_long_string(command_string), + ret_code, + _quote_long_string(err), + regexes))) + + def assertCommandFails(self, command, regexes, env=None, close_fds=True, + msg=None): + """Asserts a shell command fails and the error matches a regex in a list. + + Args: + command: List or string representing the command to run. + regexes: the list of regular expression strings. + env: Dictionary of environment variable settings. 
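A minimal sketch of `assertSameElements`, `assertBetween` and `assertRegexMatch` as defined above (the values and regexes are illustrative):

```python
from absl.testing import absltest


class TextAndOrderingTest(absltest.TestCase):

  def test_misc_assertions(self):
    # Duplicates are ignored, unlike assertCountEqual.
    self.assertSameElements([1, 1, 0, 0], [0, 1])
    # Inclusive bounds.
    self.assertBetween(5, 1, 10)
    # Passes if any regex in the list matches any substring of the text.
    self.assertRegexMatch('error: file not found',
                          [r'not found', r'permission denied'])


if __name__ == '__main__':
  absltest.main()
```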
If None, no environment + variables will be set for the child process. This is to make tests + more hermetic. NOTE: this behavior is different than the standard + subprocess module. + close_fds: Whether or not to close all open fd's in the child after + forking. + msg: Optional message to report on failure. + """ + (ret_code, err) = get_command_stderr(command, env, close_fds) + + # We need bytes regexes here because `err` is bytes. + # Accommodate code which listed their output regexes w/o the b'' prefix by + # converting them to bytes for the user. + if isinstance(regexes[0], str): + regexes = [regex.encode('utf-8') for regex in regexes] + + command_string = get_command_string(command) + self.assertNotEqual( + ret_code, 0, + self._formatMessage(msg, 'The following command succeeded ' + 'while expected to fail:\n%s' % + _quote_long_string(command_string))) + self.assertRegexMatch( + err, + regexes, + message=self._formatMessage( + msg, + 'Running command\n' + '%s failed with error code %s and message\n' + '%s which matches no regex in %s' % ( + _quote_long_string(command_string), + ret_code, + _quote_long_string(err), + regexes))) + + class _AssertRaisesContext(object): + + def __init__(self, expected_exception, test_case, test_func, msg=None): + self.expected_exception = expected_exception + self.test_case = test_case + self.test_func = test_func + self.msg = msg + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + if exc_type is None: + self.test_case.fail(self.expected_exception.__name__ + ' not raised', + self.msg) + if not issubclass(exc_type, self.expected_exception): + return False + self.test_func(exc_value) + if exc_value: + self.exception = exc_value.with_traceback(None) + return True + + @typing.overload + def assertRaisesWithPredicateMatch( + self, expected_exception, predicate) -> _AssertRaisesContext: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + @typing.overload + def assertRaisesWithPredicateMatch( + self, expected_exception, predicate, callable_obj: Callable[..., Any], + *args, **kwargs) -> None: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + def assertRaisesWithPredicateMatch(self, expected_exception, predicate, + callable_obj=None, *args, **kwargs): + """Asserts that exception is thrown and predicate(exception) is true. + + Args: + expected_exception: Exception class expected to be raised. + predicate: Function of one argument that inspects the passed-in exception + and returns True (success) or False (please fail the test). + callable_obj: Function to be called. + *args: Extra args. + **kwargs: Extra keyword args. + + Returns: + A context manager if callable_obj is None. Otherwise, None. + + Raises: + self.failureException if callable_obj does not raise a matching exception. 
+ """ + def Check(err): + self.assertTrue(predicate(err), + '%r does not match predicate %r' % (err, predicate)) + + context = self._AssertRaisesContext(expected_exception, self, Check) + if callable_obj is None: + return context + with context: + callable_obj(*args, **kwargs) + + @typing.overload + def assertRaisesWithLiteralMatch( + self, expected_exception, expected_exception_message + ) -> _AssertRaisesContext: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + @typing.overload + def assertRaisesWithLiteralMatch( + self, expected_exception, expected_exception_message, + callable_obj: Callable[..., Any], *args, **kwargs) -> None: + # The purpose of this return statement is to work around + # https://github.com/PyCQA/pylint/issues/5273; it is otherwise ignored. + return self._AssertRaisesContext(None, None, None) + + def assertRaisesWithLiteralMatch(self, expected_exception, + expected_exception_message, + callable_obj=None, *args, **kwargs): + """Asserts that the message in a raised exception equals the given string. + + Unlike assertRaisesRegex, this method takes a literal string, not + a regular expression. + + with self.assertRaisesWithLiteralMatch(ExType, 'message'): + DoSomething() + + Args: + expected_exception: Exception class expected to be raised. + expected_exception_message: String message expected in the raised + exception. For a raise exception e, expected_exception_message must + equal str(e). + callable_obj: Function to be called, or None to return a context. + *args: Extra args. + **kwargs: Extra kwargs. + + Returns: + A context manager if callable_obj is None. Otherwise, None. + + Raises: + self.failureException if callable_obj does not raise a matching exception. + """ + def Check(err): + actual_exception_message = str(err) + self.assertTrue(expected_exception_message == actual_exception_message, + 'Exception message does not match.\n' + 'Expected: %r\n' + 'Actual: %r' % (expected_exception_message, + actual_exception_message)) + + context = self._AssertRaisesContext(expected_exception, self, Check) + if callable_obj is None: + return context + with context: + callable_obj(*args, **kwargs) + + def assertContainsInOrder(self, strings, target, msg=None): + """Asserts that the strings provided are found in the target in order. + + This may be useful for checking HTML output. + + Args: + strings: A list of strings, such as [ 'fox', 'dog' ] + target: A target string in which to look for the strings, such as + 'The quick brown fox jumped over the lazy dog'. + msg: Optional message to report on failure. + """ + if isinstance(strings, (bytes, unicode if str is bytes else str)): + strings = (strings,) + + current_index = 0 + last_string = None + for string in strings: + index = target.find(str(string), current_index) + if index == -1 and current_index == 0: + self.fail("Did not find '%s' in '%s'" % + (string, target), msg) + elif index == -1: + self.fail("Did not find '%s' after '%s' in '%s'" % + (string, last_string, target), msg) + last_string = string + current_index = index + + def assertContainsSubsequence(self, container, subsequence, msg=None): + """Asserts that "container" contains "subsequence" as a subsequence. + + Asserts that "container" contains all the elements of "subsequence", in + order, but possibly with other elements interspersed. 
For example, [1, 2, 3] + is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0]. + + Args: + container: the list we're testing for subsequence inclusion. + subsequence: the list we hope will be a subsequence of container. + msg: Optional message to report on failure. + """ + first_nonmatching = None + reversed_container = list(reversed(container)) + subsequence = list(subsequence) + + for e in subsequence: + if e not in reversed_container: + first_nonmatching = e + break + while e != reversed_container.pop(): + pass + + if first_nonmatching is not None: + self.fail('%s not a subsequence of %s. First non-matching element: %s' % + (subsequence, container, first_nonmatching), msg) + + def assertContainsExactSubsequence(self, container, subsequence, msg=None): + """Asserts that "container" contains "subsequence" as an exact subsequence. + + Asserts that "container" contains all the elements of "subsequence", in + order, and without other elements interspersed. For example, [1, 2, 3] is an + exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0]. + + Args: + container: the list we're testing for subsequence inclusion. + subsequence: the list we hope will be an exact subsequence of container. + msg: Optional message to report on failure. + """ + container = list(container) + subsequence = list(subsequence) + longest_match = 0 + + for start in range(1 + len(container) - len(subsequence)): + if longest_match == len(subsequence): + break + index = 0 + while (index < len(subsequence) and + subsequence[index] == container[start + index]): + index += 1 + longest_match = max(longest_match, index) + + if longest_match < len(subsequence): + self.fail('%s not an exact subsequence of %s. ' + 'Longest matching prefix: %s' % + (subsequence, container, subsequence[:longest_match]), msg) + + def assertTotallyOrdered(self, *groups, **kwargs): + """Asserts that total ordering has been implemented correctly. + + For example, say you have a class A that compares only on its attribute x. + Comparators other than ``__lt__`` are omitted for brevity:: + + class A(object): + def __init__(self, x, y): + self.x = x + self.y = y + + def __hash__(self): + return hash(self.x) + + def __lt__(self, other): + try: + return self.x < other.x + except AttributeError: + return NotImplemented + + assertTotallyOrdered will check that instances can be ordered correctly. + For example:: + + self.assertTotallyOrdered( + [None], # None should come before everything else. + [1], # Integers sort earlier. + [A(1, 'a')], + [A(2, 'b')], # 2 is after 1. + [A(3, 'c'), A(3, 'd')], # The second argument is irrelevant. + [A(4, 'z')], + ['foo']) # Strings sort last. + + Args: + *groups: A list of groups of elements. Each group of elements is a list + of objects that are equal. The elements in each group must be less + than the elements in the group after it. For example, these groups are + totally ordered: ``[None]``, ``[1]``, ``[2, 2]``, ``[3]``. + **kwargs: optional msg keyword argument can be passed. 
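A minimal sketch of the ordered-containment assertions defined above, reusing the examples from their docstrings:

```python
from absl.testing import absltest


class SubsequenceTest(absltest.TestCase):

  def test_subsequences(self):
    # Elements must appear in order, possibly with others interspersed.
    self.assertContainsSubsequence([0, 0, 1, 2, 0, 3, 0], [1, 2, 3])
    # Elements must appear in order and contiguously.
    self.assertContainsExactSubsequence([0, 0, 1, 2, 3, 0], [1, 2, 3])
    # Strings must be found in the target in the given order.
    self.assertContainsInOrder(
        ['fox', 'dog'], 'The quick brown fox jumped over the lazy dog')


if __name__ == '__main__':
  absltest.main()
```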
+ """ + + def CheckOrder(small, big): + """Ensures small is ordered before big.""" + self.assertFalse(small == big, + self._formatMessage(msg, '%r unexpectedly equals %r' % + (small, big))) + self.assertTrue(small != big, + self._formatMessage(msg, '%r unexpectedly equals %r' % + (small, big))) + self.assertLess(small, big, msg) + self.assertFalse(big < small, + self._formatMessage(msg, + '%r unexpectedly less than %r' % + (big, small))) + self.assertLessEqual(small, big, msg) + self.assertFalse(big <= small, self._formatMessage( + '%r unexpectedly less than or equal to %r' % (big, small), msg + )) + self.assertGreater(big, small, msg) + self.assertFalse(small > big, + self._formatMessage(msg, + '%r unexpectedly greater than %r' % + (small, big))) + self.assertGreaterEqual(big, small) + self.assertFalse(small >= big, self._formatMessage( + msg, + '%r unexpectedly greater than or equal to %r' % (small, big))) + + def CheckEqual(a, b): + """Ensures that a and b are equal.""" + self.assertEqual(a, b, msg) + self.assertFalse(a != b, + self._formatMessage(msg, '%r unexpectedly unequals %r' % + (a, b))) + + # Objects that compare equal must hash to the same value, but this only + # applies if both objects are hashable. + if (isinstance(a, abc.Hashable) and + isinstance(b, abc.Hashable)): + self.assertEqual( + hash(a), hash(b), + self._formatMessage( + msg, 'hash %d of %r unexpectedly not equal to hash %d of %r' % + (hash(a), a, hash(b), b))) + + self.assertFalse(a < b, + self._formatMessage(msg, + '%r unexpectedly less than %r' % + (a, b))) + self.assertFalse(b < a, + self._formatMessage(msg, + '%r unexpectedly less than %r' % + (b, a))) + self.assertLessEqual(a, b, msg) + self.assertLessEqual(b, a, msg) # pylint: disable=arguments-out-of-order + self.assertFalse(a > b, + self._formatMessage(msg, + '%r unexpectedly greater than %r' % + (a, b))) + self.assertFalse(b > a, + self._formatMessage(msg, + '%r unexpectedly greater than %r' % + (b, a))) + self.assertGreaterEqual(a, b, msg) + self.assertGreaterEqual(b, a, msg) # pylint: disable=arguments-out-of-order + + msg = kwargs.get('msg') + + # For every combination of elements, check the order of every pair of + # elements. + for elements in itertools.product(*groups): + elements = list(elements) + for index, small in enumerate(elements[:-1]): + for big in elements[index + 1:]: + CheckOrder(small, big) + + # Check that every element in each group is equal. + for group in groups: + for a in group: + CheckEqual(a, a) + for a, b in itertools.product(group, group): + CheckEqual(a, b) + + def assertDictEqual(self, a, b, msg=None): + """Raises AssertionError if a and b are not equal dictionaries. + + Args: + a: A dict, the expected value. + b: A dict, the actual value. + msg: An optional str, the associated message. + + Raises: + AssertionError: if the dictionaries are not equal. + """ + self.assertIsInstance(a, dict, self._formatMessage( + msg, + 'First argument is not a dictionary' + )) + self.assertIsInstance(b, dict, self._formatMessage( + msg, + 'Second argument is not a dictionary' + )) + + def Sorted(list_of_items): + try: + return sorted(list_of_items) # In 3.3, unordered are possible. 
+ except TypeError: + return list_of_items + + if a == b: + return + a_items = Sorted(list(a.items())) + b_items = Sorted(list(b.items())) + + unexpected = [] + missing = [] + different = [] + + safe_repr = unittest.util.safe_repr # pytype: disable=module-attr + + def Repr(dikt): + """Deterministic repr for dict.""" + # Sort the entries based on their repr, not based on their sort order, + # which will be non-deterministic across executions, for many types. + entries = sorted((safe_repr(k), safe_repr(v)) for k, v in dikt.items()) + return '{%s}' % (', '.join('%s: %s' % pair for pair in entries)) + + message = ['%s != %s%s' % (Repr(a), Repr(b), ' (%s)' % msg if msg else '')] + + # The standard library default output confounds lexical difference with + # value difference; treat them separately. + for a_key, a_value in a_items: + if a_key not in b: + missing.append((a_key, a_value)) + elif a_value != b[a_key]: + different.append((a_key, a_value, b[a_key])) + + for b_key, b_value in b_items: + if b_key not in a: + unexpected.append((b_key, b_value)) + + if unexpected: + message.append( + 'Unexpected, but present entries:\n%s' % ''.join( + '%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in unexpected)) + + if different: + message.append( + 'repr() of differing entries:\n%s' % ''.join( + '%s: %s != %s\n' % (safe_repr(k), safe_repr(a_value), + safe_repr(b_value)) + for k, a_value, b_value in different)) + + if missing: + message.append( + 'Missing entries:\n%s' % ''.join( + ('%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in missing))) + + raise self.failureException('\n'.join(message)) + + def assertDataclassEqual(self, first, second, msg=None): + """Asserts two dataclasses are equal with more informative errors. + + Arguments must both be dataclasses. This compares equality of individual + fields and takes care to not compare fields that are marked as + non-comparable. It gives per field differences, which are easier to parse + than the comparison of the string representations from assertEqual. + + In cases where the dataclass has a custom __eq__, and it is defined in a + way that is inconsistent with equality of comparable fields, we raise an + exception without further trying to figure out how they are different. + + Args: + first: A dataclass, the first value. + second: A dataclass, the second value. + msg: An optional str, the associated message. + + Raises: + AssertionError: if the dataclasses are not equal. + """ + + if not dataclasses.is_dataclass(first) or isinstance(first, type): + raise self.failureException('First argument is not a dataclass instance.') + if not dataclasses.is_dataclass(second) or isinstance(second, type): + raise self.failureException( + 'Second argument is not a dataclass instance.' + ) + + if first == second: + return + + if type(first) is not type(second): + self.fail( + 'Found different dataclass types: %s != %s' + % (type(first), type(second)), + msg, + ) + + # Make sure to skip fields that are marked compare=False. 
+ different = [ + (f.name, getattr(first, f.name), getattr(second, f.name)) + for f in dataclasses.fields(first) + if f.compare and getattr(first, f.name) != getattr(second, f.name) + ] + + safe_repr = unittest.util.safe_repr # pytype: disable=module-attr + message = ['%s != %s' % (safe_repr(first), safe_repr(second))] + if different: + message.append('Fields that differ:') + message.extend( + '%s: %s != %s' % (k, safe_repr(first_v), safe_repr(second_v)) + for k, first_v, second_v in different + ) + else: + message.append( + 'Cannot detect difference by examining the fields of the dataclass.' + ) + + raise self.fail('\n'.join(message), msg) + + def assertUrlEqual(self, a, b, msg=None): + """Asserts that urls are equal, ignoring ordering of query params.""" + parsed_a = parse.urlparse(a) + parsed_b = parse.urlparse(b) + self.assertEqual(parsed_a.scheme, parsed_b.scheme, msg) + self.assertEqual(parsed_a.netloc, parsed_b.netloc, msg) + self.assertEqual(parsed_a.path, parsed_b.path, msg) + self.assertEqual(parsed_a.fragment, parsed_b.fragment, msg) + self.assertEqual(sorted(parsed_a.params.split(';')), + sorted(parsed_b.params.split(';')), msg) + self.assertDictEqual( + parse.parse_qs(parsed_a.query, keep_blank_values=True), + parse.parse_qs(parsed_b.query, keep_blank_values=True), msg) + + def assertSameStructure(self, a, b, aname='a', bname='b', msg=None): + """Asserts that two values contain the same structural content. + + The two arguments should be data trees consisting of trees of dicts and + lists. They will be deeply compared by walking into the contents of dicts + and lists; other items will be compared using the == operator. + If the two structures differ in content, the failure message will indicate + the location within the structures where the first difference is found. + This may be helpful when comparing large structures. + + Mixed Sequence and Set types are supported. Mixed Mapping types are + supported, but the order of the keys will not be considered in the + comparison. + + Args: + a: The first structure to compare. + b: The second structure to compare. + aname: Variable name to use for the first structure in assertion messages. + bname: Variable name to use for the second structure. + msg: Additional text to include in the failure message. + """ + + # Accumulate all the problems found so we can report all of them at once + # rather than just stopping at the first + problems = [] + + _walk_structure_for_problems(a, b, aname, bname, problems, + self.assertEqual, self.failureException) + + # Avoid spamming the user toooo much + if self.maxDiff is not None: + max_problems_to_show = self.maxDiff // 80 + if len(problems) > max_problems_to_show: + problems = problems[0:max_problems_to_show-1] + ['...'] + + if problems: + self.fail('; '.join(problems), msg) + + def assertJsonEqual(self, first, second, msg=None): + """Asserts that the JSON objects defined in two strings are equal. + + A summary of the differences will be included in the failure message + using assertSameStructure. + + Args: + first: A string containing JSON to decode and compare to second. + second: A string containing JSON to decode and compare to first. + msg: Additional text to include in the failure message. 
+ """ + try: + first_structured = json.loads(first) + except ValueError as e: + raise ValueError(self._formatMessage( + msg, + 'could not decode first JSON value %s: %s' % (first, e))) + + try: + second_structured = json.loads(second) + except ValueError as e: + raise ValueError(self._formatMessage( + msg, + 'could not decode second JSON value %s: %s' % (second, e))) + + self.assertSameStructure(first_structured, second_structured, + aname='first', bname='second', msg=msg) + + def _getAssertEqualityFunc(self, first, second): + # type: (Any, Any) -> Callable[..., None] + try: + return super(TestCase, self)._getAssertEqualityFunc(first, second) + except AttributeError: + # This is a workaround if unittest.TestCase.__init__ was never run. + # It usually means that somebody created a subclass just for the + # assertions and has overridden __init__. "assertTrue" is a safe + # value that will not make __init__ raise a ValueError. + test_method = getattr(self, '_testMethodName', 'assertTrue') + super(TestCase, self).__init__(test_method) + + return super(TestCase, self)._getAssertEqualityFunc(first, second) + + def fail(self, msg=None, user_msg=None) -> NoReturn: + """Fail immediately with the given standard message and user message.""" + return super(TestCase, self).fail(self._formatMessage(user_msg, msg)) + + +def _sorted_list_difference(expected, actual): + # type: (List[_T], List[_T]) -> Tuple[List[_T], List[_T]] + """Finds elements in only one or the other of two, sorted input lists. + + Returns a two-element tuple of lists. The first list contains those + elements in the "expected" list but not in the "actual" list, and the + second contains those elements in the "actual" list but not in the + "expected" list. Duplicate elements in either input list are ignored. + + Args: + expected: The list we expected. + actual: The list we actually got. + Returns: + (missing, unexpected) + missing: items in expected that are not in actual. + unexpected: items in actual that are not in expected. 
+ """ + i = j = 0 + missing = [] + unexpected = [] + while True: + try: + e = expected[i] + a = actual[j] + if e < a: + missing.append(e) + i += 1 + while expected[i] == e: + i += 1 + elif e > a: + unexpected.append(a) + j += 1 + while actual[j] == a: + j += 1 + else: + i += 1 + try: + while expected[i] == e: + i += 1 + finally: + j += 1 + while actual[j] == a: + j += 1 + except IndexError: + missing.extend(expected[i:]) + unexpected.extend(actual[j:]) + break + return missing, unexpected + + +def _are_both_of_integer_type(a, b): + # type: (object, object) -> bool + return isinstance(a, int) and isinstance(b, int) + + +def _are_both_of_sequence_type(a, b): + # type: (object, object) -> bool + return isinstance(a, abc.Sequence) and isinstance( + b, abc.Sequence) and not isinstance( + a, _TEXT_OR_BINARY_TYPES) and not isinstance(b, _TEXT_OR_BINARY_TYPES) + + +def _are_both_of_set_type(a, b): + # type: (object, object) -> bool + return isinstance(a, abc.Set) and isinstance(b, abc.Set) + + +def _are_both_of_mapping_type(a, b): + # type: (object, object) -> bool + return isinstance(a, abc.Mapping) and isinstance( + b, abc.Mapping) + + +def _walk_structure_for_problems( + a, b, aname, bname, problem_list, leaf_assert_equal_func, failure_exception +): + """The recursive comparison behind assertSameStructure.""" + if type(a) != type(b) and not ( # pylint: disable=unidiomatic-typecheck + _are_both_of_integer_type(a, b) or _are_both_of_sequence_type(a, b) or + _are_both_of_set_type(a, b) or _are_both_of_mapping_type(a, b)): + # We do not distinguish between int and long types as 99.99% of Python 2 + # code should never care. They collapse into a single type in Python 3. + problem_list.append('%s is a %r but %s is a %r' % + (aname, type(a), bname, type(b))) + # If they have different types there's no point continuing + return + + if isinstance(a, abc.Set): + for k in a: + if k not in b: + problem_list.append( + '%s has %r but %s does not' % (aname, k, bname)) + for k in b: + if k not in a: + problem_list.append('%s lacks %r but %s has it' % (aname, k, bname)) + + # NOTE: a or b could be a defaultdict, so we must take care that the traversal + # doesn't modify the data. + elif isinstance(a, abc.Mapping): + for k in a: + if k in b: + _walk_structure_for_problems( + a[k], b[k], '%s[%r]' % (aname, k), '%s[%r]' % (bname, k), + problem_list, leaf_assert_equal_func, failure_exception) + else: + problem_list.append( + "%s has [%r] with value %r but it's missing in %s" % + (aname, k, a[k], bname)) + for k in b: + if k not in a: + problem_list.append( + '%s lacks [%r] but %s has it with value %r' % + (aname, k, bname, b[k])) + + # Strings/bytes are Sequences but we'll just do those with regular != + elif (isinstance(a, abc.Sequence) and + not isinstance(a, _TEXT_OR_BINARY_TYPES)): + minlen = min(len(a), len(b)) + for i in range(minlen): + _walk_structure_for_problems( + a[i], b[i], '%s[%d]' % (aname, i), '%s[%d]' % (bname, i), + problem_list, leaf_assert_equal_func, failure_exception) + for i in range(minlen, len(a)): + problem_list.append('%s has [%i] with value %r but %s does not' % + (aname, i, a[i], bname)) + for i in range(minlen, len(b)): + problem_list.append('%s lacks [%i] but %s has it with value %r' % + (aname, i, bname, b[i])) + + else: + try: + leaf_assert_equal_func(a, b) + except failure_exception: + problem_list.append('%s is %r but %s is %r' % (aname, a, bname, b)) + + +def get_command_string(command): + """Returns an escaped string that can be used as a shell command. 
+ + Args: + command: List or string representing the command to run. + Returns: + A string suitable for use as a shell command. + """ + if isinstance(command, str): + return command + else: + if os.name == 'nt': + return ' '.join(command) + else: + # The following is identical to Python 3's shlex.quote function. + command_string = '' + for word in command: + # Single quote word, and replace each ' in word with '"'"' + command_string += "'" + word.replace("'", "'\"'\"'") + "' " + return command_string[:-1] + + +def get_command_stderr(command, env=None, close_fds=True): + """Runs the given shell command and returns a tuple. + + Args: + command: List or string representing the command to run. + env: Dictionary of environment variable settings. If None, no environment + variables will be set for the child process. This is to make tests + more hermetic. NOTE: this behavior is different than the standard + subprocess module. + close_fds: Whether or not to close all open fd's in the child after forking. + On Windows, this is ignored and close_fds is always False. + + Returns: + Tuple of (exit status, text printed to stdout and stderr by the command). + """ + if env is None: env = {} + if os.name == 'nt': + # Windows does not support setting close_fds to True while also redirecting + # standard handles. + close_fds = False + + use_shell = isinstance(command, str) + process = subprocess.Popen( + command, + close_fds=close_fds, + env=env, + shell=use_shell, + stderr=subprocess.STDOUT, + stdout=subprocess.PIPE) + output = process.communicate()[0] + exit_status = process.wait() + return (exit_status, output) + + +def _quote_long_string(s): + # type: (Union[Text, bytes, bytearray]) -> Text + """Quotes a potentially multi-line string to make the start and end obvious. + + Args: + s: A string. + + Returns: + The quoted string. + """ + if isinstance(s, (bytes, bytearray)): + try: + s = s.decode('utf-8') + except UnicodeDecodeError: + s = str(s) + return ('8<-----------\n' + + s + '\n' + + '----------->8\n') + + +def print_python_version(): + # type: () -> None + # Having this in the test output logs by default helps debugging when all + # you've got is the log and no other idea of which Python was used. + sys.stderr.write('Running tests under Python {0[0]}.{0[1]}.{0[2]}: ' + '{1}\n'.format( + sys.version_info, + sys.executable if sys.executable else 'embedded.')) + + +def main(*args, **kwargs): + # type: (Text, Any) -> None + """Executes a set of Python unit tests. + + Usually this function is called without arguments, so the + unittest.TestProgram instance will get created with the default settings, + so it will run all test methods of all TestCase classes in the ``__main__`` + module. + + Args: + *args: Positional arguments passed through to + ``unittest.TestProgram.__init__``. + **kwargs: Keyword arguments passed through to + ``unittest.TestProgram.__init__``. + """ + print_python_version() + _run_in_app(run_tests, args, kwargs) + + +def _is_in_app_main(): + # type: () -> bool + """Returns True iff app.run is active.""" + f = sys._getframe().f_back # pylint: disable=protected-access + while f: + if f.f_code == app.run.__code__: + return True + f = f.f_back + return False + + +def _register_sigterm_with_faulthandler(): + # type: () -> None + """Have faulthandler dump stacks on SIGTERM. Useful to diagnose timeouts.""" + if getattr(faulthandler, 'register', None): + # faulthandler.register is not available on Windows. + # faulthandler.enable() is already called by app.run. 
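    # chain=True below means any previously installed SIGTERM handler is still
    # invoked after the traceback dump, so this registration is additive
    # rather than a replacement.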
+ try: + faulthandler.register(signal.SIGTERM, chain=True) # pytype: disable=module-attr + except Exception as e: # pylint: disable=broad-except + sys.stderr.write('faulthandler.register(SIGTERM) failed ' + '%r; ignoring.\n' % e) + + +def _run_in_app(function, args, kwargs): + # type: (Callable[..., None], Sequence[Text], Mapping[Text, Any]) -> None + """Executes a set of Python unit tests, ensuring app.run. + + This is a private function, users should call absltest.main(). + + _run_in_app calculates argv to be the command-line arguments of this program + (without the flags), sets the default of FLAGS.alsologtostderr to True, + then it calls function(argv, args, kwargs), making sure that `function' + will get called within app.run(). _run_in_app does this by checking whether + it is called by app.run(), or by calling app.run() explicitly. + + The reason why app.run has to be ensured is to make sure that + flags are parsed and stripped properly, and other initializations done by + the app module are also carried out, no matter if absltest.run() is called + from within or outside app.run(). + + If _run_in_app is called from within app.run(), then it will reparse + sys.argv and pass the result without command-line flags into the argv + argument of `function'. The reason why this parsing is needed is that + __main__.main() calls absltest.main() without passing its argv. So the + only way _run_in_app could get to know the argv without the flags is that + it reparses sys.argv. + + _run_in_app changes the default of FLAGS.alsologtostderr to True so that the + test program's stderr will contain all the log messages unless otherwise + specified on the command-line. This overrides any explicit assignment to + FLAGS.alsologtostderr by the test program prior to the call to _run_in_app() + (e.g. in __main__.main). + + Please note that _run_in_app (and the function it calls) is allowed to make + changes to kwargs. + + Args: + function: absltest.run_tests or a similar function. It will be called as + function(argv, args, kwargs) where argv is a list containing the + elements of sys.argv without the command-line flags. + args: Positional arguments passed through to unittest.TestProgram.__init__. + kwargs: Keyword arguments passed through to unittest.TestProgram.__init__. + """ + if _is_in_app_main(): + _register_sigterm_with_faulthandler() + + # Change the default of alsologtostderr from False to True, so the test + # programs's stderr will contain all the log messages. + # If --alsologtostderr=false is specified in the command-line, or user + # has called FLAGS.alsologtostderr = False before, then the value is kept + # False. + FLAGS.set_default('alsologtostderr', True) + + # Here we only want to get the `argv` without the flags. To avoid any + # side effects of parsing flags, we temporarily stub out the `parse` method + stored_parse_methods = {} + noop_parse = lambda _: None + for name in FLAGS: + # Avoid any side effects of parsing flags. + stored_parse_methods[name] = FLAGS[name].parse + # This must be a separate loop since multiple flag names (short_name=) can + # point to the same flag object. + for name in FLAGS: + FLAGS[name].parse = noop_parse + try: + argv = FLAGS(sys.argv) + finally: + for name in FLAGS: + FLAGS[name].parse = stored_parse_methods[name] + sys.stdout.flush() + + function(argv, args, kwargs) + else: + # Send logging to stderr. Use --alsologtostderr instead of --logtostderr + # in case tests are reading their own logs. 
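    # (--logtostderr would stop writing the log files altogether; the "also"
    # variant keeps the files and merely duplicates messages to stderr, so a
    # test that inspects its own log file still finds it.)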
+ FLAGS.set_default('alsologtostderr', True) + + def main_function(argv): + _register_sigterm_with_faulthandler() + function(argv, args, kwargs) + + app.run(main=main_function) + + +def _is_suspicious_attribute(testCaseClass, name): + # type: (Type, Text) -> bool + """Returns True if an attribute is a method named like a test method.""" + if name.startswith('Test') and len(name) > 4 and name[4].isupper(): + attr = getattr(testCaseClass, name) + if inspect.isfunction(attr) or inspect.ismethod(attr): + args = inspect.getfullargspec(attr) + return (len(args.args) == 1 and args.args[0] == 'self' and + args.varargs is None and args.varkw is None and + not args.kwonlyargs) + return False + + +def skipThisClass(reason): + # type: (Text) -> Callable[[_T], _T] + """Skip tests in the decorated TestCase, but not any of its subclasses. + + This decorator indicates that this class should skip all its tests, but not + any of its subclasses. Useful for if you want to share testMethod or setUp + implementations between a number of concrete testcase classes. + + Example usage, showing how you can share some common test methods between + subclasses. In this example, only ``BaseTest`` will be marked as skipped, and + not RealTest or SecondRealTest:: + + @absltest.skipThisClass("Shared functionality") + class BaseTest(absltest.TestCase): + def test_simple_functionality(self): + self.assertEqual(self.system_under_test.method(), 1) + + class RealTest(BaseTest): + def setUp(self): + super().setUp() + self.system_under_test = MakeSystem(argument) + + def test_specific_behavior(self): + ... + + class SecondRealTest(BaseTest): + def setUp(self): + super().setUp() + self.system_under_test = MakeSystem(other_arguments) + + def test_other_behavior(self): + ... + + Args: + reason: The reason we have a skip in place. For instance: 'shared test + methods' or 'shared assertion methods'. + + Returns: + Decorator function that will cause a class to be skipped. + """ + if isinstance(reason, type): + raise TypeError('Got {!r}, expected reason as string'.format(reason)) + + def _skip_class(test_case_class): + if not issubclass(test_case_class, unittest.TestCase): + raise TypeError( + 'Decorating {!r}, expected TestCase subclass'.format(test_case_class)) + + # Only shadow the setUpClass method if it is directly defined. If it is + # in the parent class we invoke it via a super() call instead of holding + # a reference to it. + shadowed_setupclass = test_case_class.__dict__.get('setUpClass', None) + + @classmethod + def replacement_setupclass(cls, *args, **kwargs): + # Skip this class if it is the one that was decorated with @skipThisClass + if cls is test_case_class: + raise SkipTest(reason) + if shadowed_setupclass: + # Pass along `cls` so the MRO chain doesn't break. + # The original method is a `classmethod` descriptor, which can't + # be directly called, but `__func__` has the underlying function. + return shadowed_setupclass.__func__(cls, *args, **kwargs) + else: + # Because there's no setUpClass() defined directly on test_case_class, + # we call super() ourselves to continue execution of the inheritance + # chain. + return super(test_case_class, cls).setUpClass(*args, **kwargs) + + test_case_class.setUpClass = replacement_setupclass + return test_case_class + + return _skip_class + + +class TestLoader(unittest.TestLoader): + """A test loader which supports common test features. 
+ + Supported features include: + * Banning untested methods with test-like names: methods attached to this + testCase with names starting with `Test` are ignored by the test runner, + and often represent mistakenly-omitted test cases. This loader will raise + a TypeError when attempting to load a TestCase with such methods. + * Randomization of test case execution order (optional). + """ + + _ERROR_MSG = textwrap.dedent("""Method '%s' is named like a test case but + is not one. This is often a bug. If you want it to be a test method, + name it with 'test' in lowercase. If not, rename the method to not begin + with 'Test'.""") + + def __init__(self, *args, **kwds): + super(TestLoader, self).__init__(*args, **kwds) + seed = _get_default_randomize_ordering_seed() + if seed: + self._randomize_ordering_seed = seed + self._random = random.Random(self._randomize_ordering_seed) + else: + self._randomize_ordering_seed = None + self._random = None + + def getTestCaseNames(self, testCaseClass): # pylint:disable=invalid-name + """Validates and returns a (possibly randomized) list of test case names.""" + for name in dir(testCaseClass): + if _is_suspicious_attribute(testCaseClass, name): + raise TypeError(TestLoader._ERROR_MSG % name) + names = list(super(TestLoader, self).getTestCaseNames(testCaseClass)) + if self._randomize_ordering_seed is not None: + logging.info( + 'Randomizing test order with seed: %d', self._randomize_ordering_seed) + logging.info( + 'To reproduce this order, re-run with ' + '--test_randomize_ordering_seed=%d', self._randomize_ordering_seed) + self._random.shuffle(names) + return names + + +def get_default_xml_output_filename(): + # type: () -> Optional[Text] + if os.environ.get('XML_OUTPUT_FILE'): + return os.environ['XML_OUTPUT_FILE'] + elif os.environ.get('RUNNING_UNDER_TEST_DAEMON'): + return os.path.join(os.path.dirname(TEST_TMPDIR.value), 'test_detail.xml') + elif os.environ.get('TEST_XMLOUTPUTDIR'): + return os.path.join( + os.environ['TEST_XMLOUTPUTDIR'], + os.path.splitext(os.path.basename(sys.argv[0]))[0] + '.xml') + + +def _setup_filtering(argv: MutableSequence[str]) -> bool: + """Implements the bazel test filtering protocol. + + The following environment variable is used in this method: + + TESTBRIDGE_TEST_ONLY: string, if set, is forwarded to the unittest + framework to use as a test filter. Its value is split with shlex, then: + 1. On Python 3.6 and before, split values are passed as positional + arguments on argv. + 2. On Python 3.7+, split values are passed to unittest's `-k` flag. Tests + are matched by glob patterns or substring. See + https://docs.python.org/3/library/unittest.html#cmdoption-unittest-k + + Args: + argv: the argv to mutate in-place. + + Returns: + Whether test filtering is requested. + """ + test_filter = os.environ.get('TESTBRIDGE_TEST_ONLY') + if argv is None or not test_filter: + return False + + filters = shlex.split(test_filter) + if sys.version_info[:2] >= (3, 7): + filters = ['-k=' + test_filter for test_filter in filters] + + argv[1:1] = filters + return True + + +def _setup_test_runner_fail_fast(argv): + # type: (MutableSequence[Text]) -> None + """Implements the bazel test fail fast protocol. + + The following environment variable is used in this method: + + TESTBRIDGE_TEST_RUNNER_FAIL_FAST=<1|0> + + If set to 1, --failfast is passed to the unittest framework to return upon + first failure. + + Args: + argv: the argv to mutate in-place. 
+ """ + + if argv is None: + return + + if os.environ.get('TESTBRIDGE_TEST_RUNNER_FAIL_FAST') != '1': + return + + argv[1:1] = ['--failfast'] + + +def _setup_sharding( + custom_loader: Optional[unittest.TestLoader] = None, +) -> Tuple[unittest.TestLoader, Optional[int]]: + """Implements the bazel sharding protocol. + + The following environment variables are used in this method: + + TEST_SHARD_STATUS_FILE: string, if set, points to a file. We write a blank + file to tell the test runner that this test implements the test sharding + protocol. + + TEST_TOTAL_SHARDS: int, if set, sharding is requested. + + TEST_SHARD_INDEX: int, must be set if TEST_TOTAL_SHARDS is set. Specifies + the shard index for this instance of the test process. Must satisfy: + 0 <= TEST_SHARD_INDEX < TEST_TOTAL_SHARDS. + + Args: + custom_loader: A TestLoader to be made sharded. + + Returns: + A tuple of ``(test_loader, shard_index)``. ``test_loader`` is for + shard-filtering or the standard test loader depending on the sharding + environment variables. ``shard_index`` is the shard index, or ``None`` when + sharding is not used. + """ + + # It may be useful to write the shard file even if the other sharding + # environment variables are not set. Test runners may use this functionality + # to query whether a test binary implements the test sharding protocol. + if 'TEST_SHARD_STATUS_FILE' in os.environ: + try: + with open(os.environ['TEST_SHARD_STATUS_FILE'], 'w') as f: + f.write('') + except IOError: + sys.stderr.write('Error opening TEST_SHARD_STATUS_FILE (%s). Exiting.' + % os.environ['TEST_SHARD_STATUS_FILE']) + sys.exit(1) + + base_loader = custom_loader or TestLoader() + if 'TEST_TOTAL_SHARDS' not in os.environ: + # Not using sharding, use the expected test loader. + return base_loader, None + + total_shards = int(os.environ['TEST_TOTAL_SHARDS']) + shard_index = int(os.environ['TEST_SHARD_INDEX']) + + if shard_index < 0 or shard_index >= total_shards: + sys.stderr.write('ERROR: Bad sharding values. index=%d, total=%d\n' % + (shard_index, total_shards)) + sys.exit(1) + + # Replace the original getTestCaseNames with one that returns + # the test case names for this shard. + delegate_get_names = base_loader.getTestCaseNames + + bucket_iterator = itertools.cycle(range(total_shards)) + + def getShardedTestCaseNames(testCaseClass): + filtered_names = [] + # We need to sort the list of tests in order to determine which tests this + # shard is responsible for; however, it's important to preserve the order + # returned by the base loader, e.g. in the case of randomized test ordering. + ordered_names = delegate_get_names(testCaseClass) + for testcase in sorted(ordered_names): + bucket = next(bucket_iterator) + if bucket == shard_index: + filtered_names.append(testcase) + return [x for x in ordered_names if x in filtered_names] + + base_loader.getTestCaseNames = getShardedTestCaseNames + return base_loader, shard_index + + +def _run_and_get_tests_result( + argv: MutableSequence[str], + args: Sequence[Any], + kwargs: MutableMapping[str, Any], + xml_test_runner_class: Type[unittest.TextTestRunner], +) -> Tuple[unittest.TestResult, bool]: + """Same as run_tests, but it doesn't exit. + + Args: + argv: sys.argv with the command-line flags removed from the front, i.e. the + argv with which :func:`app.run()` has called + ``__main__.main``. It is passed to + ``unittest.TestProgram.__init__(argv=)``, which does its own flag parsing. + It is ignored if kwargs contains an argv entry. 
+ args: Positional arguments passed through to + ``unittest.TestProgram.__init__``. + kwargs: Keyword arguments passed through to + ``unittest.TestProgram.__init__``. + xml_test_runner_class: The type of the test runner class. + + Returns: + A tuple of ``(test_result, fail_when_no_tests_ran)``. + ``fail_when_no_tests_ran`` indicates whether the test should fail when + no tests ran. + """ + + # The entry from kwargs overrides argv. + argv = kwargs.pop('argv', argv) + + if sys.version_info[:2] >= (3, 12): + # Python 3.12 unittest changed the behavior from PASS to FAIL in + # https://github.com/python/cpython/pull/102051. absltest follows this. + fail_when_no_tests_ran = True + else: + # Historically, absltest and unittest before Python 3.12 passes if no tests + # ran. + fail_when_no_tests_ran = False + + # Set up test filtering if requested in environment. + if _setup_filtering(argv): + # When test filtering is requested, ideally we also want to fail when no + # tests ran. However, the test filters are usually done when running bazel. + # When you run multiple targets, e.g. `bazel test //my_dir/... + # --test_filter=MyTest`, you don't necessarily want individual tests to fail + # because no tests match in that particular target. + # Due to this use case, we don't fail when test filtering is requested via + # the environment variable from bazel. + fail_when_no_tests_ran = False + + # Set up --failfast as requested in environment + _setup_test_runner_fail_fast(argv) + + # Shard the (default or custom) loader if sharding is turned on. + kwargs['testLoader'], shard_index = _setup_sharding( + kwargs.get('testLoader', None) + ) + if shard_index is not None and shard_index > 0: + # When sharding is requested, all the shards except the first one shall not + # fail when no tests ran. This happens when the shard count is greater than + # the test case count. + fail_when_no_tests_ran = False + + # XML file name is based upon (sorted by priority): + # --xml_output_file flag, XML_OUTPUT_FILE variable, + # TEST_XMLOUTPUTDIR variable or RUNNING_UNDER_TEST_DAEMON variable. + if not FLAGS.xml_output_file: + FLAGS.xml_output_file = get_default_xml_output_filename() + xml_output_file = FLAGS.xml_output_file + + xml_buffer = None + if xml_output_file: + xml_output_dir = os.path.dirname(xml_output_file) + if xml_output_dir and not os.path.isdir(xml_output_dir): + try: + os.makedirs(xml_output_dir) + except OSError as e: + # File exists error can occur with concurrent tests + if e.errno != errno.EEXIST: + raise + # Fail early if we can't write to the XML output file. This is so that we + # don't waste people's time running tests that will just fail anyways. + with _open(xml_output_file, 'w'): + pass + + # We can reuse testRunner if it supports XML output (e. g. by inheriting + # from xml_reporter.TextAndXMLTestRunner). Otherwise we need to use + # xml_reporter.TextAndXMLTestRunner. + if (kwargs.get('testRunner') is not None + and not hasattr(kwargs['testRunner'], 'set_default_xml_stream')): + sys.stderr.write('WARNING: XML_OUTPUT_FILE or --xml_output_file setting ' + 'overrides testRunner=%r setting (possibly from --pdb)' + % (kwargs['testRunner'])) + # Passing a class object here allows TestProgram to initialize + # instances based on its kwargs and/or parsed command-line args. 
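    # (When handed a class, unittest.TestProgram constructs the runner itself,
    # typically forwarding options such as verbosity, failfast and buffer; an
    # already-built runner instance would be used as-is.)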
+ kwargs['testRunner'] = xml_test_runner_class + if kwargs.get('testRunner') is None: + kwargs['testRunner'] = xml_test_runner_class + # Use an in-memory buffer (not backed by the actual file) to store the XML + # report, because some tools modify the file (e.g., create a placeholder + # with partial information, in case the test process crashes). + xml_buffer = io.StringIO() + kwargs['testRunner'].set_default_xml_stream(xml_buffer) # pytype: disable=attribute-error + + # If we've used a seed to randomize test case ordering, we want to record it + # as a top-level attribute in the `testsuites` section of the XML output. + randomize_ordering_seed = getattr( + kwargs['testLoader'], '_randomize_ordering_seed', None) + setter = getattr(kwargs['testRunner'], 'set_testsuites_property', None) + if randomize_ordering_seed and setter: + setter('test_randomize_ordering_seed', randomize_ordering_seed) + elif kwargs.get('testRunner') is None: + kwargs['testRunner'] = _pretty_print_reporter.TextTestRunner + + if FLAGS.pdb_post_mortem: + runner = kwargs['testRunner'] + # testRunner can be a class or an instance, which must be tested for + # differently. + # Overriding testRunner isn't uncommon, so only enable the debugging + # integration if the runner claims it does; we don't want to accidentally + # clobber something on the runner. + if ((isinstance(runner, type) and + issubclass(runner, _pretty_print_reporter.TextTestRunner)) or + isinstance(runner, _pretty_print_reporter.TextTestRunner)): + runner.run_for_debugging = True + + # Make sure tmpdir exists. + if not os.path.isdir(TEST_TMPDIR.value): + try: + os.makedirs(TEST_TMPDIR.value) + except OSError as e: + # Concurrent test might have created the directory. + if e.errno != errno.EEXIST: + raise + + # Let unittest.TestProgram.__init__ do its own argv parsing, e.g. for '-v', + # on argv, which is sys.argv without the command-line flags. + kwargs['argv'] = argv + + # Request unittest.TestProgram to not exit. The exit will be handled by + # `absltest.run_tests`. + kwargs['exit'] = False + + try: + test_program = unittest.TestProgram(*args, **kwargs) + return test_program.result, fail_when_no_tests_ran + finally: + if xml_buffer: + try: + with _open(xml_output_file, 'w') as f: + f.write(xml_buffer.getvalue()) + finally: + xml_buffer.close() + + +def run_tests( + argv: MutableSequence[Text], + args: Sequence[Any], + kwargs: MutableMapping[Text, Any], +) -> None: + """Executes a set of Python unit tests. + + Most users should call absltest.main() instead of run_tests. + + Please note that run_tests should be called from app.run. + Calling absltest.main() would ensure that. + + Please note that run_tests is allowed to make changes to kwargs. + + Args: + argv: sys.argv with the command-line flags removed from the front, i.e. the + argv with which :func:`app.run()` has called + ``__main__.main``. It is passed to + ``unittest.TestProgram.__init__(argv=)``, which does its own flag parsing. + It is ignored if kwargs contains an argv entry. + args: Positional arguments passed through to + ``unittest.TestProgram.__init__``. + kwargs: Keyword arguments passed through to + ``unittest.TestProgram.__init__``. + """ + result, fail_when_no_tests_ran = _run_and_get_tests_result( + argv, args, kwargs, xml_reporter.TextAndXMLTestRunner + ) + if fail_when_no_tests_ran and result.testsRun == 0 and not result.skipped: + # Python 3.12 unittest exits with 5 when no tests ran. The exit code 5 comes + # from pytest which does the same thing. 
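  # Resulting exit statuses: 0 when the run succeeds, 1 when it does not
  # (sys.exit(not result.wasSuccessful()) passes True, which maps to status 1),
  # and 5 for an empty run under the Python 3.12+ convention handled here.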
+ sys.exit(5) + sys.exit(not result.wasSuccessful()) + + +def _rmtree_ignore_errors(path): + # type: (Text) -> None + if os.path.isfile(path): + try: + os.unlink(path) + except OSError: + pass + else: + shutil.rmtree(path, ignore_errors=True) + + +def _get_first_part(path): + # type: (Text) -> Text + parts = path.split(os.sep, 1) + return parts[0] diff --git a/venv/lib/python3.10/site-packages/absl/testing/flagsaver.py b/venv/lib/python3.10/site-packages/absl/testing/flagsaver.py new file mode 100644 index 0000000000000000000000000000000000000000..7df072292674f14d45b668fe4c36d61208697479 --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/testing/flagsaver.py @@ -0,0 +1,386 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Decorator and context manager for saving and restoring flag values. + +There are many ways to save and restore. Always use the most convenient method +for a given use case. + +Here are examples of each method. They all call ``do_stuff()`` while +``FLAGS.someflag`` is temporarily set to ``'foo'``:: + + from absl.testing import flagsaver + + # Use a decorator which can optionally override flags via arguments. + @flagsaver.flagsaver(someflag='foo') + def some_func(): + do_stuff() + + # Use a decorator which can optionally override flags with flagholders. + @flagsaver.flagsaver((module.FOO_FLAG, 'foo'), (other_mod.BAR_FLAG, 23)) + def some_func(): + do_stuff() + + # Use a decorator which does not override flags itself. + @flagsaver.flagsaver + def some_func(): + FLAGS.someflag = 'foo' + do_stuff() + + # Use a context manager which can optionally override flags via arguments. + with flagsaver.flagsaver(someflag='foo'): + do_stuff() + + # Save and restore the flag values yourself. + saved_flag_values = flagsaver.save_flag_values() + try: + FLAGS.someflag = 'foo' + do_stuff() + finally: + flagsaver.restore_flag_values(saved_flag_values) + + # Use the parsing version to emulate users providing the flags. + # Note that all flags must be provided as strings (unparsed). + @flagsaver.as_parsed(some_int_flag='123') + def some_func(): + # Because the flag was parsed it is considered "present". + assert FLAGS.some_int_flag.present + do_stuff() + + # flagsaver.as_parsed() can also be used as a context manager just like + # flagsaver.flagsaver() + with flagsaver.as_parsed(some_int_flag='123'): + do_stuff() + + # The flagsaver.as_parsed() interface also supports FlagHolder objects. + @flagsaver.as_parsed((module.FOO_FLAG, 'foo'), (other_mod.BAR_FLAG, '23')) + def some_func(): + do_stuff() + + # Using as_parsed with a multi_X flag requires a sequence of strings. 
+ @flagsaver.as_parsed(some_multi_int_flag=['123', '456']) + def some_func(): + assert FLAGS.some_multi_int_flag.present + do_stuff() + + # If a flag name includes non-identifier characters it can be specified like + # so: + @flagsaver.as_parsed(**{'i-like-dashes': 'true'}) + def some_func(): + do_stuff() + +We save and restore a shallow copy of each Flag object's ``__dict__`` attribute. +This preserves all attributes of the flag, such as whether or not it was +overridden from its default value. + +WARNING: Currently a flag that is saved and then deleted cannot be restored. An +exception will be raised. However if you *add* a flag after saving flag values, +and then restore flag values, the added flag will be deleted with no errors. +""" + +import collections +import functools +import inspect +from typing import overload, Any, Callable, Mapping, Tuple, TypeVar, Type, Sequence, Union + +from absl import flags + +FLAGS = flags.FLAGS + + +# The type of pre/post wrapped functions. +_CallableT = TypeVar('_CallableT', bound=Callable) + + +@overload +def flagsaver(*args: Tuple[flags.FlagHolder, Any], + **kwargs: Any) -> '_FlagOverrider': + ... + + +@overload +def flagsaver(func: _CallableT) -> _CallableT: + ... + + +def flagsaver(*args, **kwargs): + """The main flagsaver interface. See module doc for usage.""" + return _construct_overrider(_FlagOverrider, *args, **kwargs) + + +@overload +def as_parsed(*args: Tuple[flags.FlagHolder, Union[str, Sequence[str]]], + **kwargs: Union[str, Sequence[str]]) -> '_ParsingFlagOverrider': + ... + + +@overload +def as_parsed(func: _CallableT) -> _CallableT: + ... + + +def as_parsed(*args, **kwargs): + """Overrides flags by parsing strings, saves flag state similar to flagsaver. + + This function can be used as either a decorator or context manager similar to + flagsaver.flagsaver(). However, where flagsaver.flagsaver() directly sets the + flags to new values, this function will parse the provided arguments as if + they were provided on the command line. Among other things, this will cause + `FLAGS['flag_name'].present == True`. + + A note on unparsed input: For many flag types, the unparsed version will be + a single string. However for multi_x (multi_string, multi_integer, multi_enum) + the unparsed version will be a Sequence of strings. + + Args: + *args: Tuples of FlagHolders and their unparsed value. + **kwargs: The keyword args are flag names, and the values are unparsed + values. + + Returns: + _ParsingFlagOverrider that serves as a context manager or decorator. Will + save previous flag state and parse new flags, then on cleanup it will + restore the previous flag state. + """ + return _construct_overrider(_ParsingFlagOverrider, *args, **kwargs) + + +# NOTE: the order of these overload declarations matters. The type checker will +# pick the first match which could be incorrect. +@overload +def _construct_overrider( + flag_overrider_cls: Type['_ParsingFlagOverrider'], + *args: Tuple[flags.FlagHolder, Union[str, Sequence[str]]], + **kwargs: Union[str, Sequence[str]]) -> '_ParsingFlagOverrider': + ... + + +@overload +def _construct_overrider(flag_overrider_cls: Type['_FlagOverrider'], + *args: Tuple[flags.FlagHolder, Any], + **kwargs: Any) -> '_FlagOverrider': + ... + + +@overload +def _construct_overrider(flag_overrider_cls: Type['_FlagOverrider'], + func: _CallableT) -> _CallableT: + ... + + +def _construct_overrider(flag_overrider_cls, *args, **kwargs): + """Handles the args/kwargs returning an instance of flag_overrider_cls. 
+ + If flag_overrider_cls is _FlagOverrider then values should be native python + types matching the python types. Otherwise if flag_overrider_cls is + _ParsingFlagOverrider the values should be strings or sequences of strings. + + Args: + flag_overrider_cls: The class that will do the overriding. + *args: Tuples of FlagHolder and the new flag value. + **kwargs: Keword args mapping flag name to new flag value. + + Returns: + A _FlagOverrider to be used as a decorator or context manager. + """ + if not args: + return flag_overrider_cls(**kwargs) + # args can be [func] if used as `@flagsaver` instead of `@flagsaver(...)` + if len(args) == 1 and callable(args[0]): + if kwargs: + raise ValueError( + "It's invalid to specify both positional and keyword parameters.") + func = args[0] + if inspect.isclass(func): + raise TypeError('@flagsaver.flagsaver cannot be applied to a class.') + return _wrap(flag_overrider_cls, func, {}) + # args can be a list of (FlagHolder, value) pairs. + # In which case they augment any specified kwargs. + for arg in args: + if not isinstance(arg, tuple) or len(arg) != 2: + raise ValueError('Expected (FlagHolder, value) pair, found %r' % (arg,)) + holder, value = arg + if not isinstance(holder, flags.FlagHolder): + raise ValueError('Expected (FlagHolder, value) pair, found %r' % (arg,)) + if holder.name in kwargs: + raise ValueError('Cannot set --%s multiple times' % holder.name) + kwargs[holder.name] = value + return flag_overrider_cls(**kwargs) + + +def save_flag_values( + flag_values: flags.FlagValues = FLAGS) -> Mapping[str, Mapping[str, Any]]: + """Returns copy of flag values as a dict. + + Args: + flag_values: FlagValues, the FlagValues instance with which the flag will be + saved. This should almost never need to be overridden. + + Returns: + Dictionary mapping keys to values. Keys are flag names, values are + corresponding ``__dict__`` members. E.g. ``{'key': value_dict, ...}``. + """ + return {name: _copy_flag_dict(flag_values[name]) for name in flag_values} + + +def restore_flag_values(saved_flag_values: Mapping[str, Mapping[str, Any]], + flag_values: flags.FlagValues = FLAGS): + """Restores flag values based on the dictionary of flag values. + + Args: + saved_flag_values: {'flag_name': value_dict, ...} + flag_values: FlagValues, the FlagValues instance from which the flag will be + restored. This should almost never need to be overridden. + """ + new_flag_names = list(flag_values) + for name in new_flag_names: + saved = saved_flag_values.get(name) + if saved is None: + # If __dict__ was not saved delete "new" flag. + delattr(flag_values, name) + else: + if flag_values[name].value != saved['_value']: + flag_values[name].value = saved['_value'] # Ensure C++ value is set. + flag_values[name].__dict__ = saved + + +@overload +def _wrap(flag_overrider_cls: Type['_FlagOverrider'], func: _CallableT, + overrides: Mapping[str, Any]) -> _CallableT: + ... + + +@overload +def _wrap(flag_overrider_cls: Type['_ParsingFlagOverrider'], func: _CallableT, + overrides: Mapping[str, Union[str, Sequence[str]]]) -> _CallableT: + ... + + +def _wrap(flag_overrider_cls, func, overrides): + """Creates a wrapper function that saves/restores flag values. + + Args: + flag_overrider_cls: The class that will be used as a context manager. + func: This will be called between saving flags and restoring flags. + overrides: Flag names mapped to their values. These flags will be set after + saving the original flag state. 
The type of the values depends on if + _FlagOverrider or _ParsingFlagOverrider was specified. + + Returns: + A wrapped version of func. + """ + + @functools.wraps(func) + def _flagsaver_wrapper(*args, **kwargs): + """Wrapper function that saves and restores flags.""" + with flag_overrider_cls(**overrides): + return func(*args, **kwargs) + + return _flagsaver_wrapper + + +class _FlagOverrider(object): + """Overrides flags for the duration of the decorated function call. + + It also restores all original values of flags after decorated method + completes. + """ + + def __init__(self, **overrides: Any): + self._overrides = overrides + self._saved_flag_values = None + + def __call__(self, func: _CallableT) -> _CallableT: + if inspect.isclass(func): + raise TypeError('flagsaver cannot be applied to a class.') + return _wrap(self.__class__, func, self._overrides) + + def __enter__(self): + self._saved_flag_values = save_flag_values(FLAGS) + try: + FLAGS._set_attributes(**self._overrides) + except: + # It may fail because of flag validators. + restore_flag_values(self._saved_flag_values, FLAGS) + raise + + def __exit__(self, exc_type, exc_value, traceback): + restore_flag_values(self._saved_flag_values, FLAGS) + + +class _ParsingFlagOverrider(_FlagOverrider): + """Context manager for overriding flags. + + Simulates command line parsing. + + This is simlar to _FlagOverrider except that all **overrides should be + strings or sequences of strings, and when context is entered this class calls + .parse(value) + + This results in the flags having .present set properly. + """ + + def __init__(self, **overrides: Union[str, Sequence[str]]): + for flag_name, new_value in overrides.items(): + if isinstance(new_value, str): + continue + if (isinstance(new_value, collections.abc.Sequence) and + all(isinstance(single_value, str) for single_value in new_value)): + continue + raise TypeError( + f'flagsaver.as_parsed() cannot parse {flag_name}. Expected a single ' + f'string or sequence of strings but {type(new_value)} was provided.') + super().__init__(**overrides) + + def __enter__(self): + self._saved_flag_values = save_flag_values(FLAGS) + try: + for flag_name, unparsed_value in self._overrides.items(): + # LINT.IfChange(flag_override_parsing) + FLAGS[flag_name].parse(unparsed_value) + FLAGS[flag_name].using_default_value = False + # LINT.ThenChange() + + # Perform the validation on all modified flags. This is something that + # FLAGS._set_attributes() does for you in _FlagOverrider. + for flag_name in self._overrides: + FLAGS._assert_validators(FLAGS[flag_name].validators) + + except KeyError as e: + # If a flag doesn't exist, an UnrecognizedFlagError is more specific. + restore_flag_values(self._saved_flag_values, FLAGS) + raise flags.UnrecognizedFlagError('Unknown command line flag.') from e + + except: + # It may fail because of flag validators or general parsing issues. + restore_flag_values(self._saved_flag_values, FLAGS) + raise + + +def _copy_flag_dict(flag: flags.Flag) -> Mapping[str, Any]: + """Returns a copy of the flag object's ``__dict__``. + + It's mostly a shallow copy of the ``__dict__``, except it also does a shallow + copy of the validator list. + + Args: + flag: flags.Flag, the flag to copy. + + Returns: + A copy of the flag object's ``__dict__``. + """ + copy = flag.__dict__.copy() + copy['_value'] = flag.value # Ensure correct restore for C++ flags. 
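  # The validator list is snapshotted separately: dict.copy() alone would keep
  # sharing the underlying list object, so a validator registered after
  # save_flag_values() would otherwise survive the restore.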
+ copy['validators'] = list(flag.validators) + return copy diff --git a/venv/lib/python3.10/site-packages/absl/testing/parameterized.py b/venv/lib/python3.10/site-packages/absl/testing/parameterized.py new file mode 100644 index 0000000000000000000000000000000000000000..d3d2c2b51f83358d6e2c83531226c7ea07f526fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/testing/parameterized.py @@ -0,0 +1,724 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Adds support for parameterized tests to Python's unittest TestCase class. + +A parameterized test is a method in a test case that is invoked with different +argument tuples. + +A simple example:: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + (1, 2, 3), + (4, 5, 9), + (1, 1, 3)) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + +Each invocation is a separate test case and properly isolated just +like a normal test method, with its own setUp/tearDown cycle. In the +example above, there are three separate testcases, one of which will +fail due to an assertion error (1 + 1 != 3). + +Parameters for individual test cases can be tuples (with positional parameters) +or dictionaries (with named parameters):: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + {'op1': 1, 'op2': 2, 'result': 3}, + {'op1': 4, 'op2': 5, 'result': 9}, + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + +If a parameterized test fails, the error message will show the +original test name and the parameters for that test. + +The id method of the test, used internally by the unittest framework, is also +modified to show the arguments (but note that the name reported by `id()` +doesn't match the actual test name, see below). To make sure that test names +stay the same across several invocations, object representations like:: + + >>> class Foo(object): + ... pass + >>> repr(Foo()) + '<__main__.Foo object at 0x23d8610>' + +are turned into ``__main__.Foo``. When selecting a subset of test cases to run +on the command-line, the test cases contain an index suffix for each argument +in the order they were passed to :func:`parameters` (eg. testAddition0, +testAddition1, etc.) This naming scheme is subject to change; for more reliable +and stable names, especially in test logs, use :func:`named_parameters` instead. + +Tests using :func:`named_parameters` are similar to :func:`parameters`, except +only tuples or dicts of args are supported. For tuples, the first parameter arg +has to be a string (or an object that returns an apt name when converted via +``str()``). 
For dicts, a value for the key ``testcase_name`` must be present and +must be a string (or an object that returns an apt name when converted via +``str()``):: + + class NamedExample(parameterized.TestCase): + @parameterized.named_parameters( + ('Normal', 'aa', 'aaa', True), + ('EmptyPrefix', '', 'abc', True), + ('BothEmpty', '', '', True)) + def testStartsWith(self, prefix, string, result): + self.assertEqual(result, string.startswith(prefix)) + + class NamedExample(parameterized.TestCase): + @parameterized.named_parameters( + {'testcase_name': 'Normal', + 'result': True, 'string': 'aaa', 'prefix': 'aa'}, + {'testcase_name': 'EmptyPrefix', + 'result': True, 'string': 'abc', 'prefix': ''}, + {'testcase_name': 'BothEmpty', + 'result': True, 'string': '', 'prefix': ''}) + def testStartsWith(self, prefix, string, result): + self.assertEqual(result, string.startswith(prefix)) + +Named tests also have the benefit that they can be run individually +from the command line:: + + $ testmodule.py NamedExample.testStartsWithNormal + . + -------------------------------------------------------------------- + Ran 1 test in 0.000s + + OK + +Parameterized Classes +===================== + +If invocation arguments are shared across test methods in a single +TestCase class, instead of decorating all test methods +individually, the class itself can be decorated:: + + @parameterized.parameters( + (1, 2, 3), + (4, 5, 9)) + class ArithmeticTest(parameterized.TestCase): + def testAdd(self, arg1, arg2, result): + self.assertEqual(arg1 + arg2, result) + + def testSubtract(self, arg1, arg2, result): + self.assertEqual(result - arg1, arg2) + +Inputs from Iterables +===================== + +If parameters should be shared across several test cases, or are dynamically +created from other sources, a single non-tuple iterable can be passed into +the decorator. This iterable will be used to obtain the test cases:: + + class AdditionExample(parameterized.TestCase): + @parameterized.parameters( + c.op1, c.op2, c.result for c in testcases + ) + def testAddition(self, op1, op2, result): + self.assertEqual(result, op1 + op2) + + +Single-Argument Test Methods +============================ + +If a test method takes only one argument, the single arguments must not be +wrapped into a tuple:: + + class NegativeNumberExample(parameterized.TestCase): + @parameterized.parameters( + -1, -3, -4, -5 + ) + def testIsNegative(self, arg): + self.assertTrue(IsNegative(arg)) + + +List/tuple as a Single Argument +=============================== + +If a test method takes a single argument of a list/tuple, it must be wrapped +inside a tuple:: + + class ZeroSumExample(parameterized.TestCase): + @parameterized.parameters( + ([-1, 0, 1], ), + ([-2, 0, 2], ), + ) + def testSumIsZero(self, arg): + self.assertEqual(0, sum(arg)) + + +Cartesian product of Parameter Values as Parameterized Test Cases +================================================================= + +If required to test method over a cartesian product of parameters, +`parameterized.product` may be used to facilitate generation of parameters +test combinations:: + + class TestModuloExample(parameterized.TestCase): + @parameterized.product( + num=[0, 20, 80], + modulo=[2, 4], + expected=[0] + ) + def testModuloResult(self, num, modulo, expected): + self.assertEqual(expected, num % modulo) + +This results in 6 test cases being created - one for each combination of the +parameters. 
It is also possible to supply sequences of keyword argument dicts +as elements of the cartesian product:: + + @parameterized.product( + (dict(num=5, modulo=3, expected=2), + dict(num=7, modulo=4, expected=3)), + dtype=(int, float) + ) + def testModuloResult(self, num, modulo, expected, dtype): + self.assertEqual(expected, dtype(num) % modulo) + +This results in 4 test cases being created - for each of the two sets of test +data (supplied as kwarg dicts) and for each of the two data types (supplied as +a named parameter). Multiple keyword argument dicts may be supplied if required. + +Async Support +============= + +If a test needs to call async functions, it can inherit from both +parameterized.TestCase and another TestCase that supports async calls, such +as [asynctest](https://github.com/Martiusweb/asynctest):: + + import asynctest + + class AsyncExample(parameterized.TestCase, asynctest.TestCase): + @parameterized.parameters( + ('a', 1), + ('b', 2), + ) + async def testSomeAsyncFunction(self, arg, expected): + actual = await someAsyncFunction(arg) + self.assertEqual(actual, expected) +""" + +from collections import abc +import functools +import inspect +import itertools +import re +import types +import unittest +import warnings + +from absl.testing import absltest + + +_ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>') +_NAMED = object() +_ARGUMENT_REPR = object() +_NAMED_DICT_KEY = 'testcase_name' + + +class NoTestsError(Exception): + """Raised when parameterized decorators do not generate any tests.""" + + +class DuplicateTestNameError(Exception): + """Raised when a parameterized test has the same test name multiple times.""" + + def __init__(self, test_class_name, new_test_name, original_test_name): + super(DuplicateTestNameError, self).__init__( + 'Duplicate parameterized test name in {}: generated test name {!r} ' + '(generated from {!r}) already exists. Consider using ' + 'named_parameters() to give your tests unique names and/or renaming ' + 'the conflicting test method.'.format( + test_class_name, new_test_name, original_test_name)) + + +def _clean_repr(obj): + return _ADDR_RE.sub(r'<\1>', repr(obj)) + + +def _non_string_or_bytes_iterable(obj): + return (isinstance(obj, abc.Iterable) and not isinstance(obj, str) and + not isinstance(obj, bytes)) + + +def _format_parameter_list(testcase_params): + if isinstance(testcase_params, abc.Mapping): + return ', '.join('%s=%s' % (argname, _clean_repr(value)) + for argname, value in testcase_params.items()) + elif _non_string_or_bytes_iterable(testcase_params): + return ', '.join(map(_clean_repr, testcase_params)) + else: + return _format_parameter_list((testcase_params,)) + + +def _async_wrapped(func): + @functools.wraps(func) + async def wrapper(*args, **kwargs): + return await func(*args, **kwargs) + return wrapper + + +class _ParameterizedTestIter(object): + """Callable and iterable class for producing new test cases.""" + + def __init__(self, test_method, testcases, naming_type, original_name=None): + """Returns concrete test functions for a test and a list of parameters. + + The naming_type is used to determine the name of the concrete + functions as reported by the unittest framework. If naming_type is + _FIRST_ARG, the testcases must be tuples, and the first element must + have a string representation that is a valid Python identifier. + + Args: + test_method: The decorated test method. + testcases: (list of tuple/dict) A list of parameter tuples/dicts for + individual test invocations. 
+ naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR. + original_name: The original test method name. When decorated on a test + method, None is passed to __init__ and test_method.__name__ is used. + Note test_method.__name__ might be different than the original defined + test method because of the use of other decorators. A more accurate + value is set by TestGeneratorMetaclass.__new__ later. + """ + self._test_method = test_method + self.testcases = testcases + self._naming_type = naming_type + if original_name is None: + original_name = test_method.__name__ + self._original_name = original_name + self.__name__ = _ParameterizedTestIter.__name__ + + def __call__(self, *args, **kwargs): + raise RuntimeError('You appear to be running a parameterized test case ' + 'without having inherited from parameterized.' + 'TestCase. This is bad because none of ' + 'your test cases are actually being run. You may also ' + 'be using another decorator before the parameterized ' + 'one, in which case you should reverse the order.') + + def __iter__(self): + test_method = self._test_method + naming_type = self._naming_type + + def make_bound_param_test(testcase_params): + @functools.wraps(test_method) + def bound_param_test(self): + if isinstance(testcase_params, abc.Mapping): + return test_method(self, **testcase_params) + elif _non_string_or_bytes_iterable(testcase_params): + return test_method(self, *testcase_params) + else: + return test_method(self, testcase_params) + + if naming_type is _NAMED: + # Signal the metaclass that the name of the test function is unique + # and descriptive. + bound_param_test.__x_use_name__ = True + + testcase_name = None + if isinstance(testcase_params, abc.Mapping): + if _NAMED_DICT_KEY not in testcase_params: + raise RuntimeError( + 'Dict for named tests must contain key "%s"' % _NAMED_DICT_KEY) + # Create a new dict to avoid modifying the supplied testcase_params. + testcase_name = testcase_params[_NAMED_DICT_KEY] + testcase_params = { + k: v for k, v in testcase_params.items() if k != _NAMED_DICT_KEY + } + elif _non_string_or_bytes_iterable(testcase_params): + if not isinstance(testcase_params[0], str): + raise RuntimeError( + 'The first element of named test parameters is the test name ' + 'suffix and must be a string') + testcase_name = testcase_params[0] + testcase_params = testcase_params[1:] + else: + raise RuntimeError( + 'Named tests must be passed a dict or non-string iterable.') + + test_method_name = self._original_name + # Support PEP-8 underscore style for test naming if used. + if (test_method_name.startswith('test_') + and testcase_name + and not testcase_name.startswith('_')): + test_method_name += '_' + + bound_param_test.__name__ = test_method_name + str(testcase_name) + elif naming_type is _ARGUMENT_REPR: + # If it's a generator, convert it to a tuple and treat them as + # parameters. + if isinstance(testcase_params, types.GeneratorType): + testcase_params = tuple(testcase_params) + # The metaclass creates a unique, but non-descriptive method name for + # _ARGUMENT_REPR tests using an indexed suffix. + # To keep test names descriptive, only the original method name is used. + # To make sure test names are unique, we add a unique descriptive suffix + # __x_params_repr__ for every test. + params_repr = '(%s)' % (_format_parameter_list(testcase_params),) + bound_param_test.__x_params_repr__ = params_repr + else: + raise RuntimeError('%s is not a valid naming type.' 
% (naming_type,)) + + bound_param_test.__doc__ = '%s(%s)' % ( + bound_param_test.__name__, _format_parameter_list(testcase_params)) + if test_method.__doc__: + bound_param_test.__doc__ += '\n%s' % (test_method.__doc__,) + if inspect.iscoroutinefunction(test_method): + return _async_wrapped(bound_param_test) + return bound_param_test + + return (make_bound_param_test(c) for c in self.testcases) + + +def _modify_class(class_object, testcases, naming_type): + assert not getattr(class_object, '_test_params_reprs', None), ( + 'Cannot add parameters to %s. Either it already has parameterized ' + 'methods, or its super class is also a parameterized class.' % ( + class_object,)) + # NOTE: _test_params_repr is private to parameterized.TestCase and it's + # metaclass; do not use it outside of those classes. + class_object._test_params_reprs = test_params_reprs = {} + for name, obj in class_object.__dict__.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) + and isinstance(obj, types.FunctionType)): + delattr(class_object, name) + methods = {} + _update_class_dict_for_param_test_case( + class_object.__name__, methods, test_params_reprs, name, + _ParameterizedTestIter(obj, testcases, naming_type, name)) + for meth_name, meth in methods.items(): + setattr(class_object, meth_name, meth) + + +def _parameter_decorator(naming_type, testcases): + """Implementation of the parameterization decorators. + + Args: + naming_type: The naming type. + testcases: Testcase parameters. + + Raises: + NoTestsError: Raised when the decorator generates no tests. + + Returns: + A function for modifying the decorated object. + """ + def _apply(obj): + if isinstance(obj, type): + _modify_class(obj, testcases, naming_type) + return obj + else: + return _ParameterizedTestIter(obj, testcases, naming_type) + + if (len(testcases) == 1 and + not isinstance(testcases[0], tuple) and + not isinstance(testcases[0], abc.Mapping)): + # Support using a single non-tuple parameter as a list of test cases. + # Note that the single non-tuple parameter can't be Mapping either, which + # means a single dict parameter case. + assert _non_string_or_bytes_iterable(testcases[0]), ( + 'Single parameter argument must be a non-string non-Mapping iterable') + testcases = testcases[0] + + if not isinstance(testcases, abc.Sequence): + testcases = list(testcases) + if not testcases: + raise NoTestsError( + 'parameterized test decorators did not generate any tests. ' + 'Make sure you specify non-empty parameters, ' + 'and do not reuse generators more than once.') + + return _apply + + +def parameters(*testcases): + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. + + Args: + *testcases: Parameters for the decorated method, either a single + iterable, or a list of tuples/dicts/objects (for tests with only one + argument). + + Raises: + NoTestsError: Raised when the decorator generates no tests. + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _parameter_decorator(_ARGUMENT_REPR, testcases) + + +def named_parameters(*testcases): + """A decorator for creating parameterized tests. + + See the module docstring for a usage example. For every parameter tuple + passed, the first element of the tuple should be a string and will be appended + to the name of the test method. Each parameter dict passed must have a value + for the key "testcase_name", the string representation of that value will be + appended to the name of the test method. 
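+
+  For example, a minimal sketch (the class, method, and data below are
+  hypothetical, added here only for illustration)::
+
+    class SignTest(parameterized.TestCase):
+      @parameterized.named_parameters(
+          dict(testcase_name='negative', value=-1, expected=False),
+          dict(testcase_name='positive', value=1, expected=True),
+      )
+      def test_sign(self, value, expected):
+        self.assertEqual(expected, value > 0)
+
+  This generates test methods named test_sign_negative and test_sign_positive.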
+ + Args: + *testcases: Parameters for the decorated method, either a single iterable, + or a list of tuples or dicts. + + Raises: + NoTestsError: Raised when the decorator generates no tests. + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + return _parameter_decorator(_NAMED, testcases) + + +def product(*kwargs_seqs, **testgrid): + """A decorator for running tests over cartesian product of parameters values. + + See the module docstring for a usage example. The test will be run for every + possible combination of the parameters. + + Args: + *kwargs_seqs: Each positional parameter is a sequence of keyword arg dicts; + every test case generated will include exactly one kwargs dict from each + positional parameter; these will then be merged to form an overall list + of arguments for the test case. + **testgrid: A mapping of parameter names and their possible values. Possible + values should given as either a list or a tuple. + + Raises: + NoTestsError: Raised when the decorator generates no tests. + + Returns: + A test generator to be handled by TestGeneratorMetaclass. + """ + + for name, values in testgrid.items(): + assert isinstance(values, (list, tuple)), ( + 'Values of {} must be given as list or tuple, found {}'.format( + name, type(values))) + + prior_arg_names = set() + for kwargs_seq in kwargs_seqs: + assert ((isinstance(kwargs_seq, (list, tuple))) and + all(isinstance(kwargs, dict) for kwargs in kwargs_seq)), ( + 'Positional parameters must be a sequence of keyword arg' + 'dicts, found {}' + .format(kwargs_seq)) + if kwargs_seq: + arg_names = set(kwargs_seq[0]) + assert all(set(kwargs) == arg_names for kwargs in kwargs_seq), ( + 'Keyword argument dicts within a single parameter must all have the ' + 'same keys, found {}'.format(kwargs_seq)) + assert not (arg_names & prior_arg_names), ( + 'Keyword argument dict sequences must all have distinct argument ' + 'names, found duplicate(s) {}' + .format(sorted(arg_names & prior_arg_names))) + prior_arg_names |= arg_names + + assert not (prior_arg_names & set(testgrid)), ( + 'Arguments supplied in kwargs dicts in positional parameters must not ' + 'overlap with arguments supplied as named parameters; found duplicate ' + 'argument(s) {}'.format(sorted(prior_arg_names & set(testgrid)))) + + # Convert testgrid into a sequence of sequences of kwargs dicts and combine + # with the positional parameters. + # So foo=[1,2], bar=[3,4] --> [[{foo: 1}, {foo: 2}], [{bar: 3, bar: 4}]] + testgrid = (tuple({k: v} for v in vs) for k, vs in testgrid.items()) + testgrid = tuple(kwargs_seqs) + tuple(testgrid) + + # Create all possible combinations of parameters as a cartesian product + # of parameter values. + testcases = [ + dict(itertools.chain.from_iterable(case.items() + for case in cases)) + for cases in itertools.product(*testgrid) + ] + return _parameter_decorator(_ARGUMENT_REPR, testcases) + + +class TestGeneratorMetaclass(type): + """Metaclass for adding tests generated by parameterized decorators.""" + + def __new__(cls, class_name, bases, dct): + # NOTE: _test_params_repr is private to parameterized.TestCase and it's + # metaclass; do not use it outside of those classes. + test_params_reprs = dct.setdefault('_test_params_reprs', {}) + for name, obj in dct.copy().items(): + if (name.startswith(unittest.TestLoader.testMethodPrefix) and + _non_string_or_bytes_iterable(obj)): + # NOTE: `obj` might not be a _ParameterizedTestIter in two cases: + # 1. 
a class-level iterable named test* that isn't a test, such as + # a list of something. Such attributes get deleted from the class. + # + # 2. If a decorator is applied to the parameterized test, e.g. + # @morestuff + # @parameterized.parameters(...) + # def test_foo(...): ... + # + # This is OK so long as the underlying parameterized function state + # is forwarded (e.g. using functool.wraps() and **without** + # accessing explicitly accessing the internal attributes. + if isinstance(obj, _ParameterizedTestIter): + # Update the original test method name so it's more accurate. + # The mismatch might happen when another decorator is used inside + # the parameterized decrators, and the inner decorator doesn't + # preserve its __name__. + obj._original_name = name + iterator = iter(obj) + dct.pop(name) + _update_class_dict_for_param_test_case( + class_name, dct, test_params_reprs, name, iterator) + # If the base class is a subclass of parameterized.TestCase, inherit its + # _test_params_reprs too. + for base in bases: + # Check if the base has _test_params_reprs first, then check if it's a + # subclass of parameterized.TestCase. Otherwise when this is called for + # the parameterized.TestCase definition itself, this raises because + # itself is not defined yet. This works as long as absltest.TestCase does + # not define _test_params_reprs. + base_test_params_reprs = getattr(base, '_test_params_reprs', None) + if base_test_params_reprs and issubclass(base, TestCase): + for test_method, test_method_id in base_test_params_reprs.items(): + # test_method may both exists in base and this class. + # This class's method overrides base class's. + # That's why it should only inherit it if it does not exist. + test_params_reprs.setdefault(test_method, test_method_id) + + return type.__new__(cls, class_name, bases, dct) + + +def _update_class_dict_for_param_test_case( + test_class_name, dct, test_params_reprs, name, iterator): + """Adds individual test cases to a dictionary. + + Args: + test_class_name: The name of the class tests are added to. + dct: The target dictionary. + test_params_reprs: The dictionary for mapping names to test IDs. + name: The original name of the test case. + iterator: The iterator generating the individual test cases. + + Raises: + DuplicateTestNameError: Raised when a test name occurs multiple times. + RuntimeError: If non-parameterized functions are generated. + """ + for idx, func in enumerate(iterator): + assert callable(func), 'Test generators must yield callables, got %r' % ( + func,) + if not (getattr(func, '__x_use_name__', None) or + getattr(func, '__x_params_repr__', None)): + raise RuntimeError( + '{}.{} generated a test function without using the parameterized ' + 'decorators. Only tests generated using the decorators are ' + 'supported.'.format(test_class_name, name)) + + if getattr(func, '__x_use_name__', False): + original_name = func.__name__ + new_name = original_name + else: + original_name = name + new_name = '%s%d' % (original_name, idx) + + if new_name in dct: + raise DuplicateTestNameError(test_class_name, new_name, original_name) + + dct[new_name] = func + test_params_reprs[new_name] = getattr(func, '__x_params_repr__', '') + + +class TestCase(absltest.TestCase, metaclass=TestGeneratorMetaclass): + """Base class for test cases using the parameters decorator.""" + + # visibility: private; do not call outside this class. 
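+
+  # Illustrative note (the example below is hypothetical, added for clarity):
+  # for a test case such as
+  #
+  #   class MyTest(parameterized.TestCase):
+  #     @parameterized.parameters((1, 2), (3, 4))
+  #     def test_less(self, a, b):
+  #       self.assertLess(a, b)
+  #
+  # the metaclass generates methods named test_less0 and test_less1, and
+  # __str__/id() below append the parameter repr, e.g.
+  # "test_less0 (1, 2) (__main__.MyTest)".
+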
+ def _get_params_repr(self): + return self._test_params_reprs.get(self._testMethodName, '') + + def __str__(self): + params_repr = self._get_params_repr() + if params_repr: + params_repr = ' ' + params_repr + return '{}{} ({})'.format( + self._testMethodName, params_repr, + unittest.util.strclass(self.__class__)) + + def id(self): + """Returns the descriptive ID of the test. + + This is used internally by the unittesting framework to get a name + for the test to be used in reports. + + Returns: + The test id. + """ + base = super(TestCase, self).id() + params_repr = self._get_params_repr() + if params_repr: + # We include the params in the id so that, when reported in the + # test.xml file, the value is more informative than just "test_foo0". + # Use a space to separate them so that it's copy/paste friendly and + # easy to identify the actual test id. + return '{} {}'.format(base, params_repr) + else: + return base + + +# This function is kept CamelCase because it's used as a class's base class. +def CoopTestCase(other_base_class): # pylint: disable=invalid-name + """Returns a new base class with a cooperative metaclass base. + + This enables the TestCase to be used in combination + with other base classes that have custom metaclasses, such as + ``mox.MoxTestBase``. + + Only works with metaclasses that do not override ``type.__new__``. + + Example:: + + from absl.testing import parameterized + + class ExampleTest(parameterized.CoopTestCase(OtherTestCase)): + ... + + Args: + other_base_class: (class) A test case base class. + + Returns: + A new class object. + """ + # If the other base class has a metaclass of 'type' then trying to combine + # the metaclasses will result in an MRO error. So simply combine them and + # return. + if type(other_base_class) == type: # pylint: disable=unidiomatic-typecheck + warnings.warn( + 'CoopTestCase is only necessary when combining with a class that uses' + ' a metaclass. Use multiple inheritance like this instead: class' + f' ExampleTest(paramaterized.TestCase, {other_base_class.__name__}):', + stacklevel=2, + ) + + class CoopTestCaseBase(other_base_class, TestCase): + pass + + return CoopTestCaseBase + else: + + class CoopMetaclass(type(other_base_class), TestGeneratorMetaclass): # pylint: disable=unused-variable + pass + + class CoopTestCaseBase(other_base_class, TestCase, metaclass=CoopMetaclass): + pass + + return CoopTestCaseBase diff --git a/venv/lib/python3.10/site-packages/absl/testing/xml_reporter.py b/venv/lib/python3.10/site-packages/absl/testing/xml_reporter.py new file mode 100644 index 0000000000000000000000000000000000000000..4fcb60c8545c6fbdcd993964dab61b9f833298e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/absl/testing/xml_reporter.py @@ -0,0 +1,563 @@ +# Copyright 2017 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
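+
+# Illustrative usage sketch (not part of the original source; absltest
+# normally selects this runner when an XML output path is configured, but it
+# can also be driven directly):
+#
+#   import sys
+#   import unittest
+#   from absl.testing import xml_reporter
+#
+#   with open('results.xml', 'w') as xml_stream:
+#     runner = xml_reporter.TextAndXMLTestRunner(
+#         xml_stream=xml_stream, stream=sys.stderr, verbosity=2)
+#     runner.run(unittest.defaultTestLoader.discover('.'))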
+ +"""A Python test reporter that generates test reports in JUnit XML format.""" + +import datetime +import re +import sys +import threading +import time +import traceback +import unittest +from xml.sax import saxutils +from absl.testing import _pretty_print_reporter + + +# See http://www.w3.org/TR/REC-xml/#NT-Char +_bad_control_character_codes = set(range(0, 0x20)) - {0x9, 0xA, 0xD} + + +_control_character_conversions = { + chr(i): '\\x{:02x}'.format(i) for i in _bad_control_character_codes} + + +_escape_xml_attr_conversions = { + '"': '"', + "'": ''', + '\n': ' ', + '\t': ' ', + '\r': ' ', + ' ': ' '} +_escape_xml_attr_conversions.update(_control_character_conversions) + + +# When class or module level function fails, unittest/suite.py adds a +# _ErrorHolder instance instead of a real TestCase, and it has a description +# like "setUpClass (__main__.MyTestCase)". +_CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX = re.compile(r'^(\w+) \((\S+)\)$') + + +# NOTE: while saxutils.quoteattr() theoretically does the same thing; it +# seems to often end up being too smart for it's own good not escaping properly. +# This function is much more reliable. +def _escape_xml_attr(content): + """Escapes xml attributes.""" + # Note: saxutils doesn't escape the quotes. + return saxutils.escape(content, _escape_xml_attr_conversions) + + +def _escape_cdata(s): + """Escapes a string to be used as XML CDATA. + + CDATA characters are treated strictly as character data, not as XML markup, + but there are still certain restrictions on them. + + Args: + s: the string to be escaped. + Returns: + An escaped version of the input string. + """ + for char, escaped in _control_character_conversions.items(): + s = s.replace(char, escaped) + return s.replace(']]>', ']] >') + + +def _iso8601_timestamp(timestamp): + """Produces an ISO8601 datetime. + + Args: + timestamp: an Epoch based timestamp in seconds. + + Returns: + A iso8601 format timestamp if the input is a valid timestamp, None otherwise + """ + if timestamp is None or timestamp < 0: + return None + return datetime.datetime.fromtimestamp( + timestamp, tz=datetime.timezone.utc).isoformat() + + +def _print_xml_element_header(element, attributes, stream, indentation=''): + """Prints an XML header of an arbitrary element. + + Args: + element: element name (testsuites, testsuite, testcase) + attributes: 2-tuple list with (attributes, values) already escaped + stream: output stream to write test report XML to + indentation: indentation added to the element header + """ + stream.write('%s<%s' % (indentation, element)) + for attribute in attributes: + if (len(attribute) == 2 and attribute[0] is not None and + attribute[1] is not None): + stream.write(' %s="%s"' % (attribute[0], attribute[1])) + stream.write('>\n') + +# Copy time.time which ensures the real time is used internally. +# This prevents bad interactions with tests that stub out time. +_time_copy = time.time + +if hasattr(traceback, '_some_str'): + # Use the traceback module str function to format safely. + _safe_str = traceback._some_str +else: + _safe_str = str # pylint: disable=invalid-name + + +class _TestCaseResult(object): + """Private helper for _TextAndXMLTestResult that represents a test result. + + Attributes: + test: A TestCase instance of an individual test method. + name: The name of the individual test method. + full_class_name: The full name of the test class. + run_time: The duration (in seconds) it took to run the test. 
+ start_time: Epoch relative timestamp of when test started (in seconds) + errors: A list of error 4-tuples. Error tuple entries are + 1) a string identifier of either "failure" or "error" + 2) an exception_type + 3) an exception_message + 4) a string version of a sys.exc_info()-style tuple of values + ('error', err[0], err[1], self._exc_info_to_string(err)) + If the length of errors is 0, then the test is either passed or + skipped. + skip_reason: A string explaining why the test was skipped. + """ + + def __init__(self, test): + self.run_time = -1 + self.start_time = -1 + self.skip_reason = None + self.errors = [] + self.test = test + + # Parse the test id to get its test name and full class path. + # Unfortunately there is no better way of knowning the test and class. + # Worse, unittest uses _ErrorHandler instances to represent class / module + # level failures. + test_desc = test.id() or str(test) + # Check if it's something like "setUpClass (__main__.TestCase)". + match = _CLASS_OR_MODULE_LEVEL_TEST_DESC_REGEX.match(test_desc) + if match: + name = match.group(1) + full_class_name = match.group(2) + else: + class_name = unittest.util.strclass(test.__class__) + if isinstance(test, unittest.case._SubTest): + # If the test case is a _SubTest, the real TestCase instance is + # available as _SubTest.test_case. + class_name = unittest.util.strclass(test.test_case.__class__) + if test_desc.startswith(class_name + '.'): + # In a typical unittest.TestCase scenario, test.id() returns with + # a class name formatted using unittest.util.strclass. + name = test_desc[len(class_name)+1:] + full_class_name = class_name + else: + # Otherwise make a best effort to guess the test name and full class + # path. + parts = test_desc.rsplit('.', 1) + name = parts[-1] + full_class_name = parts[0] if len(parts) == 2 else '' + self.name = _escape_xml_attr(name) + self.full_class_name = _escape_xml_attr(full_class_name) + + def set_run_time(self, time_in_secs): + self.run_time = time_in_secs + + def set_start_time(self, time_in_secs): + self.start_time = time_in_secs + + def print_xml_summary(self, stream): + """Prints an XML Summary of a TestCase. + + Status and result are populated as per JUnit XML test result reporter. + A test that has been skipped will always have a skip reason, + as every skip method in Python's unittest requires the reason arg to be + passed. 
+
+    Args:
+      stream: output stream to write test report XML to
+    """
+
+    if self.skip_reason is None:
+      status = 'run'
+      result = 'completed'
+    else:
+      status = 'notrun'
+      result = 'suppressed'
+
+    test_case_attributes = [
+        ('name', '%s' % self.name),
+        ('status', '%s' % status),
+        ('result', '%s' % result),
+        ('time', '%.3f' % self.run_time),
+        ('classname', self.full_class_name),
+        ('timestamp', _iso8601_timestamp(self.start_time)),
+    ]
+    _print_xml_element_header('testcase', test_case_attributes, stream, '  ')
+    self._print_testcase_details(stream)
+    stream.write('  </testcase>\n')
+
+  def _print_testcase_details(self, stream):
+    for error in self.errors:
+      outcome, exception_type, message, error_msg = error  # pylint: disable=unpacking-non-sequence
+      message = _escape_xml_attr(_safe_str(message))
+      exception_type = _escape_xml_attr(str(exception_type))
+      error_msg = _escape_cdata(error_msg)
+      stream.write('    <%s message="%s" type="%s"><![CDATA[%s]]></%s>\n'
+                   % (outcome, message, exception_type, error_msg, outcome))
+
+
+class _TestSuiteResult(object):
+  """Private helper for _TextAndXMLTestResult."""
+
+  def __init__(self):
+    self.suites = {}
+    self.failure_counts = {}
+    self.error_counts = {}
+    self.overall_start_time = -1
+    self.overall_end_time = -1
+    self._testsuites_properties = {}
+
+  def add_test_case_result(self, test_case_result):
+    suite_name = type(test_case_result.test).__name__
+    if suite_name == '_ErrorHolder':
+      # _ErrorHolder is a special case created by unittest for class / module
+      # level functions.
+      suite_name = test_case_result.full_class_name.rsplit('.')[-1]
+    if isinstance(test_case_result.test, unittest.case._SubTest):
+      # If the test case is a _SubTest, the real TestCase instance is
+      # available as _SubTest.test_case.
+      suite_name = type(test_case_result.test.test_case).__name__
+
+    self._setup_test_suite(suite_name)
+    self.suites[suite_name].append(test_case_result)
+    for error in test_case_result.errors:
+      # Only count the first failure or error so that the sum is equal to the
+      # total number of *testcases* that have failures or errors.
+      if error[0] == 'failure':
+        self.failure_counts[suite_name] += 1
+        break
+      elif error[0] == 'error':
+        self.error_counts[suite_name] += 1
+        break
+
+  def print_xml_summary(self, stream):
+    overall_test_count = sum(len(x) for x in self.suites.values())
+    overall_failures = sum(self.failure_counts.values())
+    overall_errors = sum(self.error_counts.values())
+    overall_attributes = [
+        ('name', ''),
+        ('tests', '%d' % overall_test_count),
+        ('failures', '%d' % overall_failures),
+        ('errors', '%d' % overall_errors),
+        ('time', '%.3f' % (self.overall_end_time - self.overall_start_time)),
+        ('timestamp', _iso8601_timestamp(self.overall_start_time)),
+    ]
+    _print_xml_element_header('testsuites', overall_attributes, stream)
+    if self._testsuites_properties:
+      stream.write('    <properties>\n')
+      for name, value in sorted(self._testsuites_properties.items()):
+        stream.write('      <property name="%s" value="%s"></property>\n' %
+                     (_escape_xml_attr(name), _escape_xml_attr(str(value))))
+      stream.write('    </properties>\n')
+
+    for suite_name in self.suites:
+      suite = self.suites[suite_name]
+      suite_end_time = max(x.start_time + x.run_time for x in suite)
+      suite_start_time = min(x.start_time for x in suite)
+      failures = self.failure_counts[suite_name]
+      errors = self.error_counts[suite_name]
+      suite_attributes = [
+          ('name', '%s' % suite_name),
+          ('tests', '%d' % len(suite)),
+          ('failures', '%d' % failures),
+          ('errors', '%d' % errors),
+          ('time', '%.3f' % (suite_end_time - suite_start_time)),
+          ('timestamp', _iso8601_timestamp(suite_start_time)),
+      ]
+      _print_xml_element_header('testsuite', suite_attributes, stream)
+
+      # test_case_result entries are not guaranteed to be in any user-friendly
+      # order, especially when using subtests. So sort them.
+      for test_case_result in sorted(suite, key=lambda t: t.name):
+        test_case_result.print_xml_summary(stream)
+      stream.write('</testsuite>\n')
+    stream.write('</testsuites>\n')
+
+  def _setup_test_suite(self, suite_name):
+    """Adds a test suite to the set of suites tracked by this test run.
+
+    Args:
+      suite_name: string, The name of the test suite being initialized.
+    """
+    if suite_name in self.suites:
+      return
+    self.suites[suite_name] = []
+    self.failure_counts[suite_name] = 0
+    self.error_counts[suite_name] = 0
+
+  def set_end_time(self, timestamp_in_secs):
+    """Sets the end timestamp of this test suite.
+
+    Args:
+      timestamp_in_secs: timestamp in seconds since epoch
+    """
+    self.overall_end_time = timestamp_in_secs
+
+  def set_start_time(self, timestamp_in_secs):
+    """Sets the start timestamp of this test suite.
+
+    Args:
+      timestamp_in_secs: timestamp in seconds since epoch
+    """
+    self.overall_start_time = timestamp_in_secs
+
+
+class _TextAndXMLTestResult(_pretty_print_reporter.TextTestResult):
+  """Private TestResult class that produces both formatted text results and XML.
+
+  Used by TextAndXMLTestRunner.
+  """
+
+  _TEST_SUITE_RESULT_CLASS = _TestSuiteResult
+  _TEST_CASE_RESULT_CLASS = _TestCaseResult
+
+  def __init__(self, xml_stream, stream, descriptions, verbosity,
+               time_getter=_time_copy, testsuites_properties=None):
+    super(_TextAndXMLTestResult, self).__init__(stream, descriptions, verbosity)
+    self.xml_stream = xml_stream
+    self.pending_test_case_results = {}
+    self.suite = self._TEST_SUITE_RESULT_CLASS()
+    if testsuites_properties:
+      self.suite._testsuites_properties = testsuites_properties
+    self.time_getter = time_getter
+
+    # This lock guards any mutations on pending_test_case_results.
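+    # The lock is taken in stopTest, stopTestRun, and the
+    # add/delete_pending_test_case_result helpers below, so bookkeeping on
+    # pending results stays consistent even when outcome callbacks overlap.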
+ self._pending_test_case_results_lock = threading.RLock() + + def startTest(self, test): + self.start_time = self.time_getter() + super(_TextAndXMLTestResult, self).startTest(test) + + def stopTest(self, test): + # Grabbing the write lock to avoid conflicting with stopTestRun. + with self._pending_test_case_results_lock: + super(_TextAndXMLTestResult, self).stopTest(test) + result = self.get_pending_test_case_result(test) + if not result: + test_name = test.id() or str(test) + sys.stderr.write('No pending test case: %s\n' % test_name) + return + if getattr(self, 'start_time', None) is None: + # startTest may not be called for skipped tests since Python 3.12.1. + self.start_time = self.time_getter() + test_id = id(test) + run_time = self.time_getter() - self.start_time + result.set_run_time(run_time) + result.set_start_time(self.start_time) + self.suite.add_test_case_result(result) + del self.pending_test_case_results[test_id] + + def startTestRun(self): + self.suite.set_start_time(self.time_getter()) + super(_TextAndXMLTestResult, self).startTestRun() + + def stopTestRun(self): + self.suite.set_end_time(self.time_getter()) + # All pending_test_case_results will be added to the suite and removed from + # the pending_test_case_results dictionary. Grabbing the write lock to avoid + # results from being added during this process to avoid duplicating adds or + # accidentally erasing newly appended pending results. + with self._pending_test_case_results_lock: + # Errors in the test fixture (setUpModule, tearDownModule, + # setUpClass, tearDownClass) can leave a pending result which + # never gets added to the suite. The runner calls stopTestRun + # which gives us an opportunity to add these errors for + # reporting here. + for test_id in self.pending_test_case_results: + result = self.pending_test_case_results[test_id] + if getattr(self, 'start_time', None) is not None: + run_time = self.suite.overall_end_time - self.start_time + result.set_run_time(run_time) + result.set_start_time(self.start_time) + self.suite.add_test_case_result(result) + self.pending_test_case_results.clear() + + def _exc_info_to_string(self, err, test=None): + """Converts a sys.exc_info()-style tuple of values into a string. + + This method must be overridden because the method signature in + unittest.TestResult changed between Python 2.2 and 2.4. + + Args: + err: A sys.exc_info() tuple of values for an error. + test: The test method. + + Returns: + A formatted exception string. + """ + if test: + return super(_TextAndXMLTestResult, self)._exc_info_to_string(err, test) + return ''.join(traceback.format_exception(*err)) + + def add_pending_test_case_result(self, test, error_summary=None, + skip_reason=None): + """Adds result information to a test case result which may still be running. + + If a result entry for the test already exists, add_pending_test_case_result + will add error summary tuples and/or overwrite skip_reason for the result. + If it does not yet exist, a result entry will be created. + Note that a test result is considered to have been run and passed + only if there are no errors or skip_reason. + + Args: + test: A test method as defined by unittest + error_summary: A 4-tuple with the following entries: + 1) a string identifier of either "failure" or "error" + 2) an exception_type + 3) an exception_message + 4) a string version of a sys.exc_info()-style tuple of values + ('error', err[0], err[1], self._exc_info_to_string(err)) + If the length of errors is 0, then the test is either passed or + skipped. 
+ skip_reason: a string explaining why the test was skipped + """ + with self._pending_test_case_results_lock: + test_id = id(test) + if test_id not in self.pending_test_case_results: + self.pending_test_case_results[test_id] = self._TEST_CASE_RESULT_CLASS( + test) + if error_summary: + self.pending_test_case_results[test_id].errors.append(error_summary) + if skip_reason: + self.pending_test_case_results[test_id].skip_reason = skip_reason + + def delete_pending_test_case_result(self, test): + with self._pending_test_case_results_lock: + test_id = id(test) + del self.pending_test_case_results[test_id] + + def get_pending_test_case_result(self, test): + test_id = id(test) + return self.pending_test_case_results.get(test_id, None) + + def addSuccess(self, test): + super(_TextAndXMLTestResult, self).addSuccess(test) + self.add_pending_test_case_result(test) + + def addError(self, test, err): + super(_TextAndXMLTestResult, self).addError(test, err) + error_summary = ('error', err[0], err[1], + self._exc_info_to_string(err, test=test)) + self.add_pending_test_case_result(test, error_summary=error_summary) + + def addFailure(self, test, err): + super(_TextAndXMLTestResult, self).addFailure(test, err) + error_summary = ('failure', err[0], err[1], + self._exc_info_to_string(err, test=test)) + self.add_pending_test_case_result(test, error_summary=error_summary) + + def addSkip(self, test, reason): + super(_TextAndXMLTestResult, self).addSkip(test, reason) + self.add_pending_test_case_result(test, skip_reason=reason) + + def addExpectedFailure(self, test, err): + super(_TextAndXMLTestResult, self).addExpectedFailure(test, err) + if callable(getattr(test, 'recordProperty', None)): + test.recordProperty('EXPECTED_FAILURE', + self._exc_info_to_string(err, test=test)) + self.add_pending_test_case_result(test) + + def addUnexpectedSuccess(self, test): + super(_TextAndXMLTestResult, self).addUnexpectedSuccess(test) + test_name = test.id() or str(test) + error_summary = ('error', '', '', + 'Test case %s should have failed, but passed.' + % (test_name)) + self.add_pending_test_case_result(test, error_summary=error_summary) + + def addSubTest(self, test, subtest, err): # pylint: disable=invalid-name + super(_TextAndXMLTestResult, self).addSubTest(test, subtest, err) + if err is not None: + if issubclass(err[0], test.failureException): + error_summary = ('failure', err[0], err[1], + self._exc_info_to_string(err, test=test)) + else: + error_summary = ('error', err[0], err[1], + self._exc_info_to_string(err, test=test)) + else: + error_summary = None + self.add_pending_test_case_result(subtest, error_summary=error_summary) + + def printErrors(self): + super(_TextAndXMLTestResult, self).printErrors() + self.xml_stream.write('\n') + self.suite.print_xml_summary(self.xml_stream) + + +class TextAndXMLTestRunner(unittest.TextTestRunner): + """A test runner that produces both formatted text results and XML. + + It prints out the names of tests as they are run, errors as they + occur, and a summary of the results at the end of the test run. + """ + + _TEST_RESULT_CLASS = _TextAndXMLTestResult + + _xml_stream = None + _testsuites_properties = {} + + def __init__(self, xml_stream=None, *args, **kwargs): + """Initialize a TextAndXMLTestRunner. + + Args: + xml_stream: file-like or None; XML-formatted test results are output + via this object's write() method. If None (the default), the + new instance behaves as described in the set_default_xml_stream method + documentation below. 
+ *args: passed unmodified to unittest.TextTestRunner.__init__. + **kwargs: passed unmodified to unittest.TextTestRunner.__init__. + """ + super(TextAndXMLTestRunner, self).__init__(*args, **kwargs) + if xml_stream is not None: + self._xml_stream = xml_stream + # else, do not set self._xml_stream to None -- this allows implicit fallback + # to the class attribute's value. + + @classmethod + def set_default_xml_stream(cls, xml_stream): + """Sets the default XML stream for the class. + + Args: + xml_stream: file-like or None; used for instances when xml_stream is None + or not passed to their constructors. If None is passed, instances + created with xml_stream=None will act as ordinary TextTestRunner + instances; this is the default state before any calls to this method + have been made. + """ + cls._xml_stream = xml_stream + + def _makeResult(self): + if self._xml_stream is None: + return super(TextAndXMLTestRunner, self)._makeResult() + else: + return self._TEST_RESULT_CLASS( + self._xml_stream, self.stream, self.descriptions, self.verbosity, + testsuites_properties=self._testsuites_properties) + + @classmethod + def set_testsuites_property(cls, key, value): + cls._testsuites_properties[key] = value diff --git a/venv/lib/python3.10/site-packages/psutil-5.9.8.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/psutil-5.9.8.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil-5.9.8.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/psutil-5.9.8.dist-info/LICENSE b/venv/lib/python3.10/site-packages/psutil-5.9.8.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..cff5eb74e1badd1c5237ed2654b349530179ad1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil-5.9.8.dist-info/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of the psutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/venv/lib/python3.10/site-packages/psutil-5.9.8.dist-info/METADATA b/venv/lib/python3.10/site-packages/psutil-5.9.8.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..1815b243bc8480c1df3c73a79e1dda1ff74ea2f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/psutil-5.9.8.dist-info/METADATA @@ -0,0 +1,530 @@ +Metadata-Version: 2.1 +Name: psutil +Version: 5.9.8 +Summary: Cross-platform lib for process and system monitoring in Python. +Home-page: https://github.com/giampaolo/psutil +Author: Giampaolo Rodola +Author-email: g.rodola@gmail.com +License: BSD-3-Clause +Keywords: ps,top,kill,free,lsof,netstat,nice,tty,ionice,uptime,taskmgr,process,df,iotop,iostat,ifconfig,taskset,who,pidof,pmap,smem,pstree,monitoring,ulimit,prlimit,smem,performance,metrics,agent,observability +Platform: Platform Independent +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Environment :: Win32 (MS Windows) +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows :: Windows 10 +Classifier: Operating System :: Microsoft :: Windows :: Windows 7 +Classifier: Operating System :: Microsoft :: Windows :: Windows 8 +Classifier: Operating System :: Microsoft :: Windows :: Windows 8.1 +Classifier: Operating System :: Microsoft :: Windows :: Windows Server 2003 +Classifier: Operating System :: Microsoft :: Windows :: Windows Server 2008 +Classifier: Operating System :: Microsoft :: Windows :: Windows Vista +Classifier: Operating System :: Microsoft +Classifier: Operating System :: OS Independent +Classifier: Operating System :: POSIX :: AIX +Classifier: Operating System :: POSIX :: BSD :: FreeBSD +Classifier: Operating System :: POSIX :: BSD :: NetBSD +Classifier: Operating System :: POSIX :: BSD :: OpenBSD +Classifier: Operating System :: POSIX :: BSD +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: POSIX :: SunOS/Solaris +Classifier: Operating System :: POSIX +Classifier: Programming Language :: C +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: System :: Benchmark +Classifier: Topic :: System :: Hardware :: Hardware Drivers +Classifier: Topic :: System :: Hardware +Classifier: Topic :: System :: Monitoring +Classifier: Topic :: System :: Networking :: Monitoring :: Hardware Watchdog +Classifier: Topic :: System :: Networking :: Monitoring +Classifier: Topic :: System :: Networking +Classifier: Topic :: System :: Operating System +Classifier: Topic :: System :: Systems Administration +Classifier: Topic :: Utilities +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.* +Description-Content-Type: text/x-rst +License-File: LICENSE +Provides-Extra: test +Requires-Dist: ipaddress ; (python_version < "3.0") and extra == 'test' +Requires-Dist: mock ; (python_version < "3.0") and extra == 
'test' +Requires-Dist: enum34 ; (python_version <= "3.4") and extra == 'test' +Requires-Dist: pywin32 ; (sys_platform == "win32") and extra == 'test' +Requires-Dist: wmi ; (sys_platform == "win32") and extra == 'test' + +| |downloads| |stars| |forks| |contributors| |coverage| +| |version| |py-versions| |packages| |license| +| |github-actions-wheels| |github-actions-bsd| |appveyor| |doc| |twitter| |tidelift| + +.. |downloads| image:: https://img.shields.io/pypi/dm/psutil.svg + :target: https://pepy.tech/project/psutil + :alt: Downloads + +.. |stars| image:: https://img.shields.io/github/stars/giampaolo/psutil.svg + :target: https://github.com/giampaolo/psutil/stargazers + :alt: Github stars + +.. |forks| image:: https://img.shields.io/github/forks/giampaolo/psutil.svg + :target: https://github.com/giampaolo/psutil/network/members + :alt: Github forks + +.. |contributors| image:: https://img.shields.io/github/contributors/giampaolo/psutil.svg + :target: https://github.com/giampaolo/psutil/graphs/contributors + :alt: Contributors + +.. |github-actions-wheels| image:: https://img.shields.io/github/actions/workflow/status/giampaolo/psutil/.github/workflows/build.yml?label=Linux%2C%20macOS%2C%20Windows + :target: https://github.com/giampaolo/psutil/actions?query=workflow%3Abuild + :alt: Linux, macOS, Windows + +.. |github-actions-bsd| image:: https://img.shields.io/github/actions/workflow/status/giampaolo/psutil/.github/workflows/bsd.yml?label=FreeBSD,%20NetBSD,%20OpenBSD + :target: https://github.com/giampaolo/psutil/actions?query=workflow%3Absd-tests + :alt: FreeBSD, NetBSD, OpenBSD + +.. |appveyor| image:: https://img.shields.io/appveyor/build/giampaolo/psutil/master.svg?maxAge=3600&label=Windows%20(py2) + :target: https://ci.appveyor.com/project/giampaolo/psutil + :alt: Windows (Appveyor) + +.. |coverage| image:: https://coveralls.io/repos/github/giampaolo/psutil/badge.svg?branch=master + :target: https://coveralls.io/github/giampaolo/psutil?branch=master + :alt: Test coverage (coverall.io) + +.. |doc| image:: https://readthedocs.org/projects/psutil/badge/?version=latest + :target: https://psutil.readthedocs.io/en/latest/ + :alt: Documentation Status + +.. |version| image:: https://img.shields.io/pypi/v/psutil.svg?label=pypi + :target: https://pypi.org/project/psutil + :alt: Latest version + +.. |py-versions| image:: https://img.shields.io/pypi/pyversions/psutil.svg + :alt: Supported Python versions + +.. |packages| image:: https://repology.org/badge/tiny-repos/python:psutil.svg + :target: https://repology.org/metapackage/python:psutil/versions + :alt: Binary packages + +.. |license| image:: https://img.shields.io/pypi/l/psutil.svg + :target: https://github.com/giampaolo/psutil/blob/master/LICENSE + :alt: License + +.. |twitter| image:: https://img.shields.io/twitter/follow/grodola.svg?label=follow&style=flat&logo=twitter&logoColor=4FADFF + :target: https://twitter.com/grodola + :alt: Twitter Follow + +.. 
|tidelift| image:: https://tidelift.com/badges/github/giampaolo/psutil?style=flat + :target: https://tidelift.com/subscription/pkg/pypi-psutil?utm_source=pypi-psutil&utm_medium=referral&utm_campaign=readme + :alt: Tidelift + +----- + +Quick links +=========== + +- `Home page `_ +- `Install `_ +- `Documentation `_ +- `Download `_ +- `Forum `_ +- `StackOverflow `_ +- `Blog `_ +- `What's new `_ + + +Summary +======= + +psutil (process and system utilities) is a cross-platform library for +retrieving information on **running processes** and **system utilization** +(CPU, memory, disks, network, sensors) in Python. +It is useful mainly for **system monitoring**, **profiling and limiting process +resources** and **management of running processes**. +It implements many functionalities offered by classic UNIX command line tools +such as *ps, top, iotop, lsof, netstat, ifconfig, free* and others. +psutil currently supports the following platforms: + +- **Linux** +- **Windows** +- **macOS** +- **FreeBSD, OpenBSD**, **NetBSD** +- **Sun Solaris** +- **AIX** + +Supported Python versions are **2.7**, **3.6+** and +`PyPy `__. + +Funding +======= + +While psutil is free software and will always be, the project would benefit +immensely from some funding. +Keeping up with bug reports and maintenance has become hardly sustainable for +me alone in terms of time. +If you're a company that's making significant use of psutil you can consider +becoming a sponsor via `GitHub Sponsors `__, +`Open Collective `__ or +`PayPal `__ +and have your logo displayed in here and psutil `doc `__. + +Sponsors +======== + +.. image:: https://github.com/giampaolo/psutil/raw/master/docs/_static/tidelift-logo.png + :width: 200 + :alt: Alternative text + +`Add your logo `__. + +Example usages +============== + +This represents pretty much the whole psutil API. + +CPU +--- + +.. code-block:: python + + >>> import psutil + >>> + >>> psutil.cpu_times() + scputimes(user=3961.46, nice=169.729, system=2150.659, idle=16900.540, iowait=629.59, irq=0.0, softirq=19.42, steal=0.0, guest=0, guest_nice=0.0) + >>> + >>> for x in range(3): + ... psutil.cpu_percent(interval=1) + ... + 4.0 + 5.9 + 3.8 + >>> + >>> for x in range(3): + ... psutil.cpu_percent(interval=1, percpu=True) + ... + [4.0, 6.9, 3.7, 9.2] + [7.0, 8.5, 2.4, 2.1] + [1.2, 9.0, 9.9, 7.2] + >>> + >>> for x in range(3): + ... psutil.cpu_times_percent(interval=1, percpu=False) + ... + scputimes(user=1.5, nice=0.0, system=0.5, idle=96.5, iowait=1.5, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0) + scputimes(user=1.0, nice=0.0, system=0.0, idle=99.0, iowait=0.0, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0) + scputimes(user=2.0, nice=0.0, system=0.0, idle=98.0, iowait=0.0, irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0) + >>> + >>> psutil.cpu_count() + 4 + >>> psutil.cpu_count(logical=False) + 2 + >>> + >>> psutil.cpu_stats() + scpustats(ctx_switches=20455687, interrupts=6598984, soft_interrupts=2134212, syscalls=0) + >>> + >>> psutil.cpu_freq() + scpufreq(current=931.42925, min=800.0, max=3500.0) + >>> + >>> psutil.getloadavg() # also on Windows (emulated) + (3.14, 3.89, 4.67) + +Memory +------ + +.. 
code-block:: python + + >>> psutil.virtual_memory() + svmem(total=10367352832, available=6472179712, percent=37.6, used=8186245120, free=2181107712, active=4748992512, inactive=2758115328, buffers=790724608, cached=3500347392, shared=787554304) + >>> psutil.swap_memory() + sswap(total=2097147904, used=296128512, free=1801019392, percent=14.1, sin=304193536, sout=677842944) + >>> + +Disks +----- + +.. code-block:: python + + >>> psutil.disk_partitions() + [sdiskpart(device='/dev/sda1', mountpoint='/', fstype='ext4', opts='rw,nosuid', maxfile=255, maxpath=4096), + sdiskpart(device='/dev/sda2', mountpoint='/home', fstype='ext', opts='rw', maxfile=255, maxpath=4096)] + >>> + >>> psutil.disk_usage('/') + sdiskusage(total=21378641920, used=4809781248, free=15482871808, percent=22.5) + >>> + >>> psutil.disk_io_counters(perdisk=False) + sdiskio(read_count=719566, write_count=1082197, read_bytes=18626220032, write_bytes=24081764352, read_time=5023392, write_time=63199568, read_merged_count=619166, write_merged_count=812396, busy_time=4523412) + >>> + +Network +------- + +.. code-block:: python + + >>> psutil.net_io_counters(pernic=True) + {'eth0': netio(bytes_sent=485291293, bytes_recv=6004858642, packets_sent=3251564, packets_recv=4787798, errin=0, errout=0, dropin=0, dropout=0), + 'lo': netio(bytes_sent=2838627, bytes_recv=2838627, packets_sent=30567, packets_recv=30567, errin=0, errout=0, dropin=0, dropout=0)} + >>> + >>> psutil.net_connections(kind='tcp') + [sconn(fd=115, family=, type=, laddr=addr(ip='10.0.0.1', port=48776), raddr=addr(ip='93.186.135.91', port=80), status='ESTABLISHED', pid=1254), + sconn(fd=117, family=, type=, laddr=addr(ip='10.0.0.1', port=43761), raddr=addr(ip='72.14.234.100', port=80), status='CLOSING', pid=2987), + ...] + >>> + >>> psutil.net_if_addrs() + {'lo': [snicaddr(family=, address='127.0.0.1', netmask='255.0.0.0', broadcast='127.0.0.1', ptp=None), + snicaddr(family=, address='::1', netmask='ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', broadcast=None, ptp=None), + snicaddr(family=, address='00:00:00:00:00:00', netmask=None, broadcast='00:00:00:00:00:00', ptp=None)], + 'wlan0': [snicaddr(family=, address='192.168.1.3', netmask='255.255.255.0', broadcast='192.168.1.255', ptp=None), + snicaddr(family=, address='fe80::c685:8ff:fe45:641%wlan0', netmask='ffff:ffff:ffff:ffff::', broadcast=None, ptp=None), + snicaddr(family=, address='c4:85:08:45:06:41', netmask=None, broadcast='ff:ff:ff:ff:ff:ff', ptp=None)]} + >>> + >>> psutil.net_if_stats() + {'lo': snicstats(isup=True, duplex=, speed=0, mtu=65536, flags='up,loopback,running'), + 'wlan0': snicstats(isup=True, duplex=, speed=100, mtu=1500, flags='up,broadcast,running,multicast')} + >>> + +Sensors +------- + +.. code-block:: python + + >>> import psutil + >>> psutil.sensors_temperatures() + {'acpitz': [shwtemp(label='', current=47.0, high=103.0, critical=103.0)], + 'asus': [shwtemp(label='', current=47.0, high=None, critical=None)], + 'coretemp': [shwtemp(label='Physical id 0', current=52.0, high=100.0, critical=100.0), + shwtemp(label='Core 0', current=45.0, high=100.0, critical=100.0)]} + >>> + >>> psutil.sensors_fans() + {'asus': [sfan(label='cpu_fan', current=3200)]} + >>> + >>> psutil.sensors_battery() + sbattery(percent=93, secsleft=16628, power_plugged=False) + >>> + +Other system info +----------------- + +.. 
code-block:: python + + >>> import psutil + >>> psutil.users() + [suser(name='giampaolo', terminal='pts/2', host='localhost', started=1340737536.0, pid=1352), + suser(name='giampaolo', terminal='pts/3', host='localhost', started=1340737792.0, pid=1788)] + >>> + >>> psutil.boot_time() + 1365519115.0 + >>> + +Process management +------------------ + +.. code-block:: python + + >>> import psutil + >>> psutil.pids() + [1, 2, 3, 4, 5, 6, 7, 46, 48, 50, 51, 178, 182, 222, 223, 224, 268, 1215, + 1216, 1220, 1221, 1243, 1244, 1301, 1601, 2237, 2355, 2637, 2774, 3932, + 4176, 4177, 4185, 4187, 4189, 4225, 4243, 4245, 4263, 4282, 4306, 4311, + 4312, 4313, 4314, 4337, 4339, 4357, 4358, 4363, 4383, 4395, 4408, 4433, + 4443, 4445, 4446, 5167, 5234, 5235, 5252, 5318, 5424, 5644, 6987, 7054, + 7055, 7071] + >>> + >>> p = psutil.Process(7055) + >>> p + psutil.Process(pid=7055, name='python3', status='running', started='09:04:44') + >>> p.pid + 7055 + >>> p.name() + 'python3' + >>> p.exe() + '/usr/bin/python3' + >>> p.cwd() + '/home/giampaolo' + >>> p.cmdline() + ['/usr/bin/python3', 'main.py'] + >>> + >>> p.ppid() + 7054 + >>> p.parent() + psutil.Process(pid=4699, name='bash', status='sleeping', started='09:06:44') + >>> p.parents() + [psutil.Process(pid=4699, name='bash', started='09:06:44'), + psutil.Process(pid=4689, name='gnome-terminal-server', status='sleeping', started='0:06:44'), + psutil.Process(pid=1, name='systemd', status='sleeping', started='05:56:55')] + >>> p.children(recursive=True) + [psutil.Process(pid=29835, name='python3', status='sleeping', started='11:45:38'), + psutil.Process(pid=29836, name='python3', status='waking', started='11:43:39')] + >>> + >>> p.status() + 'running' + >>> p.create_time() + 1267551141.5019531 + >>> p.terminal() + '/dev/pts/0' + >>> + >>> p.username() + 'giampaolo' + >>> p.uids() + puids(real=1000, effective=1000, saved=1000) + >>> p.gids() + pgids(real=1000, effective=1000, saved=1000) + >>> + >>> p.cpu_times() + pcputimes(user=1.02, system=0.31, children_user=0.32, children_system=0.1, iowait=0.0) + >>> p.cpu_percent(interval=1.0) + 12.1 + >>> p.cpu_affinity() + [0, 1, 2, 3] + >>> p.cpu_affinity([0, 1]) # set + >>> p.cpu_num() + 1 + >>> + >>> p.memory_info() + pmem(rss=10915840, vms=67608576, shared=3313664, text=2310144, lib=0, data=7262208, dirty=0) + >>> p.memory_full_info() # "real" USS memory usage (Linux, macOS, Win only) + pfullmem(rss=10199040, vms=52133888, shared=3887104, text=2867200, lib=0, data=5967872, dirty=0, uss=6545408, pss=6872064, swap=0) + >>> p.memory_percent() + 0.7823 + >>> p.memory_maps() + [pmmap_grouped(path='/lib/x8664-linux-gnu/libutil-2.15.so', rss=32768, size=2125824, pss=32768, shared_clean=0, shared_dirty=0, private_clean=20480, private_dirty=12288, referenced=32768, anonymous=12288, swap=0), + pmmap_grouped(path='/lib/x8664-linux-gnu/libc-2.15.so', rss=3821568, size=3842048, pss=3821568, shared_clean=0, shared_dirty=0, private_clean=0, private_dirty=3821568, referenced=3575808, anonymous=3821568, swap=0), + pmmap_grouped(path='[heap]', rss=32768, size=139264, pss=32768, shared_clean=0, shared_dirty=0, private_clean=0, private_dirty=32768, referenced=32768, anonymous=32768, swap=0), + pmmap_grouped(path='[stack]', rss=2465792, size=2494464, pss=2465792, shared_clean=0, shared_dirty=0, private_clean=0, private_dirty=2465792, referenced=2277376, anonymous=2465792, swap=0), + ...] 
+ >>> + >>> p.io_counters() + pio(read_count=478001, write_count=59371, read_bytes=700416, write_bytes=69632, read_chars=456232, write_chars=517543) + >>> + >>> p.open_files() + [popenfile(path='/home/giampaolo/monit.py', fd=3, position=0, mode='r', flags=32768), + popenfile(path='/var/log/monit.log', fd=4, position=235542, mode='a', flags=33793)] + >>> + >>> p.connections(kind='tcp') + [pconn(fd=115, family=, type=, laddr=addr(ip='10.0.0.1', port=48776), raddr=addr(ip='93.186.135.91', port=80), status='ESTABLISHED'), + pconn(fd=117, family=, type=, laddr=addr(ip='10.0.0.1', port=43761), raddr=addr(ip='72.14.234.100', port=80), status='CLOSING')] + >>> + >>> p.threads() + [pthread(id=5234, user_time=22.5, system_time=9.2891), + pthread(id=5237, user_time=0.0707, system_time=1.1)] + >>> + >>> p.num_threads() + 4 + >>> p.num_fds() + 8 + >>> p.num_ctx_switches() + pctxsw(voluntary=78, involuntary=19) + >>> + >>> p.nice() + 0 + >>> p.nice(10) # set + >>> + >>> p.ionice(psutil.IOPRIO_CLASS_IDLE) # IO priority (Win and Linux only) + >>> p.ionice() + pionice(ioclass=, value=0) + >>> + >>> p.rlimit(psutil.RLIMIT_NOFILE, (5, 5)) # set resource limits (Linux only) + >>> p.rlimit(psutil.RLIMIT_NOFILE) + (5, 5) + >>> + >>> p.environ() + {'LC_PAPER': 'it_IT.UTF-8', 'SHELL': '/bin/bash', 'GREP_OPTIONS': '--color=auto', + 'XDG_CONFIG_DIRS': '/etc/xdg/xdg-ubuntu:/usr/share/upstart/xdg:/etc/xdg', + ...} + >>> + >>> p.as_dict() + {'status': 'running', 'num_ctx_switches': pctxsw(voluntary=63, involuntary=1), 'pid': 5457, ...} + >>> p.is_running() + True + >>> p.suspend() + >>> p.resume() + >>> + >>> p.terminate() + >>> p.kill() + >>> p.wait(timeout=3) + + >>> + >>> psutil.test() + USER PID %CPU %MEM VSZ RSS TTY START TIME COMMAND + root 1 0.0 0.0 24584 2240 Jun17 00:00 init + root 2 0.0 0.0 0 0 Jun17 00:00 kthreadd + ... + giampaolo 31475 0.0 0.0 20760 3024 /dev/pts/0 Jun19 00:00 python2.4 + giampaolo 31721 0.0 2.2 773060 181896 00:04 10:30 chrome + root 31763 0.0 0.0 0 0 00:05 00:00 kworker/0:1 + >>> + +Further process APIs +-------------------- + +.. code-block:: python + + >>> import psutil + >>> for proc in psutil.process_iter(['pid', 'name']): + ... print(proc.info) + ... + {'pid': 1, 'name': 'systemd'} + {'pid': 2, 'name': 'kthreadd'} + {'pid': 3, 'name': 'ksoftirqd/0'} + ... + >>> + >>> psutil.pid_exists(3) + True + >>> + >>> def on_terminate(proc): + ... print("process {} terminated".format(proc)) + ... + >>> # waits for multiple processes to terminate + >>> gone, alive = psutil.wait_procs(procs_list, timeout=3, callback=on_terminate) + >>> + +Windows services +---------------- + +.. code-block:: python + + >>> list(psutil.win_service_iter()) + [, + , + , + , + ...] 
+ >>> s = psutil.win_service_get('alg') + >>> s.as_dict() + {'binpath': 'C:\\Windows\\System32\\alg.exe', + 'description': 'Provides support for 3rd party protocol plug-ins for Internet Connection Sharing', + 'display_name': 'Application Layer Gateway Service', + 'name': 'alg', + 'pid': None, + 'start_type': 'manual', + 'status': 'stopped', + 'username': 'NT AUTHORITY\\LocalService'} + +Projects using psutil +===================== + +Here's some I find particularly interesting: + +- https://github.com/google/grr +- https://github.com/facebook/osquery/ +- https://github.com/nicolargo/glances +- https://github.com/aristocratos/bpytop +- https://github.com/Jahaja/psdash +- https://github.com/ajenti/ajenti +- https://github.com/home-assistant/home-assistant/ + +Portings +======== + +- Go: https://github.com/shirou/gopsutil +- C: https://github.com/hamon-in/cpslib +- Rust: https://github.com/rust-psutil/rust-psutil +- Nim: https://github.com/johnscillieri/psutil-nim + + + diff --git a/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/LICENSE.txt b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..7bb1330a1002b78e748296b4be96a72f3fb67b4e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/LICENSE.txt @@ -0,0 +1,2261 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +src/arrow/util (some portions): Apache 2.0, and 3-clause BSD + +Some portions of this module are derived from code in the Chromium project, +copyright (c) Google inc and (c) The Chromium Authors and licensed under the +Apache 2.0 License or the under the 3-clause BSD license: + + Copyright (c) 2013 The Chromium Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. 
nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from Daniel Lemire's FrameOfReference project. + +https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp +https://github.com/lemire/FrameOfReference/blob/146948b6058a976bc7767262ad3a2ce201486b93/scripts/turbopacking64.py + +Copyright: 2013 Daniel Lemire +Home page: http://lemire.me/en/ +Project page: https://github.com/lemire/FrameOfReference +License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from the TensorFlow project + +Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the NumPy project. + +https://github.com/numpy/numpy/blob/e1f191c46f2eebd6cb892a4bfe14d9dd43a06c4e/numpy/core/src/multiarray/multiarraymodule.c#L2910 + +https://github.com/numpy/numpy/blob/68fd82271b9ea5a9e50d4e761061dfcca851382a/numpy/core/src/multiarray/datetime.c + +Copyright (c) 2005-2017, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from the Boost project + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from the FlatBuffers project + +Copyright 2014 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the tslib project + +Copyright 2015 Microsoft Corporation. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the jemalloc project + +https://github.com/jemalloc/jemalloc + +Copyright (C) 2002-2017 Jason Evans . +All rights reserved. +Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. +Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice(s), + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice(s), + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- + +This project includes code from the Go project, BSD 3-clause license + PATENTS +weak patent termination clause +(https://github.com/golang/go/blob/master/PATENTS). + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from the hs2client + +https://github.com/cloudera/hs2client + +Copyright 2016 Cloudera Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +The script ci/scripts/util_wait_for_it.sh has the following license + +Copyright (c) 2016 Giles Hall + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The script r/configure has the following license (MIT) + +Copyright (c) 2017, Jeroen Ooms and Jim Hester + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +cpp/src/arrow/util/logging.cc, cpp/src/arrow/util/logging.h and +cpp/src/arrow/util/logging-test.cc are adapted from +Ray Project (https://github.com/ray-project/ray) (Apache 2.0). + +Copyright (c) 2016 Ray Project (https://github.com/ray-project/ray) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- +The files cpp/src/arrow/vendored/datetime/date.h, cpp/src/arrow/vendored/datetime/tz.h, +cpp/src/arrow/vendored/datetime/tz_private.h, cpp/src/arrow/vendored/datetime/ios.h, +cpp/src/arrow/vendored/datetime/ios.mm, +cpp/src/arrow/vendored/datetime/tz.cpp are adapted from +Howard Hinnant's date library (https://github.com/HowardHinnant/date) +It is licensed under MIT license. + +The MIT License (MIT) +Copyright (c) 2015, 2016, 2017 Howard Hinnant +Copyright (c) 2016 Adrian Colomitchi +Copyright (c) 2017 Florian Dang +Copyright (c) 2017 Paul Thompson +Copyright (c) 2018 Tomasz Kamiński + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/util/utf8.h includes code adapted from the page + https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ +with the following license (MIT) + +Copyright (c) 2008-2009 Bjoern Hoehrmann + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/xxhash/ have the following license +(BSD 2-Clause License) + +xxHash Library +Copyright (c) 2012-2014, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +You can contact the author at : +- xxHash homepage: http://www.xxhash.com +- xxHash source repository : https://github.com/Cyan4973/xxHash + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/double-conversion/ have the following license +(BSD 3-Clause License) + +Copyright 2006-2011, the V8 project authors. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/uriparser/ have the following license +(BSD 3-Clause License) + +uriparser - RFC 3986 URI parsing library + +Copyright (C) 2007, Weijia Song +Copyright (C) 2007, Sebastian Pipping +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above + copyright notice, this list of conditions and the following + disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + + * Neither the name of the nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files under dev/tasks/conda-recipes have the following license + +BSD 3-clause license +Copyright (c) 2015-2018, conda-forge +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/utfcpp/ have the following license + +Copyright 2006-2018 Nemanja Trifunovic + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from Apache Kudu. + + * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake + +Copyright: 2016 The Apache Software Foundation. +Home page: https://kudu.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Impala (incubating), formerly +Impala. The Impala code and rights were donated to the ASF as part of the +Incubator process after the initial code imports into Apache Parquet. + +Copyright: 2012 Cloudera, Inc. +Copyright: 2016 The Apache Software Foundation. 
+Home page: http://impala.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Aurora. + +* dev/release/{release,changelog,release-candidate} are based on the scripts from + Apache Aurora + +Copyright: 2016 The Apache Software Foundation. +Home page: https://aurora.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from the Google styleguide. + +* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide. + +Copyright: 2009 Google Inc. All rights reserved. +Homepage: https://github.com/google/styleguide +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from Snappy. + +* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code + from Google's Snappy project. + +Copyright: 2009 Google Inc. All rights reserved. +Homepage: https://github.com/google/snappy +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from the manylinux project. + +* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py, + requirements.txt} are based on code from the manylinux project. + +Copyright: 2016 manylinux +Homepage: https://github.com/pypa/manylinux +License: The MIT License (MIT) + +-------------------------------------------------------------------------------- + +This project includes code from the cymove project: + +* python/pyarrow/includes/common.pxd includes code from the cymove project + +The MIT License (MIT) +Copyright (c) 2019 Omer Ozarslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The projects includes code from the Ursabot project under the dev/archery +directory. + +License: BSD 2-Clause + +Copyright 2019 RStudio, Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project include code from mingw-w64. + +* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5 + +Copyright (c) 2009 - 2013 by the mingw-w64 project +Homepage: https://mingw-w64.org +License: Zope Public License (ZPL) Version 2.1. + +--------------------------------------------------------------------------------- + +This project include code from Google's Asylo project. + +* cpp/src/arrow/result.h is based on status_or.h + +Copyright (c) Copyright 2017 Asylo authors +Homepage: https://asylo.dev/ +License: Apache 2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Google's protobuf project + +* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based off ASSIGN_OR_RETURN +* cpp/src/arrow/util/bit_stream_utils.h contains code from wire_format_lite.h + +Copyright 2008 Google Inc. All rights reserved. +Homepage: https://developers.google.com/protocol-buffers/ +License: + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. + +-------------------------------------------------------------------------------- + +3rdparty dependency LLVM is statically linked in certain binary distributions. +Additionally some sections of source code have been derived from sources in LLVM +and have been clearly labeled as such. LLVM has the following license: + +============================================================================== +The LLVM Project is under the Apache License v2.0 with LLVM Exceptions: +============================================================================== + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +---- LLVM Exceptions to the Apache 2.0 License ---- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into an Object form of such source code, you +may redistribute such embedded portions in such Object form without complying +with the conditions of Sections 4(a), 4(b) and 4(d) of the License. + +In addition, if you combine or link compiled forms of this Software with +software that is licensed under the GPLv2 ("Combined Software") and if a +court of competent jurisdiction determines that the patent provision (Section +3), the indemnity provision (Section 9) or other Section of the License +conflicts with the conditions of the GPLv2, you may retroactively and +prospectively choose to deem waived or otherwise exclude such Section(s) of +the License, but only in their entirety and only with respect to the Combined +Software. + +============================================================================== +Software from third parties included in the LLVM Project: +============================================================================== +The LLVM Project contains third party software which is under different license +terms. All such code will be identified clearly using at least one of two +mechanisms: +1) It will be in a separate directory tree with its own `LICENSE.txt` or + `LICENSE` file at the top containing the specific license and restrictions + which apply to that software, or +2) It will contain specific license and restriction terms at the top of every + file. + +-------------------------------------------------------------------------------- + +3rdparty dependency gRPC is statically linked in certain binary +distributions, like the python wheels. gRPC has the following license: + +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +-------------------------------------------------------------------------------- + +3rdparty dependency Apache Thrift is statically linked in certain binary +distributions, like the python wheels. Apache Thrift has the following license: + +Apache Thrift +Copyright (C) 2006 - 2019, The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency Apache ORC is statically linked in certain binary +distributions, like the python wheels. Apache ORC has the following license: + +Apache ORC +Copyright 2013-2019 The Apache Software Foundation + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by Hewlett-Packard: +(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency zstd is statically linked in certain binary +distributions, like the python wheels. ZSTD has the following license: + +BSD License + +For Zstandard software + +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency lz4 is statically linked in certain binary +distributions, like the python wheels. lz4 has the following license: + +LZ4 Library +Copyright (c) 2011-2016, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency Brotli is statically linked in certain binary +distributions, like the python wheels. Brotli has the following license: + +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- + +3rdparty dependency rapidjson is statically linked in certain binary +distributions, like the python wheels. rapidjson and its dependencies have the +following licenses: + +Tencent is pleased to support the open source community by making RapidJSON +available. + +Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. +All rights reserved. + +If you have downloaded a copy of the RapidJSON binary from Tencent, please note +that the RapidJSON binary is licensed under the MIT License. +If you have downloaded a copy of the RapidJSON source code from Tencent, please +note that RapidJSON source code is licensed under the MIT License, except for +the third-party components listed below which are subject to different license +terms. Your integration of RapidJSON into your own projects may require +compliance with the MIT License, as well as the other licenses applicable to +the third-party components included within RapidJSON. To avoid the problematic +JSON license in your own projects, it's sufficient to exclude the +bin/jsonchecker/ directory, as it's the only code under the JSON license. +A copy of the MIT License is included in this file. + +Other dependencies and licenses: + + Open Source Software Licensed Under the BSD License: + -------------------------------------------------------------------- + + The msinttypes r29 + Copyright (c) 2006-2013 Alexander Chemeris + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + DAMAGE. 
+ + Terms of the MIT License: + -------------------------------------------------------------------- + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency snappy is statically linked in certain binary +distributions, like the python wheels. snappy has the following license: + +Copyright 2011, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Google Inc. nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=== + +Some of the benchmark data in testdata/ is licensed differently: + + - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and + is licensed under the Creative Commons Attribution 3.0 license + (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ + for more information. + + - kppkn.gtb is taken from the Gaviota chess tablebase set, and + is licensed under the MIT License. See + https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 + for more information. 
+ + - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper + “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA + Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro, + which is licensed under the CC-BY license. See + http://www.ploscompbiol.org/static/license for more ifnormation. + + - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project + Gutenberg. The first three have expired copyrights and are in the public + domain; the latter does not have expired copyright, but is still in the + public domain according to the license information + (http://www.gutenberg.org/ebooks/53). + +-------------------------------------------------------------------------------- + +3rdparty dependency gflags is statically linked in certain binary +distributions, like the python wheels. gflags has the following license: + +Copyright (c) 2006, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency glog is statically linked in certain binary +distributions, like the python wheels. glog has the following license: + +Copyright (c) 2008, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +A function gettimeofday in utilities.cc is based on + +http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd + +The license of this code is: + +Copyright (c) 2003-2008, Jouni Malinen and contributors +All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name(s) of the above-listed copyright holder(s) nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency re2 is statically linked in certain binary +distributions, like the python wheels. re2 has the following license: + +Copyright (c) 2009 The RE2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency c-ares is statically linked in certain binary +distributions, like the python wheels. c-ares has the following license: + +# c-ares license + +Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS +file. + +Copyright 1998 by the Massachusetts Institute of Technology. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, provided that +the above copyright notice appear in all copies and that both that copyright +notice and this permission notice appear in supporting documentation, and that +the name of M.I.T. not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior permission. +M.I.T. makes no representations about the suitability of this software for any +purpose. It is provided "as is" without express or implied warranty. + +-------------------------------------------------------------------------------- + +3rdparty dependency zlib is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. In the future +this will likely change to static linkage. zlib has the following license: + +zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.11, January 15th, 2017 + + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +-------------------------------------------------------------------------------- + +3rdparty dependency openssl is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. openssl +preceding version 3 has the following license: + + LICENSE ISSUES + ============== + + The OpenSSL toolkit stays under a double license, i.e. 
both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). 
+ * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +-------------------------------------------------------------------------------- + +This project includes code from the rtools-backports project. + +* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code + from the rtools-backports project. + +Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms. +All rights reserved. +Homepage: https://github.com/r-windows/rtools-backports +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +Some code from pandas has been adapted for the pyarrow codebase. pandas is +available under the 3-clause BSD license, which follows: + +pandas license +============== + +Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team +All rights reserved. + +Copyright (c) 2008-2011 AQR Capital Management, LLC +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the copyright holder nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +Some bits from DyND, in particular aspects of the build system, have been +adapted from libdynd and dynd-python under the terms of the BSD 2-clause +license + +The BSD 2-Clause License + + Copyright (C) 2011-12, Dynamic NDArray Developers + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Dynamic NDArray Developers list: + + * Mark Wiebe + * Continuum Analytics + +-------------------------------------------------------------------------------- + +Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted +for PyArrow. Ibis is released under the Apache License, Version 2.0. 
+ +-------------------------------------------------------------------------------- + +dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: + +BSD 2-Clause License + +Copyright (c) 2009-present, Homebrew contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- + +cpp/src/arrow/vendored/base64.cpp has the following license + +ZLIB License + +Copyright (C) 2004-2017 René Nyffenegger + +This source code is provided 'as-is', without any express or implied +warranty. In no event will the author be held liable for any damages arising +from the use of this software. + +Permission is granted to anyone to use this software for any purpose, including +commercial applications, and to alter it and redistribute it freely, subject to +the following restrictions: + +1. The origin of this source code must not be misrepresented; you must not + claim that you wrote the original source code. If you use this source code + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original source code. + +3. This notice may not be removed or altered from any source distribution. + +René Nyffenegger rene.nyffenegger@adp-gmbh.ch + +-------------------------------------------------------------------------------- + +This project includes code from Folly. + + * cpp/src/arrow/vendored/ProducerConsumerQueue.h + +is based on Folly's + + * folly/Portability.h + * folly/lang/Align.h + * folly/ProducerConsumerQueue.h + +Copyright: Copyright (c) Facebook, Inc. and its affiliates. +Home page: https://github.com/facebook/folly +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/vendored/musl/strptime.c has the following license + +Copyright © 2005-2020 Rich Felker, et al. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The file cpp/cmake_modules/BuildUtils.cmake contains code from + +https://gist.github.com/cristianadam/ef920342939a89fae3e8a85ca9459b49 + +which is made available under the MIT license + +Copyright (c) 2019 Cristian Adam + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/portable-snippets/ contain code from + +https://github.com/nemequ/portable-snippets + +and have the following copyright notice: + +Each source file contains a preamble explaining the license situation +for that file, which takes priority over this file. With the +exception of some code pulled in from other repositories (such as +µnit, an MIT-licensed project which is used for testing), the code is +public domain, released using the CC0 1.0 Universal dedication (*). + +(*) https://creativecommons.org/publicdomain/zero/1.0/legalcode + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/fast_float/ contain code from + +https://github.com/lemire/fast_float + +which is made available under the Apache License 2.0. + +-------------------------------------------------------------------------------- + +The file python/pyarrow/vendored/docscrape.py contains code from + +https://github.com/numpy/numpydoc/ + +which is made available under the BSD 2-clause license. 
+ +-------------------------------------------------------------------------------- + +The file python/pyarrow/vendored/version.py contains code from + +https://github.com/pypa/packaging/ + +which is made available under both the Apache license v2.0 and the +BSD 2-clause license. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/pcg contain code from + +https://github.com/imneme/pcg-cpp + +and have the following copyright notice: + +Copyright 2014-2019 Melissa O'Neill , + and the PCG Project contributors. + +SPDX-License-Identifier: (Apache-2.0 OR MIT) + +Licensed under the Apache License, Version 2.0 (provided in +LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0) +or under the MIT license (provided in LICENSE-MIT.txt and at +http://opensource.org/licenses/MIT), at your option. This file may not +be copied, modified, or distributed except according to those terms. + +Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either +express or implied. See your chosen license for details. + +-------------------------------------------------------------------------------- +r/R/dplyr-count-tally.R (some portions) + +Some portions of this file are derived from code from + +https://github.com/tidyverse/dplyr/ + +which is made available under the MIT license + +Copyright (c) 2013-2019 RStudio and others. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +The file src/arrow/util/io_util.cc contains code from the CPython project +which is made available under the Python Software Foundation License Version 2. + +-------------------------------------------------------------------------------- + +3rdparty dependency opentelemetry-cpp is statically linked in certain binary +distributions. opentelemetry-cpp is made available under the Apache License 2.0. + +Copyright The OpenTelemetry Authors +SPDX-License-Identifier: Apache-2.0 + +-------------------------------------------------------------------------------- + +ci/conan/ is based on code from Conan Package and Dependency Manager. 
+ +Copyright (c) 2019 Conan.io + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency UCX is redistributed as a dynamically linked shared +library in certain binary distributions. UCX has the following license: + +Copyright (c) 2014-2015 UT-Battelle, LLC. All rights reserved. +Copyright (C) 2014-2020 Mellanox Technologies Ltd. All rights reserved. +Copyright (C) 2014-2015 The University of Houston System. All rights reserved. +Copyright (C) 2015 The University of Tennessee and The University + of Tennessee Research Foundation. All rights reserved. +Copyright (C) 2016-2020 ARM Ltd. All rights reserved. +Copyright (c) 2016 Los Alamos National Security, LLC. All rights reserved. +Copyright (C) 2016-2020 Advanced Micro Devices, Inc. All rights reserved. +Copyright (C) 2019 UChicago Argonne, LLC. All rights reserved. +Copyright (c) 2018-2020 NVIDIA CORPORATION. All rights reserved. +Copyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved. +Copyright (C) 2016-2020 Stony Brook University. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +The file dev/tasks/r/github.packages.yml contains code from + +https://github.com/ursa-labs/arrow-r-nightly + +which is made available under the Apache License 2.0. + +-------------------------------------------------------------------------------- +.github/actions/sync-nightlies/action.yml (some portions) + +Some portions of this file are derived from code from + +https://github.com/JoshPiper/rsync-docker + +which is made available under the MIT license + +Copyright (c) 2020 Joshua Piper + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +.github/actions/sync-nightlies/action.yml (some portions) + +Some portions of this file are derived from code from + +https://github.com/burnett01/rsync-deployments + +which is made available under the MIT license + +Copyright (c) 2019-2022 Contention +Copyright (c) 2019-2022 Burnett01 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- +java/vector/src/main/java/org/apache/arrow/vector/util/IntObjectHashMap.java +java/vector/src/main/java/org/apache/arrow/vector/util/IntObjectMap.java + +These file are derived from code from Netty, which is made available under the +Apache License 2.0. diff --git a/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..b04a600c4bcfd6af8b7003432d9e3bbc7ba42c51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/METADATA @@ -0,0 +1,81 @@ +Metadata-Version: 2.1 +Name: pyarrow +Version: 16.0.0 +Summary: Python library for Apache Arrow +Home-page: https://arrow.apache.org/ +Maintainer: Apache Arrow Developers +Maintainer-email: dev@arrow.apache.org +License: Apache License, Version 2.0 +Project-URL: Documentation, https://arrow.apache.org/docs/python +Project-URL: Source, https://github.com/apache/arrow +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: ../LICENSE.txt +License-File: ../NOTICE.txt +Requires-Dist: numpy >=1.16.6 + + + +## Python library for Apache Arrow + +[![pypi](https://img.shields.io/pypi/v/pyarrow.svg)](https://pypi.org/project/pyarrow/) [![conda-forge](https://img.shields.io/conda/vn/conda-forge/pyarrow.svg)](https://anaconda.org/conda-forge/pyarrow) + +This library provides a Python API for functionality provided by the Arrow C++ +libraries, along with tools for Arrow integration and interoperability with +pandas, NumPy, and other software in the Python ecosystem. + +## Installing + +Across platforms, you can install a recent version of pyarrow with the conda +package manager: + +```shell +conda install pyarrow -c conda-forge +``` + +On Linux, macOS, and Windows, you can also install binary wheels from PyPI with +pip: + +```shell +pip install pyarrow +``` + +If you encounter any issues importing the pip wheels on Windows, you may need +to install the [Visual C++ Redistributable for Visual Studio 2015][6]. + +## Development + +See [Python Development][2] in the documentation subproject. + +### Building the documentation + +See [documentation build instructions][1] in the documentation subproject. 
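+
+For example, a minimal, illustrative sketch of the pandas interoperability
+mentioned above (assuming both pyarrow and pandas are installed; the column
+names here are arbitrary):
+
+```python
+import pyarrow as pa
+
+# Build an Arrow table from plain Python lists
+table = pa.table({"id": [1, 2, 3], "label": ["a", "b", "c"]})
+print(table.schema)
+
+# Convert to a pandas DataFrame and back to an Arrow table
+df = table.to_pandas()
+table_again = pa.Table.from_pandas(df)
+print(table_again.column_names)
+```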
+ +[1]: https://github.com/apache/arrow/blob/main/docs/source/developers/documentation.rst +[2]: https://github.com/apache/arrow/blob/main/docs/source/developers/python.rst +[3]: https://github.com/pandas-dev/pandas +[5]: https://arrow.apache.org/docs/latest/python/benchmarks.html +[6]: https://www.microsoft.com/en-us/download/details.aspx?id=48145 diff --git a/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/NOTICE.txt b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/NOTICE.txt new file mode 100644 index 0000000000000000000000000000000000000000..2089c6fb20358ccf5e3a58ea27a234555b923f6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/NOTICE.txt @@ -0,0 +1,84 @@ +Apache Arrow +Copyright 2016-2024 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This product includes software from the SFrame project (BSD, 3-clause). +* Copyright (C) 2015 Dato, Inc. +* Copyright (c) 2009 Carnegie Mellon University. + +This product includes software from the Feather project (Apache 2.0) +https://github.com/wesm/feather + +This product includes software from the DyND project (BSD 2-clause) +https://github.com/libdynd + +This product includes software from the LLVM project + * distributed under the University of Illinois Open Source + +This product includes software from the google-lint project + * Copyright (c) 2009 Google Inc. All rights reserved. + +This product includes software from the mman-win32 project + * Copyright https://code.google.com/p/mman-win32/ + * Licensed under the MIT License; + +This product includes software from the LevelDB project + * Copyright (c) 2011 The LevelDB Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * Moved from Kudu http://github.com/cloudera/kudu + +This product includes software from the CMake project + * Copyright 2001-2009 Kitware, Inc. + * Copyright 2012-2014 Continuum Analytics, Inc. + * All rights reserved. + +This product includes software from https://github.com/matthew-brett/multibuild (BSD 2-clause) + * Copyright (c) 2013-2016, Matt Terry and Matthew Brett; all rights reserved. + +This product includes software from the Ibis project (Apache 2.0) + * Copyright (c) 2015 Cloudera, Inc. + * https://github.com/cloudera/ibis + +This product includes software from Dremio (Apache 2.0) + * Copyright (C) 2017-2018 Dremio Corporation + * https://github.com/dremio/dremio-oss + +This product includes software from Google Guava (Apache 2.0) + * Copyright (C) 2007 The Guava Authors + * https://github.com/google/guava + +This product include software from CMake (BSD 3-Clause) + * CMake - Cross Platform Makefile Generator + * Copyright 2000-2019 Kitware, Inc. and Contributors + +The web site includes files generated by Jekyll. + +-------------------------------------------------------------------------------- + +This product includes code from Apache Kudu, which includes the following in +its NOTICE file: + + Apache Kudu + Copyright 2016 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + + Portions of this software were developed at + Cloudera, Inc (http://www.cloudera.com/). 
+ +-------------------------------------------------------------------------------- + +This product includes code from Apache ORC, which includes the following in +its NOTICE file: + + Apache ORC + Copyright 2013-2019 The Apache Software Foundation + + This product includes software developed by The Apache Software + Foundation (http://www.apache.org/). + + This product includes software developed by Hewlett-Packard: + (c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P diff --git a/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..621c23043b5f6f9b0fcc7c5dafd73977dad6eb47 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/RECORD @@ -0,0 +1,848 @@ +pyarrow-16.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pyarrow-16.0.0.dist-info/LICENSE.txt,sha256=Ip2-KeThNE6VFy9vOkJ37A2lx4UMsDiXxH86JLevzgg,110423 +pyarrow-16.0.0.dist-info/METADATA,sha256=aLyEBK0-uVIQW5r8-CtPmh2zD1w1gNQnrRvRYMFOes0,3038 +pyarrow-16.0.0.dist-info/NOTICE.txt,sha256=ti6iQmQtOhjx4psMH-CCQVppQ_4VjuIrSM_zdi81QAk,3032 +pyarrow-16.0.0.dist-info/RECORD,, +pyarrow-16.0.0.dist-info/WHEEL,sha256=zL8QdYpn40L5zyQJMSDc8czZQHX1BqCvPZ3z2rz6kD0,114 +pyarrow-16.0.0.dist-info/top_level.txt,sha256=Zuk_c1WeinXdMz20fXlEtGC67zfKOWuwU8adpEEU_nI,18 +pyarrow/__init__.pxd,sha256=Wnar1phFqM_ZHnZmtbuqm6wJHsXlBoYKhV7Qmo2jUHA,2195 +pyarrow/__init__.py,sha256=Y6VXxOFNh6Rh5GAvFqJFt0jcgWVP8vbb3t7Y1_palmU,17767 +pyarrow/__pycache__/__init__.cpython-310.pyc,, +pyarrow/__pycache__/_compute_docstrings.cpython-310.pyc,, +pyarrow/__pycache__/_generated_version.cpython-310.pyc,, +pyarrow/__pycache__/acero.cpython-310.pyc,, +pyarrow/__pycache__/benchmark.cpython-310.pyc,, +pyarrow/__pycache__/cffi.cpython-310.pyc,, +pyarrow/__pycache__/compute.cpython-310.pyc,, +pyarrow/__pycache__/conftest.cpython-310.pyc,, +pyarrow/__pycache__/csv.cpython-310.pyc,, +pyarrow/__pycache__/cuda.cpython-310.pyc,, +pyarrow/__pycache__/dataset.cpython-310.pyc,, +pyarrow/__pycache__/feather.cpython-310.pyc,, +pyarrow/__pycache__/flight.cpython-310.pyc,, +pyarrow/__pycache__/fs.cpython-310.pyc,, +pyarrow/__pycache__/ipc.cpython-310.pyc,, +pyarrow/__pycache__/json.cpython-310.pyc,, +pyarrow/__pycache__/jvm.cpython-310.pyc,, +pyarrow/__pycache__/orc.cpython-310.pyc,, +pyarrow/__pycache__/pandas_compat.cpython-310.pyc,, +pyarrow/__pycache__/substrait.cpython-310.pyc,, +pyarrow/__pycache__/types.cpython-310.pyc,, +pyarrow/__pycache__/util.cpython-310.pyc,, +pyarrow/_acero.cpython-310-x86_64-linux-gnu.so,sha256=SPWLxCy2UANuLjEKcJdfcHLr8iZC_J3LBhCjvtaGq9E,320744 +pyarrow/_acero.pxd,sha256=5ish_GgGWvit4ebhzoZil7b-m0r2RuG5JwYoxsH34FI,1440 +pyarrow/_acero.pyx,sha256=56orFsG2ksoP4C0DPIa-ruQxQRCC489lYlHkGJIh1zY,21301 +pyarrow/_azurefs.cpython-310-x86_64-linux-gnu.so,sha256=y_ijHvWLSbgrqDMu2awk8VyNOBdQL-g0lAOHGsDxtzQ,105272 +pyarrow/_azurefs.pyx,sha256=ezGU_z3kIw2r14YAB4yogaxMssJXikcxajz4dZSez1k,5909 +pyarrow/_compute.cpython-310-x86_64-linux-gnu.so,sha256=ufhD9jGVUWZzSeAoUoC0Fbfhg4J0OVxTl9Ev1cn1QWE,1331992 +pyarrow/_compute.pxd,sha256=nmjgwV2KCGFfxZj5ruDwM4oH1ITqF0rDQS0yDvcaXBA,1949 +pyarrow/_compute.pyx,sha256=JevR54U8aO2mAg9N0msMCwrzr-WFgD9xHd35xX5x4_o,106194 +pyarrow/_compute_docstrings.py,sha256=7Vg8jt1aCsWrpTxsdqR7gY6M0faxXNX31c1RZdq9CFw,1707 +pyarrow/_csv.cpython-310-x86_64-linux-gnu.so,sha256=aWbt02ILnsDHNr8WRseFhX6XxHBfzrdEvVgnRkMfvfg,361488 
+pyarrow/_csv.pxd,sha256=1Zk3Zpvvhy-Tb7c79Aqd4e7bBM21kc1JxWJkl02Y4DE,1638 +pyarrow/_csv.pyx,sha256=aqWLHgfZ-nrqKHfVVVXeGbtY2ZChE6Oi9qIa5jP4m1M,54705 +pyarrow/_cuda.pxd,sha256=VzhM6j9dpNgrABlvFJKoMpRC0As55im-M3tgPTOuwEk,1922 +pyarrow/_cuda.pyx,sha256=gKyOLgtA6_FPuACA_EJQP7ksOduFXxJ0QZPzLb3zDBo,34652 +pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so,sha256=RHPVGeXvPJbzHNiql3JSXEaofXDZqSLm39tXJquqm3c,1083224 +pyarrow/_dataset.pxd,sha256=Ag9rUhoBySU6ba3wFLeuZyWMJnz9VkAf9TQEzWG4hUU,4944 +pyarrow/_dataset.pyx,sha256=V2ZguywdiVCBy4usFIb-aYu1vrMde6rI4Xn9tqKUP8w,156370 +pyarrow/_dataset_orc.cpython-310-x86_64-linux-gnu.so,sha256=lXlfplSxc-2Fq6M--gTNMbJDwPti4MubJPcm7KchpAg,78592 +pyarrow/_dataset_orc.pyx,sha256=JSFoRI0pfHtL2jeIuPg5TJHodcfuCNYmj_iEZ4xY87w,1499 +pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so,sha256=eLKpu7-l4DCQk_iTjyyI1-1EsZ6pecqyRT4Uogqoe_Q,357384 +pyarrow/_dataset_parquet.pxd,sha256=WWTX-c1l5KxnbpoD7RqLG6PjGmFzCjJxc1tqR9bOjj8,1534 +pyarrow/_dataset_parquet.pyx,sha256=STPsXTrY_bJM7TrXnxLEVscx0rpvqsFORq4Du9SpMlo,37525 +pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so,sha256=cSAOv37MMNeBj5t0QnWe3xXZJyFo1wuZbTl172fq_AQ,116120 +pyarrow/_dataset_parquet_encryption.pyx,sha256=VRVZGbADYRptTQvnjENls-JuozBre2Ir_MH0CfSqP00,6973 +pyarrow/_dlpack.pxi,sha256=clw0FkGoyZMEtUU8zPpO_DMtl2X-27kb2UtyhQuIc1s,1832 +pyarrow/_feather.cpython-310-x86_64-linux-gnu.so,sha256=BOFxmVRdcSW6RcKEd7oil3sj0GJhRyoZPsGK7Npn4zU,114336 +pyarrow/_feather.pyx,sha256=DWQI4U0uAWE1ZYUwPreBPJg1TGLEGmF3wPEIRL-PhPw,3773 +pyarrow/_flight.cpython-310-x86_64-linux-gnu.so,sha256=1psTuzivdA2TKzMz-SOxwRvjpCcn_jJ5As2DhNxrOHQ,1291200 +pyarrow/_flight.pyx,sha256=siQR9YhOPLToP5dnHtkm-aCjgPfBLYq8d777RHY_MsY,110592 +pyarrow/_fs.cpython-310-x86_64-linux-gnu.so,sha256=clSVodmgrjBKIKc-FlOlF7NYuT1jAHoV8SOKkWqhBPA,496496 +pyarrow/_fs.pxd,sha256=47IL_nsp3Rr7pTypq8gJ0zRcNDzuG-M2lX7C3ZX9ONQ,2484 +pyarrow/_fs.pyx,sha256=WTKV7Cl29yUyLEVLwfTp8yPlrqrrl1Kj6I_99fj-6NU,52716 +pyarrow/_gcsfs.cpython-310-x86_64-linux-gnu.so,sha256=oNJ0LqgefHNe4jHmkfeiy3rTk5V7IVGDktVbQBb6M70,132080 +pyarrow/_gcsfs.pyx,sha256=fa1QmTQII9sFKtjtfeZPQfTEntAh3IGyJJ1w116OCA4,9121 +pyarrow/_generated_version.py,sha256=q77qgcB8sGiyCItHO79Rra0lvTYndWIb4sme_hosDLc,413 +pyarrow/_hdfs.cpython-310-x86_64-linux-gnu.so,sha256=eb7xyY7NAx2kgFfP7kwvlbk4uNOAYM5EqE-WnC5uw-0,130432 +pyarrow/_hdfs.pyx,sha256=HA0KkZa6aVRmwg3ku3U7lZo_8COn1cLwylfc6nEJUlg,5885 +pyarrow/_json.cpython-310-x86_64-linux-gnu.so,sha256=2VjYQbxbPbz0TzOORO2yC4RBH9jERnMzcE9eVQxRT5s,108024 +pyarrow/_json.pxd,sha256=tECTP14M12-b_ja5QI3snQbd0uWPWmmC9FwkWq23Vg0,1206 +pyarrow/_json.pyx,sha256=RmaWaSTG61u2Qcmc_fLsTns_awLJDls3_SdlaCAg53Y,9860 +pyarrow/_orc.cpython-310-x86_64-linux-gnu.so,sha256=OLrYkWdvOB9vk46crDYx15gMSPgG-zIZWuxSm_YZ3mE,204272 +pyarrow/_orc.pxd,sha256=6hL0cq1RufqQD-B_bV3ne1rhu2g-h4rDOFNQsSb6qps,5689 +pyarrow/_orc.pyx,sha256=Pn7r4dzagWaqMf8rymbXBIWisxonBaStZgXCi7pfrZI,15556 +pyarrow/_parquet.cpython-310-x86_64-linux-gnu.so,sha256=fGu73o5XUTEGlGP9CkqLtLCc9vSYwcOZWSajUSQ4rfA,592944 +pyarrow/_parquet.pxd,sha256=sZ6O-lcYhtlMBx2vyjThxzMsWBgQ3ZEEEmhSxGSKgy0,26419 +pyarrow/_parquet.pyx,sha256=5DHDWSZBCBKcedsPeuMnFlLAMd2N_TXs7xCVtPFLmrY,72349 +pyarrow/_parquet_encryption.cpython-310-x86_64-linux-gnu.so,sha256=MFkCDf01vR_4IiBi_Dk0r6WGSLo57uA6KzPQOeGG02s,280432 +pyarrow/_parquet_encryption.pxd,sha256=1vQnkyS1rLrSNMlmuW62PxkOmCsYpzC60L9mqD0_LYc,2586 +pyarrow/_parquet_encryption.pyx,sha256=CaTiq5EjTVGYQnxDEmpYcItSBiEencV-pNEu-lBAiOk,18625 
+pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so,sha256=AfG_lxSjvaHjaw0XCKWvEcYW4VV-q5gIin4DHfVYCTU,84152 +pyarrow/_pyarrow_cpp_tests.pxd,sha256=nPyRmNtFbOUvSXCwegAApQFfh8UI_K9Hq5dN4oPAxdo,1199 +pyarrow/_pyarrow_cpp_tests.pyx,sha256=gLeMzB9RWodZgXEpipX65_0aqWu12SjMld0JZmZVRP0,1753 +pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so,sha256=9AFLNhRV5to3cjIS9sRdGnvUsh9mjc9qXl4gY57zqzQ,226384 +pyarrow/_s3fs.pyx,sha256=KQnbrlmM8fgjn-8bKJ3cf4f7hLJS4-82o1JrNb-IrRI,18696 +pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so,sha256=Moj_eVecQPmYnGG30UKiYi--SKbOhTxaXxNmQ79oh40,185128 +pyarrow/_substrait.pyx,sha256=CA2kxzxJUVPL7lMn8_XSAa9jt1Alq4IbhcI3sHGvsxw,11630 +pyarrow/acero.py,sha256=B3o5t2VD71XPM5W11S1om7DLrJRVN-JncONov3PlKow,14879 +pyarrow/array.pxi,sha256=58sF-P1c865KKlu_QR_K5I7to9wwsNPrCWjmBkhDcf8,140813 +pyarrow/benchmark.pxi,sha256=DYXdu-jMSH7XcTohbc8x8NiKRLtpX9IULfY20ohkffA,869 +pyarrow/benchmark.py,sha256=k9Z3yQyoojpYz4lTA6DkCfqT6fPG3N2fJtsHKjpbYFo,856 +pyarrow/builder.pxi,sha256=7i2pCqnP3zEnJNR51DsxNzXDmqdqEZ-XHz-lJbms5RQ,4617 +pyarrow/cffi.py,sha256=hEcrPH9KeG6NES3ZCpSbOVYhOgDOuBB_2LgMMucgw-8,2396 +pyarrow/compat.pxi,sha256=Sq5c3CKq0uj5aDyOoHHkPEO_VsSpZ90JRaL2rAKHk5I,1920 +pyarrow/compute.py,sha256=ulXzJqvAG5UrDjC5SxfaRatZ7zFZ4c6qtBhJa05_gSs,23157 +pyarrow/config.pxi,sha256=E6QOFjdlw3H1a5BOAevYNJJEmmm6FblfaaeyspnWBWw,3092 +pyarrow/conftest.py,sha256=3SnBnp7QFvcLw_I4_hN5Tu3qYFo17nCd-womgjblvzo,8665 +pyarrow/csv.py,sha256=S6tm31Bra9HPf9IsYwBLltZBLMvNzypWfeCLySsjmds,974 +pyarrow/cuda.py,sha256=j--8HcBAm5Ib-kbhK4d2M6SVQmDWkr7Mt5fnwU2LzdQ,1087 +pyarrow/dataset.py,sha256=4ibGh9x36jEYI7VMxTdZc-XDg8VfNx6RPbA2L-bLJbA,40232 +pyarrow/error.pxi,sha256=rq2zAPSxmRGLFf0QbiPc3lUP0588R2n8BKyXBZ9vQNo,8713 +pyarrow/feather.py,sha256=9rWL-TYK_qc0FW3vIyYyd6Xt86ApJWLqo-2cK3F5vGQ,9959 +pyarrow/flight.py,sha256=HLB04A0SZ35MZJumPIuBu5I2dpetjEc-CGMEdjQeQRQ,2177 +pyarrow/fs.py,sha256=M-cSbS2bBR4MwbJqpz9Q7VxHY8fa89StEw2J0XMMF7E,14899 +pyarrow/gandiva.pyx,sha256=bF23rkq6e45i-CePDZeTy9iFwoeg8ElrNjz9VK97QRs,24503 +pyarrow/include/arrow/acero/accumulation_queue.h,sha256=_HoTuKEkZodmrwXF9CeWGsmpT7jIM0FrrYZSPMTMMr8,5856 +pyarrow/include/arrow/acero/aggregate_node.h,sha256=9HdFxR6tzSfx_UaUHZtS1I2FCbm3PvfF8FdekVpBO34,2155 +pyarrow/include/arrow/acero/api.h,sha256=fRuKEHbKDYWRCwSHLc7vSD-6mQavyOsztluCR7evFCk,1151 +pyarrow/include/arrow/acero/asof_join_node.h,sha256=Ko6r1wDjxg01FE9-xKkttx7WzCAzf43GxbpvGHgKZp8,1490 +pyarrow/include/arrow/acero/backpressure_handler.h,sha256=NYtW5LH48Pp0C8k014iQ4O4yzHrPi8cjFmCHJLdS4Jc,2821 +pyarrow/include/arrow/acero/benchmark_util.h,sha256=T5bNabF1TDAp28S7V_vt_VIDn6l5Be0zOVCHhcTcFf8,1943 +pyarrow/include/arrow/acero/bloom_filter.h,sha256=GBpLmW44RVUKUuSKejIaRbfIHc_DNBfS_T6QbMTIJ5M,12017 +pyarrow/include/arrow/acero/exec_plan.h,sha256=U0KA3tnNvVb75G0XQFLVbGzXCGdddGyRhW3zMa8oWJc,35909 +pyarrow/include/arrow/acero/hash_join.h,sha256=Ji0k5z778QtNQ0MwU6xBP6z7ajLk79Va-vgCqrlApso,3003 +pyarrow/include/arrow/acero/hash_join_dict.h,sha256=lDITWRMB1vkzh9tyrdn4VsNjTx41WZEFpWLtcYUqud0,15364 +pyarrow/include/arrow/acero/hash_join_node.h,sha256=hWYk1hbqsE-R-qOQORgdPKZ8_IXaVZ2J2ZWk5ZLVcyM,4360 +pyarrow/include/arrow/acero/map_node.h,sha256=Bd1HcW0N5azoIVth2ATeHxgTKd9XmmEkz42YBNw5eK0,2628 +pyarrow/include/arrow/acero/options.h,sha256=r-GnLElNJAAdFoJ7k0Q1TOfvGSOdgT9BrWbdMcS_SF0,37262 +pyarrow/include/arrow/acero/order_by_impl.h,sha256=dQqplP-AZWPZRKio8LmTjYWlCYz9VmW-usUrtaLpd_w,1691 +pyarrow/include/arrow/acero/partition_util.h,sha256=bs_zxok-qng8jsHmVBlfJ7Ts2uBEmovEb27knqQmT-Q,7411 
+pyarrow/include/arrow/acero/pch.h,sha256=8VXXI10rUHzlQiAthx-yjHMQCpGL3dgAiVaGzTubPPE,1094 +pyarrow/include/arrow/acero/query_context.h,sha256=j1ca7kLkFRX-_u3giWWx3fThQFFUk2WLf7-5DIivJEo,6446 +pyarrow/include/arrow/acero/schema_util.h,sha256=KA_hV2xy2TRccMyksSzQrdH9_rdGo3tQyHOIvrWWYBQ,7961 +pyarrow/include/arrow/acero/task_util.h,sha256=6pqILuYfcVwt9HqVhRfXFVJoOC-Q_dtk8mQ5SxjgwbY,3706 +pyarrow/include/arrow/acero/test_nodes.h,sha256=xKeLWZZC8iokveVXPjseO1MOvWMcby-0xiMISy0qw8E,2877 +pyarrow/include/arrow/acero/time_series_util.h,sha256=W9yzoaTGkB2jtYm8w2CYknSw1EjMbsdTfmEuuL2zMtk,1210 +pyarrow/include/arrow/acero/tpch_node.h,sha256=l3zocxHTfGmXTjywJxwoXCIk9tjzURgWdYKSgSk8DAQ,2671 +pyarrow/include/arrow/acero/type_fwd.h,sha256=4zLhtLJf_7MSXgrhQIZVGeLxjT7JrEDAn9yW75DTFlc,1103 +pyarrow/include/arrow/acero/unmaterialized_table.h,sha256=D_PWMgCr-XiQ_lPOjcHzBetcwqX6sYEG2YQ6idYevX4,10295 +pyarrow/include/arrow/acero/util.h,sha256=34jiBZqdddIhqCJLwwLz8SCRp3eZSUo3Plx-Ij5SKiY,6115 +pyarrow/include/arrow/acero/visibility.h,sha256=jA_FJb7WvzEbtMwBu1mOv6VOQUIXNmqf1ptn57Wy3RA,1558 +pyarrow/include/arrow/adapters/orc/adapter.h,sha256=G5SSGGYMSREILC43kqL5fqo94c4tKgukitO15m217tY,11031 +pyarrow/include/arrow/adapters/orc/options.h,sha256=FMxda5YSskRrB6h9FvcAuMxl5qdavWrNYHPlanjtk48,3696 +pyarrow/include/arrow/adapters/tensorflow/convert.h,sha256=ZGFAodnwTJK0ZoXfgYJdjgi_F4vfEhI9E87zejxVb6E,3465 +pyarrow/include/arrow/api.h,sha256=Gs6HiRBYU5N7-a79hjTl9WMSda551XdUKpWthFY2v1s,2491 +pyarrow/include/arrow/array.h,sha256=P5oW6hvD2j97bLaSTE4_UHuV6Y38DTwJVww3Eb3xdTQ,1981 +pyarrow/include/arrow/array/array_base.h,sha256=WeWLxwkqlcs_8fIqVh1PLrGkIB8t9vrFKVk1gXFthVU,11514 +pyarrow/include/arrow/array/array_binary.h,sha256=J61_k59dl06Vf1hAjyj87TXStPsylyFPk9YrYuqrRsU,11395 +pyarrow/include/arrow/array/array_decimal.h,sha256=7NF4N3Q1cTgwKb9WIYJzmHzGDxR7v1uacLcoKYcwLAw,2137 +pyarrow/include/arrow/array/array_dict.h,sha256=6AMbSnZoMj-nhQhZhG4RNnxy9VVPk2DvZjVblwIUhgY,7611 +pyarrow/include/arrow/array/array_nested.h,sha256=Hmk8eOgZ9om2xv5qEMA1YpTL_w9Xd_DyEf-ZDHvdtIE,36459 +pyarrow/include/arrow/array/array_primitive.h,sha256=855A7OSNfItG-_6RyX70z4on8RLd_ujWwVJnOKbQBP0,7500 +pyarrow/include/arrow/array/array_run_end.h,sha256=4zs3tcUrIgDOhSEOywJ1vGY2lsH-5QuEBn87mxnDbi8,5101 +pyarrow/include/arrow/array/builder_adaptive.h,sha256=92DpiIZDXSI_yOrMftj7P60zlCLjNmwfGM5ubdbXWM4,6861 +pyarrow/include/arrow/array/builder_base.h,sha256=pN_nGjN5nHzak50ZKXm956NOWTVNDTZAzwQECcUrHIY,13668 +pyarrow/include/arrow/array/builder_binary.h,sha256=qwb_VcufBGiFlRN89QCK-09qVQKYM3LYHVgne-t-7NU,32662 +pyarrow/include/arrow/array/builder_decimal.h,sha256=nvtgx_oZXWXt9FB3CmsYT1ZAf-xCOJESxhzofDBL8s8,3145 +pyarrow/include/arrow/array/builder_dict.h,sha256=0DZBDPV9QUmUwVgf5gdcIgeWNBCycUOQUp5bzXjYWBM,28055 +pyarrow/include/arrow/array/builder_nested.h,sha256=Dbugg_tizCVTjg-T9mJREY-KoBGDbsgnJYIKZQnXxHk,31308 +pyarrow/include/arrow/array/builder_primitive.h,sha256=8s7QH2Gs12faJhlIAggp9bOJqJ-r2EZdmF_GtGjFT8Y,20771 +pyarrow/include/arrow/array/builder_run_end.h,sha256=SZIdsUKK1qAc9pdonPGf0A_aikZHcxxzicezRGR5hLs,11416 +pyarrow/include/arrow/array/builder_time.h,sha256=8M2ifZnDgujSItXKsevyBdtM6Iky3ImyeIdAqZV3fec,2548 +pyarrow/include/arrow/array/builder_union.h,sha256=8BF532sAMc7JxWIbSN-yX6Z9fqY9jmmsIa054DPvbWE,10144 +pyarrow/include/arrow/array/concatenate.h,sha256=EnLex5JEnfXxkoMD_lXMoA24jO3hdGP3KTQMk3xBEM0,1357 +pyarrow/include/arrow/array/data.h,sha256=fZTeXkUpoEdxgARs0Db-LZQuSFNE5--OJamX9BNzVZY,23382 
+pyarrow/include/arrow/array/diff.h,sha256=bYNKy2oLAxtt6VYDWvCfq2bnJTVNjG5KMTsGl-gT_kM,3344 +pyarrow/include/arrow/array/util.h,sha256=qVHvCaVlALz8WJwAjyMwsBm5J2iN89CSgj7NpmmqlkI,3652 +pyarrow/include/arrow/array/validate.h,sha256=JdDb3XJg4TmAfpv_zgu2ITfL2H9no10TQit-HPj9Myw,1710 +pyarrow/include/arrow/buffer.h,sha256=EfXDyFegRdva4rv4nf0jtErnIrt9_FWoXSHk6OPk_G8,23092 +pyarrow/include/arrow/buffer_builder.h,sha256=tXWILwHW0MKpve7NIU2ElElPY0y0ooISa82Dq6UdhVU,17371 +pyarrow/include/arrow/builder.h,sha256=mBxMko271lJ7Xbku0hCixj943Yx-d2i4Q5Hm2WfwiGM,1546 +pyarrow/include/arrow/c/abi.h,sha256=Dyap7-Sgpj-s0Xn_iA71xu5bUqm07i5_NNGzHUEqjWY,7875 +pyarrow/include/arrow/c/bridge.h,sha256=YB7McGhuCCV8a-SQBk_no_MruW2Q0QZthOaF7_uUNBw,15163 +pyarrow/include/arrow/c/dlpack.h,sha256=_HIa9AKR2mwbhf1aChIpMF_XDpFrPaf58Lt3fVxWRWc,1817 +pyarrow/include/arrow/c/dlpack_abi.h,sha256=KR8otSCnaHCb2IN89S6DH4oNjJFxokvvCcqcMh54Jtg,9900 +pyarrow/include/arrow/c/helpers.h,sha256=2klPp_eX0xh0cO9tiDOOcaAxnYXb7wozORcIrcHYhPQ,4533 +pyarrow/include/arrow/chunk_resolver.h,sha256=pwHItzWC8w286S2Ml0M2yG7oElb4FOiFeBiDk24A9Yk,6456 +pyarrow/include/arrow/chunked_array.h,sha256=wTDvTGr-WIWcktrfne0ZOhN5g1yO_lxsr2nvtAhWMxo,10328 +pyarrow/include/arrow/compare.h,sha256=U5craXnXACCUzQ8HmGYyhTehNrOezcVUP1ABAlxI62E,5555 +pyarrow/include/arrow/compute/api.h,sha256=IQKXz_6YBBfHKOkuqkXIh9ZTZYyVgq7aEBTIzMkZEiI,2071 +pyarrow/include/arrow/compute/api_aggregate.h,sha256=cgXomjDDHoAK_ddzyH1NSqWAewzEYPD7qJBj4x5Rkhk,17173 +pyarrow/include/arrow/compute/api_scalar.h,sha256=ECJKOL3OJ5FK4iPeJ-sL0Uf0_Yd9t5cNsK1-j-uaWqc,66244 +pyarrow/include/arrow/compute/api_vector.h,sha256=qtmp9DEijko5h4r9O4aOXmsRrQkS2ytrzq5yLBIPn-w,28683 +pyarrow/include/arrow/compute/cast.h,sha256=Xw9j03AIAMU_hZiqk9d2ZD4xTmESkfXaDsuZkiTypLs,4245 +pyarrow/include/arrow/compute/exec.h,sha256=0ZAA9_tzcQEr364sjJ3SwgTtURTwtCjRLzo_LOdn960,17969 +pyarrow/include/arrow/compute/expression.h,sha256=llX_81uUIyJ8vPmP8-2mAippyw4cVNhCGfqHRY37FOM,11184 +pyarrow/include/arrow/compute/function.h,sha256=krTXaLowvT1cKhecs70urPQcx74vQCJ4jswtBE4Xs5A,16345 +pyarrow/include/arrow/compute/function_options.h,sha256=Q9rjkXPrU9-Xi64_fMLPbBbW_byhjJFsvHppP1CumdA,3088 +pyarrow/include/arrow/compute/kernel.h,sha256=viDkg08ieQNN-QXB_9aacvYNra751ufRmRjjeARrB0E,31358 +pyarrow/include/arrow/compute/ordering.h,sha256=8Vw3VzDi1mGgVwKGQZakz9TVj0A40wxcL13EvuqNVjU,4129 +pyarrow/include/arrow/compute/registry.h,sha256=x7LHiaNEVvZ0VUssZFsasB52Z1AxRflkdI5tR1hhzqc,4837 +pyarrow/include/arrow/compute/row/grouper.h,sha256=Rd7zt7jUjvgqWr2bsKRub3ZhF00-7UXlQE9TzvpG5Wg,6804 +pyarrow/include/arrow/compute/type_fwd.h,sha256=-O63QUbsxWws8TBi55x6u9FweUSSOOfizhE4pTczLd4,1537 +pyarrow/include/arrow/compute/util.h,sha256=xG95f_YfQo5CDM4O-lR_z9s5IkdP2m-loXNe76hhrYI,11603 +pyarrow/include/arrow/config.h,sha256=8liyKI0CJO0G-Fz5I--QjIAwh0m4hosfyAOwvVVs0sU,3044 +pyarrow/include/arrow/csv/api.h,sha256=LbwWhPyIsi_73hvsSr77RNR9uUxrVyXM__hp7QcSom0,907 +pyarrow/include/arrow/csv/chunker.h,sha256=nTs8hdy4D3Nz3oZWm2JMuA02noY_0pWRYWq_RptqzHY,1171 +pyarrow/include/arrow/csv/column_builder.h,sha256=7oa9YCg2Uc2mB7ExHIyYIvbdt555qLXiU0y4FepkISU,2890 +pyarrow/include/arrow/csv/column_decoder.h,sha256=10idcPJE2V_TbvgjzPqmFy1dd_qSGWvu9eDkenTuCz0,2358 +pyarrow/include/arrow/csv/converter.h,sha256=cjtnz_hZFxm_dWjAMjr1iqqk1egXI2Yb8Bd0xC8md5E,2789 +pyarrow/include/arrow/csv/invalid_row.h,sha256=gTHjEbjkpee6syLGA8hFY7spx1ROMJmtMcwhXv21x5Q,1889 +pyarrow/include/arrow/csv/options.h,sha256=_HkjSoiAPW77z5AHVVnTa452y1KfJgnXWXz2NoPPAYw,7980 
+pyarrow/include/arrow/csv/parser.h,sha256=8PplRh3Qxckk8VPyM70P_f1MBb4WMGnNVpoeJ9kOdHU,8616 +pyarrow/include/arrow/csv/reader.h,sha256=416pt3yNQsgn4RhIyRMsmSJmvv1sw3ouQotubXG91gQ,4606 +pyarrow/include/arrow/csv/test_common.h,sha256=uEYzw8EROvd1QMBQ98d4MaZ7BqMlw2e0flAyz-du0Z4,1972 +pyarrow/include/arrow/csv/type_fwd.h,sha256=ptVbengmY_a7Yz1w0SKmKL16yyw9yEeym0Q0cnRCSV4,984 +pyarrow/include/arrow/csv/writer.h,sha256=hJXNiiGWh60ReCeScmpCLa2lMRsIK_TT6VSSBz2WNao,3525 +pyarrow/include/arrow/dataset/api.h,sha256=e_S0CIztGWNnF7iH5ThCz-6VrZhBdff0nGN1Nin0LgM,1314 +pyarrow/include/arrow/dataset/dataset.h,sha256=sDkJg42vSE05FwRmYi9pes3jD9932X3J8cyYZ3SY2jI,19830 +pyarrow/include/arrow/dataset/dataset_writer.h,sha256=TQV75b_UigfGjIpBnPk8teOncM5WroKfKV15oicBRRY,4589 +pyarrow/include/arrow/dataset/discovery.h,sha256=x7-5NBAyEeQWGlWanJDLZAoWksKiMwM96tlDx_M6n5c,11236 +pyarrow/include/arrow/dataset/file_base.h,sha256=2oe5v8Qy6v_UthJavg9rjU_WuQvwXcJengWwc3sWLqk,20203 +pyarrow/include/arrow/dataset/file_csv.h,sha256=7PlvQW_2FJ5RRN-VH4-OBw5cZ6nkd0KE0sj1TQvCZeo,5016 +pyarrow/include/arrow/dataset/file_ipc.h,sha256=6-btvXhflZsAH90T3wMkwzZkte6T4ixzeCEUn_5uYW8,4083 +pyarrow/include/arrow/dataset/file_json.h,sha256=sPjOeMOtbZZbvOivnOdb4MvYKHltpTnY8fONkhB9PZs,3523 +pyarrow/include/arrow/dataset/file_orc.h,sha256=P7nAD9nacVngDEjH8ChQRt0AQmDg4Z1wBx360LDOoSg,2452 +pyarrow/include/arrow/dataset/file_parquet.h,sha256=bzArl0XrmtTNvWhs6YTkLFxtD8TLbTIJwYmWz3YRm38,16708 +pyarrow/include/arrow/dataset/parquet_encryption_config.h,sha256=Upo0k5MijZaMaRZjPp5Xg8TRt1p8Zwh2c2tdimjVe1A,3425 +pyarrow/include/arrow/dataset/partition.h,sha256=3wrNekD_-fPO1YW91Za-T4muCfQeAX7SZRIcsCN_czI,16815 +pyarrow/include/arrow/dataset/pch.h,sha256=iAE_PbVtKHfhygz7Ox9Z2nlhsIrfageGixGKjlzNRvg,1194 +pyarrow/include/arrow/dataset/plan.h,sha256=IjuR9K2sWD85_2HpVVoJ-3YUCq--UPblHU46exX5qRg,1181 +pyarrow/include/arrow/dataset/projector.h,sha256=KfZijq09Ht0Z2cJHsrjg-sE3SiZ4TKainflReK-39cg,1135 +pyarrow/include/arrow/dataset/scanner.h,sha256=Pt5cNiZ6JdXd0K2ZCqkLwV1VQCTrB8ju0s3RcsAEHJY,24335 +pyarrow/include/arrow/dataset/type_fwd.h,sha256=YOUSRwdNAlXJ7meFLolpAFQ_mSlObs2F81zcOy0DoI4,3170 +pyarrow/include/arrow/dataset/visibility.h,sha256=Hgw4i_M6nvpDfmNYZYF-Fupk0VjDIPJWpWepci2WeQ0,1528 +pyarrow/include/arrow/datum.h,sha256=cx7siEHVHm4yMiINDNc8EYXsNnQ4mlUxq6SBsrcuAbY,11416 +pyarrow/include/arrow/device.h,sha256=m3Ijbr82wT079gP9BeSAKa9txWmi8gsgZD0gc8mcClQ,15452 +pyarrow/include/arrow/engine/api.h,sha256=ORM0M5KQeurjEG8Eoa5IeV_ZgKBRPlWyicyv3ORWkAY,886 +pyarrow/include/arrow/engine/pch.h,sha256=8VXXI10rUHzlQiAthx-yjHMQCpGL3dgAiVaGzTubPPE,1094 +pyarrow/include/arrow/engine/substrait/api.h,sha256=W9NB1RAm0ZVxztRXYA-GD7H8XLQNXFoYT7TdGFHoNTE,1079 +pyarrow/include/arrow/engine/substrait/extension_set.h,sha256=FE6cceycuQv7CCe_Fl4t6tIMRyfoJfWClUhSvHgcm90,21552 +pyarrow/include/arrow/engine/substrait/extension_types.h,sha256=x5ZIuynNh6WFt3wRjW--zUsuC3SeDLk1qRg9_xhswWM,3075 +pyarrow/include/arrow/engine/substrait/options.h,sha256=dtvUty_zoDmcFwVflppiDzelYkeOhCO74uRF6izQSzk,5820 +pyarrow/include/arrow/engine/substrait/relation.h,sha256=V3VKFlDdE61e1OS8LbJiwvm5w0uq5bzBLhKqmgmKaws,2385 +pyarrow/include/arrow/engine/substrait/serde.h,sha256=mjxfuFo4aPhCiwefpKAJMIlknF4UOHSr6gWU__1SwCc,16528 +pyarrow/include/arrow/engine/substrait/test_plan_builder.h,sha256=REFa79D1AOIIjp2Iez73iw5gEnzG9Rac9t8WwiGLsuI,3003 +pyarrow/include/arrow/engine/substrait/test_util.h,sha256=IHZeYrk50Sx9anJfC25DWP6XesItKEywDWUqvUJcjEQ,1517 
+pyarrow/include/arrow/engine/substrait/type_fwd.h,sha256=P9YRjAQpSgoIjDC0siYyxoQzcPVo3r9y85qjiMtudBs,1028 +pyarrow/include/arrow/engine/substrait/util.h,sha256=_dRiQBaIMWNbsYG7kuXhs3dMk4dI63-pM0uSxYPOvgE,3570 +pyarrow/include/arrow/engine/substrait/visibility.h,sha256=GZJ-aQeMgakW526traDzY0Rh-1fXoyRnk63c0J9PdQc,1682 +pyarrow/include/arrow/extension/fixed_shape_tensor.h,sha256=xhFcjqq9UfhJcboY8NZA17RfzLVB9Yuc-1ZA3Kc7QnA,5594 +pyarrow/include/arrow/extension_type.h,sha256=4Vx3Jqf2-QDQd-NYtj85qy1HlJjMs4bpz0BPWcBlOUM,6477 +pyarrow/include/arrow/filesystem/api.h,sha256=egNxiiuzn2BhLCobqaY4utReilgQiC40j8gY7jxjUW8,1377 +pyarrow/include/arrow/filesystem/azurefs.h,sha256=JFUVac4GBXpcQHyBXuNUZqsS-NjCU4_Sh7ICt06CDGA,14560 +pyarrow/include/arrow/filesystem/filesystem.h,sha256=q8I7HFwW2E-JZMX3hCcNZ6qDJFsKu83LWAE5tPexvwQ,28263 +pyarrow/include/arrow/filesystem/filesystem_library.h,sha256=axaof-G9GxBjzXhRIt4azB7HB8VJ49MtGYsL7pSO0A0,1725 +pyarrow/include/arrow/filesystem/gcsfs.h,sha256=wzVfIkqhUp-aw6NFNhMbvl0bczty3HmdiYG36oPCDS8,10533 +pyarrow/include/arrow/filesystem/hdfs.h,sha256=Jn91pjfk6RMx-MuAWsEAKLTyKQ7bDPNA5jMEVzafSgc,4133 +pyarrow/include/arrow/filesystem/localfs.h,sha256=PzqcmYYO-m0Dhn4X0XiLa4VdOJgHWBPHyKZEcZd5nHk,4908 +pyarrow/include/arrow/filesystem/mockfs.h,sha256=kohu7s9s9xtd75sGTE2K_rsHW89swDOtSSSFxBixMcc,4768 +pyarrow/include/arrow/filesystem/path_util.h,sha256=hrDVHk4F9M7oGABB4x2wKfQMjSlSAIS0IaLVv2jHrl4,5698 +pyarrow/include/arrow/filesystem/s3_test_util.h,sha256=ffeqZmR8G8YyzbpUWws2oSEchYPBt254jwOHWdkcWQo,2767 +pyarrow/include/arrow/filesystem/s3fs.h,sha256=22Tqq7XsipgGHbsUzhVq77pAVotCVCQqljTH3I1LrAs,14975 +pyarrow/include/arrow/filesystem/test_util.h,sha256=rnM8D75gQxdOR_sTQZzFxhjsQkD0SlEr0UyTpDxs9NE,11184 +pyarrow/include/arrow/filesystem/type_fwd.h,sha256=zztDER55Wbt4rVnkd-ReeDO-YnrpemftFeFtZ7ZGidY,1462 +pyarrow/include/arrow/flight/api.h,sha256=YotLTQn-KCl6y5BIg8coEFZ9n7PMtJ02ly7Pc5gmX7U,1257 +pyarrow/include/arrow/flight/client.h,sha256=SNZmU3Hfv7BBDlsNPfEPd2RHh_R0WdX36D323YjqfbU,17947 +pyarrow/include/arrow/flight/client_auth.h,sha256=a3Dkm_jPOuqzNsDA4eejuMUwCEBMavM8uS7w81ihbRY,2216 +pyarrow/include/arrow/flight/client_cookie_middleware.h,sha256=5zkCP2SxMFQuTX8N9NHxOve5J_ef2rFO6-xY4Tfnygk,1204 +pyarrow/include/arrow/flight/client_middleware.h,sha256=aAZwCahuiBhP85iMPe7xNWvidBR9KeHGto2YAqJioI4,2948 +pyarrow/include/arrow/flight/client_tracing_middleware.h,sha256=d0sTmUOfq5M9FMliIKK-flJkR6-7r69NjU2TpxhfqWo,1217 +pyarrow/include/arrow/flight/middleware.h,sha256=m2Logo2J4LEo81NRNmzFchsbGXpK9k4o8mG-EBW6mdw,2278 +pyarrow/include/arrow/flight/pch.h,sha256=Dp2nrZ3t_KPjm0cIMyu913BbCorJG5rmbtpfyDN09bo,1192 +pyarrow/include/arrow/flight/platform.h,sha256=6sZyJEX2U3VU8jc9Eh6qmZ33rpQE6egw9uqQIegWFu4,1207 +pyarrow/include/arrow/flight/server.h,sha256=Hb_XJ8ZhJOEnUq4joEEJdJqULWWR_mtAi4ITGl1ojhg,13234 +pyarrow/include/arrow/flight/server_auth.h,sha256=zKQ8lvkMBuMYiIfT1sU0MPXqVPQikaOS3npBgytcaKk,5429 +pyarrow/include/arrow/flight/server_middleware.h,sha256=5DbEVJiE22NSbpVkTxCh3qkPEhz1d5BrtpNJtkNFFy4,4344 +pyarrow/include/arrow/flight/server_tracing_middleware.h,sha256=zR0FFZYGwAAqhzVhPVDjyXfZda9zmLteqauwA5dgR_w,2186 +pyarrow/include/arrow/flight/test_definitions.h,sha256=esAWPIVJxTQqGpPTxa4Dm_HdAnzK-4DoJAb3zFtQBiM,13022 +pyarrow/include/arrow/flight/test_util.h,sha256=HfuSsy5PhEcvITr1vd7SvFUXKugPI5n-pYGSSWB8lEE,9420 +pyarrow/include/arrow/flight/transport.h,sha256=ZZq4886RO_ZcU-yndeXqW_aviE3h2QtHIfMrlWs9Dqw,12265 
+pyarrow/include/arrow/flight/transport_server.h,sha256=iVdXmrb2pemh4o6BxwvB7OZAV4UeoWrbhe4ePZ5Pi4s,5268 +pyarrow/include/arrow/flight/type_fwd.h,sha256=tQFAM3QNKPdzB4VqUGdEUFjNPYXVZLApwGnSus2GQx8,1797 +pyarrow/include/arrow/flight/types.h,sha256=CN-ePLa_8ukCFHJoxFLVxRoFOb_tohVXlSQ1bqvyU5U,40950 +pyarrow/include/arrow/flight/types_async.h,sha256=gRmvjOUxI2s7Y7kfgo6wwAXx-GMeRcFQ8wI_JfZ4iDs,2667 +pyarrow/include/arrow/flight/visibility.h,sha256=Zi4bUs011ZIXE_71X8bUAXXr9WBo7LpXWdU3b4ZTERY,1538 +pyarrow/include/arrow/io/api.h,sha256=Pn4jZSTsLW8MAlMyXUokmJdupX54u154GYI5AvD5ByA,996 +pyarrow/include/arrow/io/buffered.h,sha256=YFKKAHStUFncnfpwnk0XSZAZLeLX-LAXV1qH9VGaE1k,5845 +pyarrow/include/arrow/io/caching.h,sha256=AAjoyKwQ06m2XiglFS6Ch_cdg2p4-wkA7GakGI_eX1E,6708 +pyarrow/include/arrow/io/compressed.h,sha256=3JxIOo1q8VhjIErfwVM5ZLVkwwQKXd-FT5517j58etA,3774 +pyarrow/include/arrow/io/concurrency.h,sha256=7BSmXQGTJKKMpJVtR4hxpp62KPTIKU1W3DfC17SrlmA,7960 +pyarrow/include/arrow/io/file.h,sha256=-ZEklW1Q0sj3pYCQLQ1ebirKd3s2GI3vUEIszFr8mVU,7625 +pyarrow/include/arrow/io/hdfs.h,sha256=2s3f49ggAYgSCsX5SoqnomwsXd24_IZhW-VSBJclqTg,8559 +pyarrow/include/arrow/io/interfaces.h,sha256=QIBHTJUobEkwcqnKMT_GEKu5ArzpeGmK-8v7z4qGHIQ,13428 +pyarrow/include/arrow/io/memory.h,sha256=htc3MmEbEvwc28bLjCtTtt9QcYp-10WKLmX0V9TnwRM,7048 +pyarrow/include/arrow/io/mman.h,sha256=kSDngySxJHw3r3OEonpOFbtlPSJX2p4VdOEM3ImozuA,4109 +pyarrow/include/arrow/io/slow.h,sha256=8-ZjQJq49EQJ4esQ6qHHjlKCeZNg4BSND7ire-ZtLYQ,3942 +pyarrow/include/arrow/io/stdio.h,sha256=dqMTHoJbmiXcyNa2fN60tSWQsx0GPphZVCLdGiZNt8I,2095 +pyarrow/include/arrow/io/test_common.h,sha256=Rj8mwgcUkzksrlBALiAldtr_6JGHJFLh2SztGVkRiSA,2112 +pyarrow/include/arrow/io/transform.h,sha256=W9XWonw69VymQAaQptfW7jD-6ry7VCpfPXlkB7aZzOE,1890 +pyarrow/include/arrow/io/type_fwd.h,sha256=Pi7EFpFvBXsFN1xKOyZjTSP95xNDs6W5hxb5GucoVVE,2315 +pyarrow/include/arrow/ipc/api.h,sha256=olkdu82mTS8hmwD53DBJJL6QQ0YBplhs-s-m4uOInSQ,1007 +pyarrow/include/arrow/ipc/dictionary.h,sha256=UTjZPIG8mLZOk9IW2QnR9RZGr1npexZOp103fv-O70E,6104 +pyarrow/include/arrow/ipc/feather.h,sha256=uCnxwO7eUH18kJ-lWz9IWwSj6AjfejqqLdoifJ-UBDo,4918 +pyarrow/include/arrow/ipc/json_simple.h,sha256=IjFjx6Z7h_WLXt1paVIJboUOTR5GFBhWUhCbm_m9lNk,2455 +pyarrow/include/arrow/ipc/message.h,sha256=KtMCbIC2J4-5iyPG5Sijqu_MALxiuKWBYZhGnw0jxOQ,20011 +pyarrow/include/arrow/ipc/options.h,sha256=X2BbCaQ03S1uqedgLRbvLyfb1PHZ7WGRBjDLLCbQMGE,6888 +pyarrow/include/arrow/ipc/reader.h,sha256=NqdrqqAEItO1ecYUINRO7-qhKlYy-CHSJKGI2hdXlRQ,24106 +pyarrow/include/arrow/ipc/test_common.h,sha256=5w3ZPzzSQsq5wQGHVKoiDynwdhVj3n2Fqz2pi2w6GiA,6185 +pyarrow/include/arrow/ipc/type_fwd.h,sha256=Ty8ET7nLI4JJeTqDMyP0pEH9QVj9xs7BpJkZrnrpaPY,1440 +pyarrow/include/arrow/ipc/util.h,sha256=wTkfC9YFKZlAAjyzlmQVZcW90oOj_JatjDN4qz0IxHg,1414 +pyarrow/include/arrow/ipc/writer.h,sha256=hum8E_orkG_X38vgyfyKhGbyvcLJ3AkXEykyBjAXIYg,18870 +pyarrow/include/arrow/json/api.h,sha256=XRW1fP43zVqwy1yabaKctNK9MDZqnxkoHDH1fx5B3Y4,879 +pyarrow/include/arrow/json/chunked_builder.h,sha256=DDuMwrImMECw6Mhfncn2xMOjkFcKUV1O1597_fSFSAs,2365 +pyarrow/include/arrow/json/chunker.h,sha256=dkZOcxsF1Q3ek58P7IoA8f3lQyBQpFvGSFeynNV2Olc,1119 +pyarrow/include/arrow/json/converter.h,sha256=3lXsP3BSdpLPIkFAJnYW9vP8BbX3neVYR_W0zFKClQ0,3134 +pyarrow/include/arrow/json/object_parser.h,sha256=Y_6Oceya06aUyeo-1k047dm2-JUMJa2_w9iyZ-goIRQ,1627 +pyarrow/include/arrow/json/object_writer.h,sha256=5RmF3f6Z-4E85d-blSoaLYSutAPxfz5OmiqcZJewXuY,1410 
+pyarrow/include/arrow/json/options.h,sha256=EypQgDwLZQbrPnAh45nSPfpGGYrxvLgfp1eAG_l0p3Q,2227 +pyarrow/include/arrow/json/parser.h,sha256=3oIzO5kUs2Takc7t_d5mH7bp1uIcc1M-qbuHmPoSI34,3383 +pyarrow/include/arrow/json/rapidjson_defs.h,sha256=pLbnUIRlYlC_fYoIvivKidMqQNPGMI-aSIf-PVgcE2U,1468 +pyarrow/include/arrow/json/reader.h,sha256=KNO9dCyc2RZs7WxUSEW7bpCYBh_h1C3U52YHYxBnP0M,5212 +pyarrow/include/arrow/json/test_common.h,sha256=YiiY_jswpp7Nu6IW1Y2lBhqWSFRoNaNEy1jHd5qkYHQ,10874 +pyarrow/include/arrow/json/type_fwd.h,sha256=o9aigB5losknJFFei1k25pDVYZgkC2elmRMX1C6aTjo,942 +pyarrow/include/arrow/memory_pool.h,sha256=SjPtWz1tx6Lotr2WeOKCCIw9NQc50Zjez3yzgfr7SDw,11064 +pyarrow/include/arrow/memory_pool_test.h,sha256=qv7csk6hZiO2ELFF-1yukpppjETDDX0nuBFBbPFHtMU,3350 +pyarrow/include/arrow/pch.h,sha256=MaR9bqy2cFZDbjq8Aekq9Gh1vzLTlWZOSHu-GhWP1g8,1286 +pyarrow/include/arrow/pretty_print.h,sha256=ZDlroPRr9_ryCk7h_rjA8pL7BNgaJQ9HnRb2PZU63lg,5529 +pyarrow/include/arrow/python/api.h,sha256=W76VAxYqOxi9BHJddji1B62CmaWDFuBhqI65YOhUnGQ,1222 +pyarrow/include/arrow/python/arrow_to_pandas.h,sha256=jUBEUMKXw70oJdMlgkSf6HitaNweQcc7hxI75_C9WSI,5561 +pyarrow/include/arrow/python/async.h,sha256=C0f8YYmgwBGgDau4xEFsdjukiZB4YvpylETHEZryHOo,2352 +pyarrow/include/arrow/python/benchmark.h,sha256=f-kzyMOlPKDse2bcLWhyMrDEMZrG_JHAPpDJgGW0bXU,1192 +pyarrow/include/arrow/python/common.h,sha256=yjljfJK1f7slZ7DBQ4LTo_pob70zioswJNWazy0p-uM,14412 +pyarrow/include/arrow/python/csv.h,sha256=QxU3B-Hv_RsoEcMGS9-1434ugouL2ygC64Lq6FgviNM,1397 +pyarrow/include/arrow/python/datetime.h,sha256=6g9oTv63tvGlc4WH6kfVtfUocFyy0KYr664SRdb3pes,7927 +pyarrow/include/arrow/python/decimal.h,sha256=kDDjLzW07D7d7omWSR4CBF1Ocskp4YSZu4Dtxu-gRUg,4726 +pyarrow/include/arrow/python/deserialize.h,sha256=Mdt9Lllc8bBwMHEusK0tmEoyuMdGGQuvl6hc6kbb1l8,3889 +pyarrow/include/arrow/python/extension_type.h,sha256=0gzb42y_mbw4fsYs3u8cwPFLBRlG-kkHQLgbvGtrY0U,3181 +pyarrow/include/arrow/python/filesystem.h,sha256=FG0AcLekqaDf9IQPqKixAfIcY_ZLgIKP5NvvXdtBVUM,5126 +pyarrow/include/arrow/python/flight.h,sha256=p-lR6FUbPTil5CE54t7Y8paGtf74iuF1k4R4iqk3QWM,14269 +pyarrow/include/arrow/python/gdb.h,sha256=H-qvM-nU8a_3Z5tk8PvppTwQtBMSZhQKQIVgRAsRfFg,972 +pyarrow/include/arrow/python/helpers.h,sha256=s7l6fUrCyPyrx4kp5WaKaegIAIeW3vbKXq3aWzmoQWM,5441 +pyarrow/include/arrow/python/inference.h,sha256=FUFvB4Zy7V-tueXdmbDcqTeLK4xj5GZEeRW5yhiJlsU,2038 +pyarrow/include/arrow/python/init.h,sha256=3_RJGccmdF2DX2XSDZ1lzDQ2_G9D_uraZFAj3ImAPXs,948 +pyarrow/include/arrow/python/io.h,sha256=4jGnodpSUlnVqAVh9fWId7H4WldlLPkXyroABpdaW6w,3858 +pyarrow/include/arrow/python/ipc.h,sha256=SZbw6jCCqLiLNCY3k632GmwHeD_r_xrDS0dhqV49VhY,2259 +pyarrow/include/arrow/python/iterators.h,sha256=4U6rN8RIfZIjDoC1W4mhnCbo9xLSxBw0nAe3AJGd5vQ,7193 +pyarrow/include/arrow/python/lib.h,sha256=_XgqZ_3T5W9PqqlxWG6Mkekxgy-j7YV7LdBBQzpc6gg,4631 +pyarrow/include/arrow/python/lib_api.h,sha256=3YSCQN-GF_-nWfkpEkQmR1Q-Lrk2XZUqk8o1_ttBkgw,19487 +pyarrow/include/arrow/python/numpy_convert.h,sha256=y13eHwfe1lJKzadoTr2-GyX6xPsE6Z7FN31s7PN-2Rk,4870 +pyarrow/include/arrow/python/numpy_interop.h,sha256=ealkc95wDfJgfj3WLE2ymYSzoZwBXq29w7ttBAIMLbA,3392 +pyarrow/include/arrow/python/numpy_to_arrow.h,sha256=z9KapsuoOSpWILPt9bea7GR4BL6AQ28T6DUO0mSkh3k,2760 +pyarrow/include/arrow/python/parquet_encryption.h,sha256=yjUJwCraWb4dtefBiuepqxwqihjH6QvoscDIl5m_-vo,4819 +pyarrow/include/arrow/python/pch.h,sha256=vkbgStQjq820YeHlXBPdzQ-W9LyzJrTGfMBpnMMqahk,1129 
+pyarrow/include/arrow/python/platform.h,sha256=5uLRFnjSHkajDVstni9BzYwjp_mt6_GD6LTmecEhWW8,1406 +pyarrow/include/arrow/python/pyarrow.h,sha256=TK3BtD9n3QKOQ9dX3LXbQc0hu9alWcufV0O93iQW7B0,2761 +pyarrow/include/arrow/python/pyarrow_api.h,sha256=7l0G4-_m9yALYoifsY8Z6qh3HHD0PgkpVSgCn_JaGU4,867 +pyarrow/include/arrow/python/pyarrow_lib.h,sha256=-70_Ckj3_0ImlzaXSJOE_d3w9pGM66lXiGPyln9c96Y,863 +pyarrow/include/arrow/python/python_test.h,sha256=ea32mM20uHySlygi9MtVxr26O-ydTZHCUQIlxaIMjT4,1195 +pyarrow/include/arrow/python/python_to_arrow.h,sha256=BoVytf6P7PBYXyznchElKZSFvEsFyimB-tLFdw0AUNo,2521 +pyarrow/include/arrow/python/serialize.h,sha256=-THipGzcNDuBPVQ6WNM90oqQh1hBDvbp-YqEC1-gM38,4395 +pyarrow/include/arrow/python/type_traits.h,sha256=B_NsRT_hZG8D91sTcihJyKF5SrslPcFmj12QfbpHuLI,10093 +pyarrow/include/arrow/python/udf.h,sha256=de3R8PhNJO5lT9oCqRxe8e2_SE3jBpHOkwbNqCrlgjQ,3104 +pyarrow/include/arrow/python/visibility.h,sha256=ZCeRVYq2xXWoeqCzO3pzhjgB2jjQtcQNXDLdStu_EJo,1339 +pyarrow/include/arrow/record_batch.h,sha256=e-4AemqWveQ3Yqnc7Vvd3URc9m9ElBPag12L8SyMEAs,15902 +pyarrow/include/arrow/result.h,sha256=1NmZkkVhjVe1CAI7dFXRFdNQefEtk1lxMCF92o41ROE,17739 +pyarrow/include/arrow/scalar.h,sha256=6M9lWl3pzqMqzGeJ90FQGLpdwH8IDNJVPhlG27lZP9g,28343 +pyarrow/include/arrow/sparse_tensor.h,sha256=dd6eQmCjfCmmI76hgsC37R-qPJ11IMhafVaxSo2XJFs,25205 +pyarrow/include/arrow/status.h,sha256=St2Am5Ec7tfNDJ-Q99ohmpflPFp36gvo77T-kkzFCAU,16387 +pyarrow/include/arrow/stl.h,sha256=yGoKi-YUq6DgxkIW27S5B0_rXd2YiUrdzA1YdvHNCHQ,18164 +pyarrow/include/arrow/stl_allocator.h,sha256=TBbvjbuQIH9y88FI2SaqAL7pOIt3wZ1xMKwXqeKNiJE,4956 +pyarrow/include/arrow/stl_iterator.h,sha256=2nzrza4st-mdii2dqBEGCzql07t-M3rbDQjvzm8S7sY,9963 +pyarrow/include/arrow/table.h,sha256=jfiiwdpc0FkVcsyT6jQnb6bFHuxPcUaP_mjzrMCl7fg,14564 +pyarrow/include/arrow/table_builder.h,sha256=LRcLCL2iUrj6vF4f9AjPswVjqtqlMw7z_8VBAfUJeCo,3763 +pyarrow/include/arrow/tensor.h,sha256=0_Y2wq0iFNVPZGb1ZnNa396evrj-jdUTxiMKChI3-LY,8913 +pyarrow/include/arrow/tensor/converter.h,sha256=RZq0Try_kiZ085_d_CvhewMsd57InGb2TCeiveaf-Oo,2891 +pyarrow/include/arrow/testing/async_test_util.h,sha256=IrHWfPeIyhrgeTGHUPLt92LdsofmFX6khjngWsZv3dY,2262 +pyarrow/include/arrow/testing/builder.h,sha256=nda5rtd8Zp_UImXB9PKyW_v2b4XrXP2tRQDzoCwMnZc,8567 +pyarrow/include/arrow/testing/executor_util.h,sha256=38_rF-V_9zF1ttJMspkPiI-34VU1RDjg1ADBS8lUFHk,1885 +pyarrow/include/arrow/testing/extension_type.h,sha256=lcgyg0xXuWEbXBaI5urPTBBoCk7G4u-0TSoj6PKwSBY,6575 +pyarrow/include/arrow/testing/future_util.h,sha256=qIhi417OGMWSMUSDHjkGTYd-ihZbqw8ZSIRwJ01vbKg,6246 +pyarrow/include/arrow/testing/generator.h,sha256=h9Kw9GfDnCHDLl7IsEgaLCi8UDu7R6MHL7Au2TWfMVc,12024 +pyarrow/include/arrow/testing/gtest_compat.h,sha256=77dWG9oyd-h3acAEYbP7IjbeIBoAGXl3CLGLI1DmKnQ,1299 +pyarrow/include/arrow/testing/gtest_util.h,sha256=54hZuuCYNfrdghG3zv0I4BJiw1_KqG6OlLFDvoPyo0U,24343 +pyarrow/include/arrow/testing/matchers.h,sha256=3ys7UI6YpFeMvFCgjmF_VWn1w7Hzhqbr2c-_EuJBpnU,16852 +pyarrow/include/arrow/testing/pch.h,sha256=wKPN4rZnVcQbmpn02Sx5tSa7-MEhpUR1w-YJ6drtyRM,1164 +pyarrow/include/arrow/testing/random.h,sha256=mRrOyZzgD4bCGItddsBgkOXkqokM9sRH2fkv_7AXMw8,35068 +pyarrow/include/arrow/testing/uniform_real.h,sha256=-G_2J9cvevoCtB55vsCsWtJkMUHLIMyOwdT6G8ZW45Y,2970 +pyarrow/include/arrow/testing/util.h,sha256=Vr_F5jZQo6kd2-PBq5M0IjODeuaY7cNU7dDovpnPtLQ,5391 +pyarrow/include/arrow/testing/visibility.h,sha256=C_kt7rpKXh1-X_K0UtonGk9BugajMhyXAmsJp9rvY78,1548 
+pyarrow/include/arrow/type.h,sha256=KvKqmtuL7WX3y5SsXrCInUWHZGAYHYZTdQ4oubLlU-I,93348 +pyarrow/include/arrow/type_fwd.h,sha256=L5jHsDtosCEQWQi6vEavOwifytJDp4vE0qomQRaSQ5k,21853 +pyarrow/include/arrow/type_traits.h,sha256=IHqfM2Rk90UMsNk0B3yKL6JPQ6c2p_FmTJTHu-DCa0I,52883 +pyarrow/include/arrow/util/algorithm.h,sha256=045EVzsC9rThlRVFaCoBmmtWZmFy5y28PR9yapn9sXY,1229 +pyarrow/include/arrow/util/align_util.h,sha256=DG2L24KReTiU8nFpXLigbflkKouKWTPUf6osQs6mxiY,10669 +pyarrow/include/arrow/util/aligned_storage.h,sha256=ZsAqIA3DV3jIhCnC8mmA4J7FCnnQ-CV-gJj_T_pTmsI,4987 +pyarrow/include/arrow/util/async_generator.h,sha256=Akh2LxF-64R66PMp6t2yXfWR0xX0s0kMUvfbh-Nd8g4,77715 +pyarrow/include/arrow/util/async_generator_fwd.h,sha256=Y7EZ4VXdvqp7DnzG5I6rTt123_8kQhAgYIOhNcLvBdA,1737 +pyarrow/include/arrow/util/async_util.h,sha256=1nnAJZ22iK7wSzmvZDo3PMhuWqJIt2qKdlXzTyhoCK4,19759 +pyarrow/include/arrow/util/base64.h,sha256=qzcBE98cg8Tx5iPJAvQ4Pdf2yc6R2r-4yGJS1_DEIeY,1095 +pyarrow/include/arrow/util/basic_decimal.h,sha256=nktXDBaDf821f75Y-34vHwS-qXSLyQKYe9yT1bDvgAU,18793 +pyarrow/include/arrow/util/benchmark_util.h,sha256=SG3gfwE-wGNZAwpL3TvffnSiZGM2cztV5xRBnbqy2Mw,7641 +pyarrow/include/arrow/util/binary_view_util.h,sha256=TICUComIZdYBIzOiRy-mmVgrCOwmThnmLHotBz1s5xw,3829 +pyarrow/include/arrow/util/bit_block_counter.h,sha256=iSIemzizxVokwC0Ze6SjSi-al_nrP2ViXF6JPoIVUWc,20162 +pyarrow/include/arrow/util/bit_run_reader.h,sha256=IWDww6Dm8OFsCRlJ0hEpJKiHMK3nUM3pqbd09mZhcIQ,16616 +pyarrow/include/arrow/util/bit_stream_utils.h,sha256=pv7HmKXJkW_wMRKzf89jUjOKGvkqxj4jxYYViZBm0Ws,17325 +pyarrow/include/arrow/util/bit_util.h,sha256=ta3A0YGhlIw7TAs9acWAWFq19L_R1jvI5U52mgv3HB8,12111 +pyarrow/include/arrow/util/bitmap.h,sha256=qDoNl-S8QFoZ220HsAtAN-s-Xm5JcnjOXNOGdaIssL0,17462 +pyarrow/include/arrow/util/bitmap_builders.h,sha256=zOb7Q-eX9vm9rkgu0Z3ftUDsI1xPthxJ_iC4qDYR1is,1563 +pyarrow/include/arrow/util/bitmap_generate.h,sha256=m6ZsNwx1GhsEktQr63NxXHQkX2B7Nti011XYsPg2xfo,3661 +pyarrow/include/arrow/util/bitmap_ops.h,sha256=87_SXoqmVPRC6umXFitektDCIeI8yOalYWUonzdWjt8,10750 +pyarrow/include/arrow/util/bitmap_reader.h,sha256=pLrMDWhVo-Qb3V1mLASAz_aI6QZxDHRr37EtqxqGd9E,8353 +pyarrow/include/arrow/util/bitmap_visit.h,sha256=myn8k66VrvZnL6R6VW6IDPTfO68VxjbJ8Up5IuSjFL4,3470 +pyarrow/include/arrow/util/bitmap_writer.h,sha256=a4goXhLlY0qcfvYxbfbGD_HZ8Au1wFcbV1tVF3BPaXs,9383 +pyarrow/include/arrow/util/bitset_stack.h,sha256=D49IZZSzZOM2hqh6b-fT0vgRISf1mQnl4oG5nnLBZ4A,2776 +pyarrow/include/arrow/util/bpacking.h,sha256=qiiYXgZLWZcYX6sm75_vBQ6qpHtS1AwasL59YQL2Ptk,1175 +pyarrow/include/arrow/util/bpacking64_default.h,sha256=q7kf_BW62k45v1qMtnJtLIPk8VtJIALc5nXkYmISy3w,196990 +pyarrow/include/arrow/util/bpacking_avx2.h,sha256=ymQJGQc54W3zbrSoktjbAcBnWwbq_SphiXLLI-G6fHg,1009 +pyarrow/include/arrow/util/bpacking_avx512.h,sha256=Z_rAQpiKJEH-9QSHUXpbDmZiAgIm7CPCHfPnwlIZDAE,1011 +pyarrow/include/arrow/util/bpacking_default.h,sha256=nDi4g5JdyWwXa_J3EqE22bG9R4G7Czd6W75F9spRU5U,103760 +pyarrow/include/arrow/util/bpacking_neon.h,sha256=vE-V4E8dpqSjk7dq8kagD07-nhRQKGvcYMhc_dE4nqg,1009 +pyarrow/include/arrow/util/byte_size.h,sha256=Pd2c_3a0IeSOUevhPIlXNkDmgoB06g4c9YCsuRwwSKM,3997 +pyarrow/include/arrow/util/cancel.h,sha256=oW33c4AXSKLHUc5R_1mZ4ssjmLXU_P0Jk6GDO3IwZUo,3651 +pyarrow/include/arrow/util/checked_cast.h,sha256=SR9Qg8NuLSBJw2w1UfgeGvCfT8k7wrbN7BzADQOZfAU,2076 +pyarrow/include/arrow/util/compare.h,sha256=OLrSSyllkY4Sv00IK-37A2d68gr4OwnWJsxn1aF9xTU,1982 
+pyarrow/include/arrow/util/compression.h,sha256=fvlURoWJsgO8Hr6Xs_VNaqiOatmIGn9ktVUkYv7pIu4,8427 +pyarrow/include/arrow/util/concurrent_map.h,sha256=wMi9WDHfRuJ_aSFgcJPpsVwGJ9vIJ5agaZ3rVUlwGe4,1775 +pyarrow/include/arrow/util/config.h,sha256=mOxSnyhdloirkEGXgsIz909jMcR92JmNkQmQnVD9mA0,2239 +pyarrow/include/arrow/util/converter.h,sha256=PILfos6VlnLK6fOFMfLIUhiKl3o1dJo9T4HJXeR7V5E,14637 +pyarrow/include/arrow/util/counting_semaphore.h,sha256=iXHYagqi_-ay73T1uPmv7pG334SY34DUQLSdtD_4_tA,2251 +pyarrow/include/arrow/util/cpu_info.h,sha256=MqLdJabBZkzDjiScaQ7if9dmoAGvXT2QavGoGkho3lU,3964 +pyarrow/include/arrow/util/crc32.h,sha256=4gN0M-SRnxaGKci2ATPbMWZG2TG3YULXjaTpadV0Udk,1337 +pyarrow/include/arrow/util/debug.h,sha256=CPB_oDOuZ_u89e9wM8bGn88mGvClgfa7UDxDph6v9sY,971 +pyarrow/include/arrow/util/decimal.h,sha256=TxhEsMCx4nwrqBzpbMFWTleivyATDrkwFIqm3ZHKJOk,11494 +pyarrow/include/arrow/util/delimiting.h,sha256=JYe9YcWMeFT_ISuojx_VgVqOYLvZ2TiiR2sNn-WdeBQ,7317 +pyarrow/include/arrow/util/dict_util.h,sha256=HipvAVlQ1Q6zNneu9tYOwVUv6NLklBu2IfZ1eoeSpVg,986 +pyarrow/include/arrow/util/dispatch.h,sha256=g6R9w8asCTRyDTFoxUipvdOeh6Ye_FvZBGP6Zwg2t3M,3235 +pyarrow/include/arrow/util/double_conversion.h,sha256=23QU2TFX4hpBZnoqMDyTKxZoH7mU9qkY2vkF1KL8bW4,1243 +pyarrow/include/arrow/util/endian.h,sha256=iIIfAU_NQ-G5-7tiQ6hcN8ZNi8c7-8a42ZQWf4cNPfU,8114 +pyarrow/include/arrow/util/float16.h,sha256=RaJBIWnDdqj7uw2YskxBM0Wlpnrq7QRbMCiTZLr7gJY,7418 +pyarrow/include/arrow/util/formatting.h,sha256=fJIYN0UalTHBV-zFe9VUurM0dUbruPLRGxpXrJbw-do,22188 +pyarrow/include/arrow/util/functional.h,sha256=4ljKXSWX3G_lBT2BfLXuG44pzZwVKeaojpLWCniqKyc,5612 +pyarrow/include/arrow/util/future.h,sha256=8lzJwzmQRWApV7XYRizx81Q2-Lh-T5jxNOL0alCVM1I,32307 +pyarrow/include/arrow/util/hash_util.h,sha256=JoB6fC0Wbk4p5Fkrz3QvwUUiUxiy2_fHesE_j7r3hvM,1914 +pyarrow/include/arrow/util/hashing.h,sha256=mFHU6LK2RoH1drC9CV2Af6x0VPLH0yFrsQ3zo-XoR2k,32890 +pyarrow/include/arrow/util/int_util.h,sha256=zTOAq57M4pUe469WpnW6I5hNtxe3vGRHlZWhngA1DzM,4859 +pyarrow/include/arrow/util/int_util_overflow.h,sha256=AtvkG7v3-1gVzW5SrFrdVkYuXFtT76_nxrKtzIbz_9U,4895 +pyarrow/include/arrow/util/io_util.h,sha256=5VnX4eU74Dr_2dXyBSpYDevlTva4ZgD_un3SwoNF3gQ,13705 +pyarrow/include/arrow/util/iterator.h,sha256=z-PyTb1d5EvBE0qs2s-btqvb5gwDWV7bTjl5JzaTVME,18101 +pyarrow/include/arrow/util/key_value_metadata.h,sha256=wjU6uQGcSmy-YFqMs6rwLP7E4X-0IFBjPrWZstistzQ,3590 +pyarrow/include/arrow/util/launder.h,sha256=C3rNBRh4reuUp8YuRdGQU95WPc8vl4bAY-z5LXgDiuA,1046 +pyarrow/include/arrow/util/list_util.h,sha256=_OmtsDqe-mnZ_7tVWxB2yHdgCJhpiME_RP3nXHzKbdI,2028 +pyarrow/include/arrow/util/logging.h,sha256=hNuWh1jR5DS47XpWQFTXry5DJyuo_5590jBxbBHfD0c,9100 +pyarrow/include/arrow/util/macros.h,sha256=xpwBEodiK1A3rONrrGt4nHG9XTuX1MATZdMyKR5BbHE,6401 +pyarrow/include/arrow/util/map.h,sha256=KbKB3QNc3aWR_0YU1S7aF9fdI0VCABGxEF1VES2oOqU,2476 +pyarrow/include/arrow/util/math_constants.h,sha256=5rV1HCUDq_y4t3dVdzA5EW6wj56mDWsv5tPBTHmNci0,1106 +pyarrow/include/arrow/util/memory.h,sha256=qsxFgvj_wozO5OxIs6fHdcam7aifpozqc1aE81P91Yo,1566 +pyarrow/include/arrow/util/mutex.h,sha256=n4bsrHK2Q8zbYsQEyNaFqNu__vvqgwo1AfrLLCxfkpU,2554 +pyarrow/include/arrow/util/parallel.h,sha256=iZBn0C7HkQhGNKET5WTXCJ2FftcryCZAyBGwcg7qRvo,3616 +pyarrow/include/arrow/util/pcg_random.h,sha256=nbXowfCJFiy4GjVfF9I8VvB6fxkyR5zNB1FKdnFsYTQ,1252 +pyarrow/include/arrow/util/print.h,sha256=X0CfuWzDkq8CNHaEUH3I27Yi4v_zdoOo7sdrTad8Wr0,2444 +pyarrow/include/arrow/util/queue.h,sha256=X9vRZQX3YL_a2Lzwe-zcNNHguR7FoGYmD-Q0THqsCBM,1017 
+pyarrow/include/arrow/util/range.h,sha256=yhe5pJiZIiLUO8tYr408Y9yEsFrFd7FrBMeTL2hAOKY,8526 +pyarrow/include/arrow/util/ree_util.h,sha256=waTBOQfwWGHhoAYHTyyhUnM2BSwOqsof_H_akHvUgno,22395 +pyarrow/include/arrow/util/regex.h,sha256=Tj92CttOh2HxS0EKQ_9-sxMBAsQrDOUKNP0ngIJFdP8,1742 +pyarrow/include/arrow/util/rle_encoding.h,sha256=HJc-YVpSwgv5XqMISRPTwv-CL5NR6O7aXn1IKqj0al4,31035 +pyarrow/include/arrow/util/rows_to_batches.h,sha256=PZNoLeMCfJJdeHVvUny0UHc5AtS0hctUCi7zUztJpeE,7120 +pyarrow/include/arrow/util/simd.h,sha256=mK8BJY0MQ62vhb0mls1ss3la8aOchfPeRarRaUUcaXQ,1227 +pyarrow/include/arrow/util/small_vector.h,sha256=dDNNMFpNdtIbxLP3L-h_bv3A8raYv4IVuyLEzUVMgck,14421 +pyarrow/include/arrow/util/sort.h,sha256=cXZvBN_EcXkN5j0xhX2oNisbChT2QKXP9KzDgjXW2_M,2466 +pyarrow/include/arrow/util/spaced.h,sha256=790FFCTdZA-z6qKuEJM5_wG24SqTTVtyj7PKnLBe7_Q,3567 +pyarrow/include/arrow/util/span.h,sha256=2zDPUc5ciTQovM-T32EZt4iMpqcsoL7Y46ovKjo-7ro,5551 +pyarrow/include/arrow/util/stopwatch.h,sha256=ADGbEEU1x-fvp_NsIdTHH5BW0b9jDB8rTAj1WOgkClc,1401 +pyarrow/include/arrow/util/string.h,sha256=tqv-0HFYpdrXQhkuoi_tUVVe_KwII2ffvYZqCg8jhm0,5754 +pyarrow/include/arrow/util/string_builder.h,sha256=UwOKPz8BQjtl9ecBZ0INoYWMWUkAVQOd_aC8xZZMCgo,2446 +pyarrow/include/arrow/util/task_group.h,sha256=fI330NoJT8u84AEUA6pSxWrE7UBKn2LaM4DfPFoalqA,4362 +pyarrow/include/arrow/util/tdigest.h,sha256=C53yooDZmH5Q-mWIN1PmOtk8fw9QZOe8M2oe22LgfF4,3052 +pyarrow/include/arrow/util/test_common.h,sha256=ZniLT8TvAUdCE2T2YrtlDKdwDNPBpT5e9V1EiPHH9LU,2837 +pyarrow/include/arrow/util/thread_pool.h,sha256=lj2-qrZLIpmzK1wiy3Sqk-vshEsKF3nbs_22Btffuhs,24424 +pyarrow/include/arrow/util/time.h,sha256=4Xi8JzaYlWFxVaenmCJ7orMgu4cuKELvbtMiszuJHUA,2988 +pyarrow/include/arrow/util/tracing.h,sha256=sVfC_Rj2gwkWKVSKT0l0FOO5c2EGsfYwlkZX4d9ncxA,1286 +pyarrow/include/arrow/util/trie.h,sha256=WBvryYO2sNdoPc-UB-XmQ3WzSed79qIsSg7YWCrvwNY,7121 +pyarrow/include/arrow/util/type_fwd.h,sha256=aC3ZZR2FniFUR3InlZDXH8dknZKvmM0RBocHwFKU_Us,1521 +pyarrow/include/arrow/util/type_traits.h,sha256=F0Gdg_3faM0MmZBOXOspRzUwuxnjKbFaVpJiTEaOXGU,1731 +pyarrow/include/arrow/util/ubsan.h,sha256=dJNGOe0smDe1akrYLdYcIbAWDJNS6Z7NRgqgDnr2emc,2765 +pyarrow/include/arrow/util/union_util.h,sha256=PSssBiw-v-PDen_q75c6OkNO5PwyIPhGbf9PMJj7P2M,1211 +pyarrow/include/arrow/util/unreachable.h,sha256=O1TG4ozCYT3_xvDpJouKWrlFADIEpIemQ28y4DqIwu4,1070 +pyarrow/include/arrow/util/uri.h,sha256=D24zebazFcrKGt7iGpkcGQ87DuF-2AbjPKVkDlq9Nuk,3886 +pyarrow/include/arrow/util/utf8.h,sha256=flGZ786kHo33Xg_zw0zVA9GAT8jYdPUHTVhIPHGjOj8,2031 +pyarrow/include/arrow/util/value_parsing.h,sha256=ypbnIIxfFDfDmELinEiS2RYSkeabYDAfuKPW5YsmfRw,29995 +pyarrow/include/arrow/util/vector.h,sha256=qxHjRK_vkXEDyGK7ijDom0E8BHIx-1WLso368A4jqSk,5706 +pyarrow/include/arrow/util/visibility.h,sha256=rxdsOf32jameCdyNg0bV5V11_mPDyq7p_dBIi05ovgQ,2683 +pyarrow/include/arrow/util/windows_compatibility.h,sha256=m0OUxpZobM0vYUyVPRw8-iF9rcif3ZPIe__adJ3WjuE,1225 +pyarrow/include/arrow/util/windows_fixup.h,sha256=Obc15XBzd4Vag6rKP0QOFpdKh2NdMwycASiTTKo3tIA,1379 +pyarrow/include/arrow/vendored/ProducerConsumerQueue.h,sha256=Bz1ks3NDgXXLfT8TMUkE38RpMOSwKRRtwU1e37Y1CUw,6101 +pyarrow/include/arrow/vendored/datetime.h,sha256=lVHO-GyyevnRnc2XmnRS33plbC7FGKcPJk0jnWrgLxw,1017 +pyarrow/include/arrow/vendored/datetime/date.h,sha256=pXYcCe04WF6LoW5uZzm912GVvNtrOBI09sqZJeXZSAY,237582 +pyarrow/include/arrow/vendored/datetime/ios.h,sha256=SSzUcU3-1_slQ-F8dS8MPMdKyhSmXKFmvSiUF3Wuaoo,1679 
+pyarrow/include/arrow/vendored/datetime/tz.h,sha256=mPSfEbZ8arAGtk1XwvVgyRk6aqLX42cFDggb4okFwb0,84867 +pyarrow/include/arrow/vendored/datetime/tz_private.h,sha256=1-h_bcUADjplYsQ4TwuqEeWvVThgHXyLay1wAT8TBlU,10748 +pyarrow/include/arrow/vendored/datetime/visibility.h,sha256=2P38U5rN_wE45fGYqkAqh7P0XLj2eswzz8RgSRJ0c9s,951 +pyarrow/include/arrow/vendored/double-conversion/bignum-dtoa.h,sha256=imGhcg0RywMsFNMYTqp6rlXw2HZCIAla8SC_n92gCqE,4358 +pyarrow/include/arrow/vendored/double-conversion/bignum.h,sha256=RnQ2CPL8Pt6fVCGh_8VDF11e_GyrrwO0IH0uMnTcsEs,5949 +pyarrow/include/arrow/vendored/double-conversion/cached-powers.h,sha256=jjwfR3bue7mNlE5lbTrFR2KlgjRew2OkmjBa7oQO0Qg,3079 +pyarrow/include/arrow/vendored/double-conversion/diy-fp.h,sha256=J-RgqH27jspT5Ubth9pTA9NAZH6e7n1OVhxModgi8Sc,5088 +pyarrow/include/arrow/vendored/double-conversion/double-conversion.h,sha256=J1Tl5-8aFY0A9SnaA9z5Q90jnMxw55illPIuE-jdD5Q,1804 +pyarrow/include/arrow/vendored/double-conversion/double-to-string.h,sha256=C-tKRi0IuLycXgS6CC1oiFkCroOo_-AO0VOjmfe0tlE,23925 +pyarrow/include/arrow/vendored/double-conversion/fast-dtoa.h,sha256=ZAho25fqeP3t2RM0XgqfhTBXQIIicACLpdyHHMRX3JU,4122 +pyarrow/include/arrow/vendored/double-conversion/fixed-dtoa.h,sha256=HLnpxkHjKldm-FBiDRbADYljJBSYbQGP4Gz-sVbiSJU,2828 +pyarrow/include/arrow/vendored/double-conversion/ieee.h,sha256=CVKA9RXSjv4ZygqDHMiF-H2hUh3QHQvp1GZYC3MAhkE,15281 +pyarrow/include/arrow/vendored/double-conversion/string-to-double.h,sha256=Ul6b-2R0pjUaAWNM3Ki4kH933LqrW6_XfPz4BSiE2v8,10906 +pyarrow/include/arrow/vendored/double-conversion/strtod.h,sha256=6xCRm47vmcghYJug5mhhTVbsZ3m3Y6tQfMehEyVZNx0,3096 +pyarrow/include/arrow/vendored/double-conversion/utils.h,sha256=wFRb5cGABiNoUSCnvKmdv_KIMcBtX1PX89tPFfvgbQI,15614 +pyarrow/include/arrow/vendored/pcg/pcg_extras.hpp,sha256=FEYzq8NFxPfdJyLs4kVtTBLkaD6iO71INz9EJnaxTdc,19784 +pyarrow/include/arrow/vendored/pcg/pcg_random.hpp,sha256=7TaV3nZhcwpf6XxlZ6cod1GaW5gm-iUn67t2fiMPNbA,73501 +pyarrow/include/arrow/vendored/pcg/pcg_uint128.hpp,sha256=r8exMtH21S8pjizyZZvP8Q8lAdxkKF22ZEiurSTFtzM,28411 +pyarrow/include/arrow/vendored/portable-snippets/debug-trap.h,sha256=9KphJ9gRtDT9DXR9iZ7aS23xa2T8tLmLsFEJMg0pLDQ,3081 +pyarrow/include/arrow/vendored/portable-snippets/safe-math.h,sha256=q9yWh34bsFu1vSqLTuI3n_cIU4TlY98Lk1elxKHvZP0,48167 +pyarrow/include/arrow/vendored/strptime.h,sha256=q1IZi5CvyUp_PNzbQ4_XLroAV24VEovBEz2TkpwUJ9c,1212 +pyarrow/include/arrow/vendored/xxhash.h,sha256=MUwtyzu7xjkx9mBcS65SaDcCK7tgeqQgj-KYEMxcHWc,844 +pyarrow/include/arrow/vendored/xxhash/xxhash.h,sha256=videnbIaUDw38kaDzbSQjyNwo-NauW4CxOpz3I45nEM,253096 +pyarrow/include/arrow/visit_array_inline.h,sha256=XuQjuME8XZeJp7W86YuCsuoVVgmG1NulXAA0KJkmmB0,2446 +pyarrow/include/arrow/visit_data_inline.h,sha256=NItU41fqR6Y5XesusYJo53kvL4z2i1G7Fe9WVO0vuFI,12405 +pyarrow/include/arrow/visit_scalar_inline.h,sha256=KvNY0j8nE9gs_805LXMV3ATgvxvUqW4UeKpXUxR3rMA,2419 +pyarrow/include/arrow/visit_type_inline.h,sha256=45aoF8APn8hm909nLBngls669o2yKCn24WlL5XdDpa4,4397 +pyarrow/include/arrow/visitor.h,sha256=Fqhj_E88K_VbNYQgYk4W_kzvJxn_8glnlbzv7tn6bDs,8372 +pyarrow/include/arrow/visitor_generate.h,sha256=cZIqcfS4zRgPJDpNe0HMzhnevg3J8zy_dNO29QC85A0,3224 +pyarrow/include/parquet/api/io.h,sha256=Ricq0d2R4QXHiGZCbjxZ_0F_QmKq0IrfTidNu5NoXPI,847 +pyarrow/include/parquet/api/reader.h,sha256=vnM5XDPn1TVsDJk4SDgb3ZU2Ta4vdrRzCpDWO90rYHk,1204 +pyarrow/include/parquet/api/schema.h,sha256=KsNJ529pEh7bGUa0rLUCcfanI9rW2uSTirgpvKq0hdc,855 
+pyarrow/include/parquet/api/writer.h,sha256=UJZbY8QGVRMtAmozzjoM9TnI4gssqlNFUKCXBw2IfuI,1007 +pyarrow/include/parquet/arrow/reader.h,sha256=l4R351BVOWpYJOv_vyqWmXdJUErm2z_ztvTAv537q0w,15305 +pyarrow/include/parquet/arrow/schema.h,sha256=Mi56ul7itNS6NDbMpKOJCufjHVqaSY5_rbsNRNLE560,6204 +pyarrow/include/parquet/arrow/test_util.h,sha256=Dpg-9IyHsCjpKOZ3KJ4Vm0Ip2XXYLPUWTR_6sqv3Z00,20301 +pyarrow/include/parquet/arrow/writer.h,sha256=p6EpISL98afbtfMQjoDNrIDLAf9SowplEQAvaEBIbKQ,7391 +pyarrow/include/parquet/benchmark_util.h,sha256=RhFvoDBVyfd5Sv0fm9JO4JrXWJRGYYmIIrHXi0cSJP0,1756 +pyarrow/include/parquet/bloom_filter.h,sha256=mO3RRfedWWcHiCCTj0FIQ-jQThkWs2Psym67oICdutU,14987 +pyarrow/include/parquet/bloom_filter_reader.h,sha256=63kpHYKs5TPrbRamkBLZsDYbD-I9UeVhF-R8d7JHeLg,2892 +pyarrow/include/parquet/column_page.h,sha256=N0UAaE4_RF6psGs5W3MNOLcG_eY-SvFGcpnbdZCwRk0,6514 +pyarrow/include/parquet/column_reader.h,sha256=wud7kLzVoE6MoARv_CMgRVOlmDmVLuhV6GanmnqjDWs,21549 +pyarrow/include/parquet/column_scanner.h,sha256=HecBvh-z0n_1HJsD-GIdcGHQAvDOHKlLzppB9RBsD9s,8863 +pyarrow/include/parquet/column_writer.h,sha256=-A-fGSU3oFnzQHLGSU7jw5_VDPZeqdA9BwTGe_gK1h0,12588 +pyarrow/include/parquet/encoding.h,sha256=ztv-jselHsgn7h4bLGp6ad17Q2C7zPMbqMEVT2Tw2Eg,16835 +pyarrow/include/parquet/encryption/crypto_factory.h,sha256=RT4iznr6uvSIPbUzh_7s6Cexe8uMbQkzgrjCTGYBC6I,7057 +pyarrow/include/parquet/encryption/encryption.h,sha256=xdyjH-lvAdjiHx0kYz_uc39d9EfnBaMGfLEgdxNw_bk,19652 +pyarrow/include/parquet/encryption/file_key_material_store.h,sha256=YzAVO3M2H5v5Fz2b_WlmB3GE5wVbMEnFTL3S9XPH6k0,2200 +pyarrow/include/parquet/encryption/file_key_unwrapper.h,sha256=pB30St8lGEaEAxNcwnDnlGtATTvc1muMzNOusfgqzT8,4635 +pyarrow/include/parquet/encryption/file_key_wrapper.h,sha256=d2W4xICbSRAy7aPe5RKahhPhiJDfvxHY_v_lifq7wqY,3762 +pyarrow/include/parquet/encryption/file_system_key_material_store.h,sha256=9H1ey0O3LL4dg9VVeFLNxlZ7Vr263JVaZHKVSu4s8MI,3573 +pyarrow/include/parquet/encryption/key_encryption_key.h,sha256=0c3ZrRud2vrCu5z513ocyPYxlsP2kg1fQ8m0Jqr701g,2232 +pyarrow/include/parquet/encryption/key_material.h,sha256=kPTSIuRFYOnH4BCPIB33zG9hp5D2Ba-5kZVlq3rFnRI,6221 +pyarrow/include/parquet/encryption/key_metadata.h,sha256=Pc0nA9LW3Fc9NLMMxz7osbw8si2jSiOVTES-J-9R0y0,4003 +pyarrow/include/parquet/encryption/key_toolkit.h,sha256=HPabI8qFnIMgxZYhHgXCzYV0LU1c5yJ16xjUx21I9b0,4577 +pyarrow/include/parquet/encryption/kms_client.h,sha256=D34pVHzkCbWqKnPIBYfs6cONxmuYzyLSS9-C52ZFhz0,3151 +pyarrow/include/parquet/encryption/kms_client_factory.h,sha256=VZ97CMgDQxx5oZWFGprjXsaM1hZ0wNudPmFU1_lniAc,1293 +pyarrow/include/parquet/encryption/local_wrap_kms_client.h,sha256=XZxkEct0-Tv93VDpda9sDou1kp9qkTKMxr36bpVcI8s,3954 +pyarrow/include/parquet/encryption/test_encryption_util.h,sha256=zIGeULeTOCU1N-XYHdvIppth5wnnTYEwf2h-OuTcQZQ,5209 +pyarrow/include/parquet/encryption/test_in_memory_kms.h,sha256=jYc5WPsrh_wcaaaWcjf23Gbiye3a_bdg2royUfukWEs,3521 +pyarrow/include/parquet/encryption/two_level_cache_with_expiration.h,sha256=cuHbX9gBWWyd0IPXNVjMmHxjPw7omYTns4If4YhBgSM,5075 +pyarrow/include/parquet/encryption/type_fwd.h,sha256=dL8snyUwNjhTQE2FQ2dXAUjTboEXhH2JOehQovHfixc,955 +pyarrow/include/parquet/exception.h,sha256=35JamgHFXBywQt1CKexG-YRNNTh1hmKFHVASQ4-WkOE,5597 +pyarrow/include/parquet/file_reader.h,sha256=OFRKhwAww2N24aZOZcznzral1Or1TGIFGRd1aACARLQ,9664 +pyarrow/include/parquet/file_writer.h,sha256=gL6WCTDJnNe_og_Q1Hz3ZWqVmdxJE8UNGaKH0oV_5Kw,9340 +pyarrow/include/parquet/hasher.h,sha256=HSY1EjPD2xx_dB9HtAg-lXL7hB4j9MDE0cAlR7u0NOc,5227 
+pyarrow/include/parquet/level_comparison.h,sha256=5z4fUJJPWq9W60l2CsAI7T7E2auGYD7m0fpR5rfLmsw,1306 +pyarrow/include/parquet/level_comparison_inc.h,sha256=YId1vMukGFmnON8HB7SZ6mVK7xj7AU_xs4OvvUMCgSs,2492 +pyarrow/include/parquet/level_conversion.h,sha256=OsuqK1xiUnEnOLPKwfm9X-pXTaXRMlDIkj3lwGb2ggI,9432 +pyarrow/include/parquet/level_conversion_inc.h,sha256=vOC0sBiaW89xfvIB67r5zDDGPFBjAPezLEdqsZWCX7s,14147 +pyarrow/include/parquet/metadata.h,sha256=3TfTUjof2LJJf4s9MvGPeZKEJdbNb_2sB5bjwFrfeeA,21056 +pyarrow/include/parquet/page_index.h,sha256=qBKqiq131jCUrtFCfwlBkeb8PL96yOPKg7AqkslnM60,16399 +pyarrow/include/parquet/parquet_version.h,sha256=1T8V967-JhLrB7nARPaeBiVke4NRXzE9wuJ3VfXsBuU,1164 +pyarrow/include/parquet/pch.h,sha256=zIdkjZS4kuFYra3woGMjmvYXCwB4IaXdpm_nR5Nz8hk,1249 +pyarrow/include/parquet/platform.h,sha256=78EMbNC5KaKv9IH0gj8Pwag7TxFItJE4Gxrjxp7JEpk,3820 +pyarrow/include/parquet/printer.h,sha256=_sJ5IoEj4naSTWxlhbq2Pc6WkNG3wMuxRy8zfKfsAJ8,1540 +pyarrow/include/parquet/properties.h,sha256=SYLfEXkF_y-2l8aVqr95ykDbsYADuzANl81NIrX7QkM,45676 +pyarrow/include/parquet/schema.h,sha256=YrlxSFY2STcgKx9Gr6oZpwf4-CVXJ45td4Xv38ONvhQ,18129 +pyarrow/include/parquet/statistics.h,sha256=Bcby1WIkko8RcTD7x8PfTpRIAcoZ9B-PfH-6LELNqqA,15175 +pyarrow/include/parquet/stream_reader.h,sha256=1WmN0vYCqTz1Lwb_Di4xPWTE-VbCQQuzZralSpWQm3U,8791 +pyarrow/include/parquet/stream_writer.h,sha256=nw_v3nhrL682ozZ2KZKVkHnOsjwexbmBXTV2CKcq4YQ,7505 +pyarrow/include/parquet/test_util.h,sha256=gkJoOl_N4cG3L56uXVJi1RLiDVBl73yX01Dkx2Plt9g,31180 +pyarrow/include/parquet/type_fwd.h,sha256=qx6Dhg1HO0U99jdiUfu3rC7zhmQ-3i7WXsfEhrza3rE,3046 +pyarrow/include/parquet/types.h,sha256=-zb_821HBHg7tvgfvA1OnaL7K9Wq0cya5oou5ZZESTY,25254 +pyarrow/include/parquet/windows_compatibility.h,sha256=xIEGHW354URgdIP9A4V303TJL8A1IkCEvp08bMKsHTU,897 +pyarrow/include/parquet/windows_fixup.h,sha256=Fzu3C5JssjEpTpLJu73ETKNJSUfA-qSH4-jPjGxhmZ4,1044 +pyarrow/include/parquet/xxhasher.h,sha256=QAa7ZE7S3UFtU_Voz3oi3YclIYhbhviJkafLOYgiuWg,2074 +pyarrow/includes/__init__.pxd,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pyarrow/includes/common.pxd,sha256=tYI1M3gk_d-uzNUpLcIxhNG5W67ycFSVb36Tv7hyN30,5452 +pyarrow/includes/libarrow.pxd,sha256=ZvpTmEuG-xNapBytyKwgKHr5FVGJQNLeXZT_G6ulOWM,110893 +pyarrow/includes/libarrow_acero.pxd,sha256=c84RdYfIuFWW_36-1RELJsowfQwXhgUxbdC_xKQyFCI,5298 +pyarrow/includes/libarrow_cuda.pxd,sha256=uq_aA-CyWxWrww427RooWleBzT1c9Et4XWFnByh_Nh4,4841 +pyarrow/includes/libarrow_dataset.pxd,sha256=Qo8zXs1RbjTe9Ja6FSDm-_ujKRadNblfFnX2Kz95cIc,16786 +pyarrow/includes/libarrow_dataset_parquet.pxd,sha256=4me_u82JiInHNRvoazLXUTOO5sxVnyCk-BdfsYQZyWQ,4536 +pyarrow/includes/libarrow_feather.pxd,sha256=MTJUDQbfKP8Ir700Fobl7xcbjX7WcrsUV4mxFXlfwn0,2140 +pyarrow/includes/libarrow_flight.pxd,sha256=pcVtpB4Rx81RZoG3afIizmyQuTnckrqIPZyjvsIYYKE,24860 +pyarrow/includes/libarrow_fs.pxd,sha256=hAxZqAGpG5w6Gl_peszkQAHNtLuYgaHVxZPPjnhvW6w,15178 +pyarrow/includes/libarrow_python.pxd,sha256=1YXhX2LrDOYnkjqSgQ6R4B_2vho6QGivAlPrtb1PDeA,12251 +pyarrow/includes/libarrow_substrait.pxd,sha256=5ZJ0yHhM54I1GfmUaPMy5nRxLFsr-A625qUSmOhnQO8,3196 +pyarrow/includes/libgandiva.pxd,sha256=FLBd99IeU67Db9SnHS7oe6FgBZ1aIHuRc0pOiDv7hQc,11538 +pyarrow/includes/libparquet_encryption.pxd,sha256=fi3QrLpHN1_IaYRXvVMJdIgp7F_6aaLu1owP0I3BD5g,5898 +pyarrow/interchange/__init__.py,sha256=DH0bwbKpdjD1WCW1VinnXEuVLY098uHKkirv7DFc9JM,845 +pyarrow/interchange/__pycache__/__init__.cpython-310.pyc,, +pyarrow/interchange/__pycache__/buffer.cpython-310.pyc,, 
+pyarrow/interchange/__pycache__/column.cpython-310.pyc,, +pyarrow/interchange/__pycache__/dataframe.cpython-310.pyc,, +pyarrow/interchange/__pycache__/from_dataframe.cpython-310.pyc,, +pyarrow/interchange/buffer.py,sha256=NF_GU1uQ6INqHqCwzY6XQQqRxKDh6znEeDHiRqaEIQ0,3359 +pyarrow/interchange/column.py,sha256=afU794n3H7yf4gDQDuFLbtyDlgVnLk9iZ6sugb0h8_4,19370 +pyarrow/interchange/dataframe.py,sha256=tmSMmBvBAc-ZSUzE8tBNbvQLHuuxLuBkMkK6KYwtS8M,8405 +pyarrow/interchange/from_dataframe.py,sha256=JfkP4wuY_9x76H6RDtmsOzs6B6qe-1WS7zxpKeD481s,19709 +pyarrow/io.pxi,sha256=5PQjZeMPgoZOn_dgkpzHLbAuNWKv2OFmVI31R_7Ubcc,83485 +pyarrow/ipc.pxi,sha256=rOpeOF8BhxlgxtbIoFTnhqtjdkf7qROy-4wIIWOcxtA,40847 +pyarrow/ipc.py,sha256=Hb3qCPKRr_wth5u4WrkZHJyAZmIK5SoVSezfBOI97Ww,10107 +pyarrow/json.py,sha256=N9Y7_3TSrOEDy2OrmgQ8UKqUPMx1Bm9dYgot-brJ8Xw,858 +pyarrow/jvm.py,sha256=tzAsIrMSCIeNAtSC8lZWjQS0rq7kjaQDPlePDmvpqDw,9593 +pyarrow/lib.cpython-310-x86_64-linux-gnu.so,sha256=O86KsstN2M6dhJ-asloKRpBD4WkoVFOY0hpq-7EnoF0,4528352 +pyarrow/lib.h,sha256=_XgqZ_3T5W9PqqlxWG6Mkekxgy-j7YV7LdBBQzpc6gg,4631 +pyarrow/lib.pxd,sha256=9MsvXe9jR5SQEDridrCTMoUea-hBHESknxw-FMNpHCQ,16918 +pyarrow/lib.pyx,sha256=ueJwpi2TYGu42uNKvPnv36QsWKkvq69eUFNWS9WB8Y0,4873 +pyarrow/lib_api.h,sha256=3YSCQN-GF_-nWfkpEkQmR1Q-Lrk2XZUqk8o1_ttBkgw,19487 +pyarrow/libarrow.so.1600,sha256=2FpKbRUO_O55xM1TyIpaMf0_b2794-e9Q5zY9IgwJK4,67913016 +pyarrow/libarrow_acero.so.1600,sha256=4h1tjdqnBGRJkZoTM0aOCfptVJ8PZtmq_WgvnJeauag,2077120 +pyarrow/libarrow_dataset.so.1600,sha256=0jXN-0yECYiqSq58S4uOIfFSSraj5Clq_YVMfTPTr5Y,2753864 +pyarrow/libarrow_flight.so.1600,sha256=9zgEltL931G50f0GcRF8irB_LK5rKw3AGGpGyruOeFk,19654024 +pyarrow/libarrow_python.so,sha256=FKhyLX7SPvYGZXcrOrnwleWJgQYcSZJe0PWKffpSnYw,2845200 +pyarrow/libarrow_python_flight.so,sha256=g7VL4Uxa7w66WqgpYNC6cdlC5bm6-RbS0riadBVWI4c,117384 +pyarrow/libarrow_python_parquet_encryption.so,sha256=s9TQQp1aX2mcLRTXBBqosXIGDAMJXGUn62SoKA2iLa8,37680 +pyarrow/libarrow_substrait.so.1600,sha256=qHs-WskZBPyazoZQBS1wRxHddFY6-5yvmMpYm3GZVJI,5332496 +pyarrow/libparquet.so.1600,sha256=Ddm3SbwCbr8jbrYFcboTVSZqmGIQiSu4qvKmvrgUa9g,10932648 +pyarrow/memory.pxi,sha256=9AVMENxqaV0Ndf9tYSiakunEpMRRCZNT9d-PnrY8r14,8229 +pyarrow/orc.py,sha256=IjjeGAEZl0KhHvwy3YsSGfTWlx7Ilb54P0tFKPvwcfk,12618 +pyarrow/pandas-shim.pxi,sha256=d3Z0mki6n3QUTzCOJoEhvgUBcCIcWPsuBli65ZQ_gBg,8178 +pyarrow/pandas_compat.py,sha256=voSdrJhZyfPWywQh8S38aVas7h1NMQAlfaSVa-cGASw,41260 +pyarrow/parquet/__init__.py,sha256=4W64CbvwvO60tG58nfNtyCwMVCfuPumtu82p-kiGPaE,822 +pyarrow/parquet/__pycache__/__init__.cpython-310.pyc,, +pyarrow/parquet/__pycache__/core.cpython-310.pyc,, +pyarrow/parquet/__pycache__/encryption.cpython-310.pyc,, +pyarrow/parquet/core.py,sha256=wgYvAIxxm8CjFtxOt6WsOVfO3xJeIeyIh7lwGfjC07c,88815 +pyarrow/parquet/encryption.py,sha256=-XW7Qcbl-jQhpZsR610uQ8-z9ZVE_NL045Jdnp1TZ9M,1153 +pyarrow/public-api.pxi,sha256=KoOChsKDEBdIs6omYWdgLOVYaJ9DmW4U-iW3JvNwaeM,13459 +pyarrow/scalar.pxi,sha256=6o1zTwl2TfYw0CsOfRMMyeCR9cH4MMHwlt38OyTVmZU,34501 +pyarrow/src/arrow/python/CMakeLists.txt,sha256=xDTvjlANKrngvBpZYNrR1lb1LduggXLAYe81ujk9poA,828 +pyarrow/src/arrow/python/api.h,sha256=W76VAxYqOxi9BHJddji1B62CmaWDFuBhqI65YOhUnGQ,1222 +pyarrow/src/arrow/python/arrow_to_pandas.cc,sha256=6d5ecl2aET0C1brkGX0P3DImmPdzzqQhFpmVWLgx62Y,95493 +pyarrow/src/arrow/python/arrow_to_pandas.h,sha256=jUBEUMKXw70oJdMlgkSf6HitaNweQcc7hxI75_C9WSI,5561 +pyarrow/src/arrow/python/arrow_to_python_internal.h,sha256=nQXPZTL3xa4Sm-a-Gv-8bpFs-qAOZHkqWmA_m-dSLVw,1740 
+pyarrow/src/arrow/python/async.h,sha256=C0f8YYmgwBGgDau4xEFsdjukiZB4YvpylETHEZryHOo,2352 +pyarrow/src/arrow/python/benchmark.cc,sha256=z6qYRx4qMuNXPaC8fuPJlQd92aosMN85u1aD50R1-UU,1293 +pyarrow/src/arrow/python/benchmark.h,sha256=f-kzyMOlPKDse2bcLWhyMrDEMZrG_JHAPpDJgGW0bXU,1192 +pyarrow/src/arrow/python/common.cc,sha256=_9ozIRo_WTDWovBKqOVyX28d0IttHvwW9MG-PkTzmKc,7591 +pyarrow/src/arrow/python/common.h,sha256=yjljfJK1f7slZ7DBQ4LTo_pob70zioswJNWazy0p-uM,14412 +pyarrow/src/arrow/python/csv.cc,sha256=ql5AY76AqiFksWsrmzSl551k5s9vS8YcmypM2A9rhw8,1803 +pyarrow/src/arrow/python/csv.h,sha256=QxU3B-Hv_RsoEcMGS9-1434ugouL2ygC64Lq6FgviNM,1397 +pyarrow/src/arrow/python/datetime.cc,sha256=_VKRKeyFqR7Xzay2wazcveb7mgOv8K37ebMomNY__lQ,23001 +pyarrow/src/arrow/python/datetime.h,sha256=6g9oTv63tvGlc4WH6kfVtfUocFyy0KYr664SRdb3pes,7927 +pyarrow/src/arrow/python/decimal.cc,sha256=66Hy-u-_fcZtm_0v7npDtPNoiX-mkRJTwCj3FpSyIqc,8848 +pyarrow/src/arrow/python/decimal.h,sha256=kDDjLzW07D7d7omWSR4CBF1Ocskp4YSZu4Dtxu-gRUg,4726 +pyarrow/src/arrow/python/deserialize.cc,sha256=A4xx0gdlwPpi-n7iDUiRRcF_NnnHvmw_1d8paKMryVU,18945 +pyarrow/src/arrow/python/deserialize.h,sha256=Mdt9Lllc8bBwMHEusK0tmEoyuMdGGQuvl6hc6kbb1l8,3889 +pyarrow/src/arrow/python/extension_type.cc,sha256=eU5P7pufWjcEcmVeOyu1jtEZ08AWd9tkTSMfx8ph0rQ,6860 +pyarrow/src/arrow/python/extension_type.h,sha256=0gzb42y_mbw4fsYs3u8cwPFLBRlG-kkHQLgbvGtrY0U,3181 +pyarrow/src/arrow/python/filesystem.cc,sha256=0twavI91TE20Otq5kkVUwnN5sindU_mBWoVAvz1ZMgI,6152 +pyarrow/src/arrow/python/filesystem.h,sha256=FG0AcLekqaDf9IQPqKixAfIcY_ZLgIKP5NvvXdtBVUM,5126 +pyarrow/src/arrow/python/flight.cc,sha256=Iz4wAyhX7mksabELtRljCOsXRRzuYzu38Rv_yQKJarw,13995 +pyarrow/src/arrow/python/flight.h,sha256=p-lR6FUbPTil5CE54t7Y8paGtf74iuF1k4R4iqk3QWM,14269 +pyarrow/src/arrow/python/gdb.cc,sha256=K9oU5478rkOMdgninXaAAVIt5xkSQrbTZugNKHwVl04,23333 +pyarrow/src/arrow/python/gdb.h,sha256=H-qvM-nU8a_3Z5tk8PvppTwQtBMSZhQKQIVgRAsRfFg,972 +pyarrow/src/arrow/python/helpers.cc,sha256=EzpRdUijX5aq8ZXcmlaeX8ywP_POWsV1A7r6ubHUpiw,15941 +pyarrow/src/arrow/python/helpers.h,sha256=s7l6fUrCyPyrx4kp5WaKaegIAIeW3vbKXq3aWzmoQWM,5441 +pyarrow/src/arrow/python/inference.cc,sha256=_VPNqgPWeRll0Pq9boH2LEBLHgzb2xCEWvvWQ2phy8c,24320 +pyarrow/src/arrow/python/inference.h,sha256=FUFvB4Zy7V-tueXdmbDcqTeLK4xj5GZEeRW5yhiJlsU,2038 +pyarrow/src/arrow/python/init.cc,sha256=yTehLgcTHh9i4nae-i2jj7iKDIrprMyspj9VgKehcN4,1022 +pyarrow/src/arrow/python/init.h,sha256=3_RJGccmdF2DX2XSDZ1lzDQ2_G9D_uraZFAj3ImAPXs,948 +pyarrow/src/arrow/python/io.cc,sha256=ZARQCv4WQmHDQrA1dlNZt6mJuPhyK8wNuGm7zoL6V78,11936 +pyarrow/src/arrow/python/io.h,sha256=4jGnodpSUlnVqAVh9fWId7H4WldlLPkXyroABpdaW6w,3858 +pyarrow/src/arrow/python/ipc.cc,sha256=suovnx0AJ4VvcjxUXMC3uHTzHSDKeLFAjRo-ROEjbV4,4370 +pyarrow/src/arrow/python/ipc.h,sha256=SZbw6jCCqLiLNCY3k632GmwHeD_r_xrDS0dhqV49VhY,2259 +pyarrow/src/arrow/python/iterators.h,sha256=4U6rN8RIfZIjDoC1W4mhnCbo9xLSxBw0nAe3AJGd5vQ,7193 +pyarrow/src/arrow/python/numpy_convert.cc,sha256=kbZhod1VYigITx6FyDpPX7F7wp3Y0A8sDTbm3_2ofhU,20909 +pyarrow/src/arrow/python/numpy_convert.h,sha256=y13eHwfe1lJKzadoTr2-GyX6xPsE6Z7FN31s7PN-2Rk,4870 +pyarrow/src/arrow/python/numpy_internal.h,sha256=o9G402jSAhaFmHKfB9B--8AA_FomLrvUBLX6Aro5JYM,5072 +pyarrow/src/arrow/python/numpy_interop.h,sha256=ealkc95wDfJgfj3WLE2ymYSzoZwBXq29w7ttBAIMLbA,3392 +pyarrow/src/arrow/python/numpy_to_arrow.cc,sha256=Amq3IOBzbMj9kfBnCngOVgsgFxD8N9e-OsyZ9hu1K5M,30126 +pyarrow/src/arrow/python/numpy_to_arrow.h,sha256=z9KapsuoOSpWILPt9bea7GR4BL6AQ28T6DUO0mSkh3k,2760 
+pyarrow/src/arrow/python/parquet_encryption.cc,sha256=RNupwaySaVHKX_iCYOPK0yJWkTUpqbrpbCW2duWJ3kU,3567 +pyarrow/src/arrow/python/parquet_encryption.h,sha256=yjUJwCraWb4dtefBiuepqxwqihjH6QvoscDIl5m_-vo,4819 +pyarrow/src/arrow/python/pch.h,sha256=vkbgStQjq820YeHlXBPdzQ-W9LyzJrTGfMBpnMMqahk,1129 +pyarrow/src/arrow/python/platform.h,sha256=5uLRFnjSHkajDVstni9BzYwjp_mt6_GD6LTmecEhWW8,1406 +pyarrow/src/arrow/python/pyarrow.cc,sha256=Pul4lmF7n5Q9cSzgBSvPArWfZY_qDyAq1a_tyMIQGRA,3677 +pyarrow/src/arrow/python/pyarrow.h,sha256=TK3BtD9n3QKOQ9dX3LXbQc0hu9alWcufV0O93iQW7B0,2761 +pyarrow/src/arrow/python/pyarrow_api.h,sha256=7l0G4-_m9yALYoifsY8Z6qh3HHD0PgkpVSgCn_JaGU4,867 +pyarrow/src/arrow/python/pyarrow_lib.h,sha256=-70_Ckj3_0ImlzaXSJOE_d3w9pGM66lXiGPyln9c96Y,863 +pyarrow/src/arrow/python/python_test.cc,sha256=9sN0UZ_ywBBVnm-O5RikTHpWz_Rn5sSMxue8TGnb2Cw,32382 +pyarrow/src/arrow/python/python_test.h,sha256=ea32mM20uHySlygi9MtVxr26O-ydTZHCUQIlxaIMjT4,1195 +pyarrow/src/arrow/python/python_to_arrow.cc,sha256=6ZLFOU_iG4I_ay8blWygWw00Bu-8wRIN88AA0VOz6E4,46783 +pyarrow/src/arrow/python/python_to_arrow.h,sha256=BoVytf6P7PBYXyznchElKZSFvEsFyimB-tLFdw0AUNo,2521 +pyarrow/src/arrow/python/serialize.cc,sha256=FOAsdyfRETe_bCSxC1vc3-oq9Rs9SsU4kDQFTwrdvQM,32667 +pyarrow/src/arrow/python/serialize.h,sha256=-THipGzcNDuBPVQ6WNM90oqQh1hBDvbp-YqEC1-gM38,4395 +pyarrow/src/arrow/python/type_traits.h,sha256=B_NsRT_hZG8D91sTcihJyKF5SrslPcFmj12QfbpHuLI,10093 +pyarrow/src/arrow/python/udf.cc,sha256=BplT3T9FTHI3xhWnTlq9G_6DlRT8tHs18vm0DaqHrRo,30549 +pyarrow/src/arrow/python/udf.h,sha256=de3R8PhNJO5lT9oCqRxe8e2_SE3jBpHOkwbNqCrlgjQ,3104 +pyarrow/src/arrow/python/visibility.h,sha256=ZCeRVYq2xXWoeqCzO3pzhjgB2jjQtcQNXDLdStu_EJo,1339 +pyarrow/substrait.py,sha256=ugd_UrjkUIrwSvqFxLl9WkVtBZ2-hcgt5XiSVYvDLnQ,1151 +pyarrow/table.pxi,sha256=_PDt0D3PzcWIVbUyihIrVUWYhx-gY_NvW4KT-4Bp4UA,192792 +pyarrow/tensor.pxi,sha256=uZrczmJ4qiEoaS-NZpAu3IHi4YZ98V6NZqL7FfXxuE4,41436 +pyarrow/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pyarrow/tests/__pycache__/__init__.cpython-310.pyc,, +pyarrow/tests/__pycache__/arrow_16597.cpython-310.pyc,, +pyarrow/tests/__pycache__/arrow_39313.cpython-310.pyc,, +pyarrow/tests/__pycache__/arrow_7980.cpython-310.pyc,, +pyarrow/tests/__pycache__/conftest.cpython-310.pyc,, +pyarrow/tests/__pycache__/pandas_examples.cpython-310.pyc,, +pyarrow/tests/__pycache__/pandas_threaded_import.cpython-310.pyc,, +pyarrow/tests/__pycache__/read_record_batch.cpython-310.pyc,, +pyarrow/tests/__pycache__/strategies.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_acero.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_adhoc_memory_leak.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_array.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_builder.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_cffi.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_compute.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_convert_builtin.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_cpp_internals.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_csv.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_cuda.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_cuda_numba_interop.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_cython.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_dataset.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_dataset_encryption.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_deprecations.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_dlpack.cpython-310.pyc,, 
+pyarrow/tests/__pycache__/test_exec_plan.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_extension_type.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_feather.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_flight.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_flight_async.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_fs.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_gandiva.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_gdb.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_io.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_ipc.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_json.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_jvm.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_memory.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_misc.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_orc.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_pandas.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_scalars.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_schema.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_sparse_tensor.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_strategies.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_substrait.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_table.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_tensor.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_types.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_udf.cpython-310.pyc,, +pyarrow/tests/__pycache__/test_util.cpython-310.pyc,, +pyarrow/tests/__pycache__/util.cpython-310.pyc,, +pyarrow/tests/arrow_16597.py,sha256=DNb41h9E3ITGvAJJu86i5SfsKrwstQJ0E5gT_bpTS_k,1354 +pyarrow/tests/arrow_39313.py,sha256=0pyBixoX38fldTPO1Vwshi_H0XBACrz8esYoL4o71KI,1431 +pyarrow/tests/arrow_7980.py,sha256=tZKb_tRLfxHaosDk9Yu2GLEsJjMaruXD5CKhbK_6Hq8,1094 +pyarrow/tests/bound_function_visit_strings.pyx,sha256=vDEFoNYR8BWNkCntKDuBUT8sXNRBex_5G2bFKogr1Bs,2026 +pyarrow/tests/conftest.py,sha256=9Fz7KMsYY46BFUavjQa_J05pFk2gCgM989QTHhLP_4c,9889 +pyarrow/tests/data/feather/v0.17.0.version.2-compression.lz4.feather,sha256=qzcc7Bo4OWBXYsyyKdDJwdTRstMqB1Zz0GiGYtndBnE,594 +pyarrow/tests/data/orc/README.md,sha256=_4X5XszZqQtWAVEz5N1Va4VyyayGQgNDKrcdMX2Ib4s,932 +pyarrow/tests/data/orc/TestOrcFile.emptyFile.jsn.gz,sha256=xLjAXd-3scx3DCyeAsmxTO3dv1cj9KRvYopKe5rQNiI,50 +pyarrow/tests/data/orc/TestOrcFile.emptyFile.orc,sha256=zj0579dQBXhF7JuB-ZphkmQ81ybLo6Ca4zPV4HXoImY,523 +pyarrow/tests/data/orc/TestOrcFile.test1.jsn.gz,sha256=kLxmwMVHtfzpHqBztFjfY_PTCloaXpfHq9DDDszb8Wk,323 +pyarrow/tests/data/orc/TestOrcFile.test1.orc,sha256=A4JxgMCffTkz9-XT1QT1tg2TlYZRRz1g7iIMmqzovqA,1711 +pyarrow/tests/data/orc/TestOrcFile.testDate1900.jsn.gz,sha256=oWf7eBR3ZtOA91OTvdeQJYos1an56msGsJwhGOan3lo,182453 +pyarrow/tests/data/orc/TestOrcFile.testDate1900.orc,sha256=nYsVYhUGGOL80gHj37si_vX0dh8QhIMSeU4sHjNideM,30941 +pyarrow/tests/data/orc/decimal.jsn.gz,sha256=kTEyYdPDAASFUX8Niyry5mRDF-Y-LsrhSAjbu453mvA,19313 +pyarrow/tests/data/orc/decimal.orc,sha256=W5cV2WdLy4OrSTnd_Qv5ntphG4TcB-MyG4UpRFwSxJY,16337 +pyarrow/tests/data/parquet/v0.7.1.all-named-index.parquet,sha256=YPGUXtw-TsOPbiNDieZHobNp3or7nHhAxJGjmIDAyqE,3948 +pyarrow/tests/data/parquet/v0.7.1.column-metadata-handling.parquet,sha256=7sebZgpfdcP37QksT3FhDL6vOA9gR6GBaq44NCVtOYw,2012 +pyarrow/tests/data/parquet/v0.7.1.parquet,sha256=vmdzhIzpBbmRkq3Gjww7KqurfSFNtQuSpSIDeQVmqys,4372 +pyarrow/tests/data/parquet/v0.7.1.some-named-index.parquet,sha256=VGgSjqihCRtdBxlUcfP5s3BSR7aUQKukW-bGgJLf_HY,4008 
+pyarrow/tests/extensions.pyx,sha256=pfW662e7Y8FmDdgpmtesN2nROHg3BMiX9oIJqb0mLY8,3046 +pyarrow/tests/interchange/__init__.py,sha256=9hdXHABrVpkbpjZgUft39kOFL2xSGeG4GEua0Hmelus,785 +pyarrow/tests/interchange/__pycache__/__init__.cpython-310.pyc,, +pyarrow/tests/interchange/__pycache__/test_conversion.cpython-310.pyc,, +pyarrow/tests/interchange/__pycache__/test_interchange_spec.cpython-310.pyc,, +pyarrow/tests/interchange/test_conversion.py,sha256=x7XB5NjoVqFX0DGPFY4C_aEmFlcwHD1XqvPLR31MkJI,18440 +pyarrow/tests/interchange/test_interchange_spec.py,sha256=XDBR0dG1JDVCErZLma2GxCZdfhgBjxue20Chq34wg2E,9201 +pyarrow/tests/pandas_examples.py,sha256=RFKCW-Rn0Qz-ncd4pZWWSeUoPq63kemE3lFiVdv2dBs,5115 +pyarrow/tests/pandas_threaded_import.py,sha256=b_ubLr5dj4dWJht9552qc3S3Yt3fQQgaUH6208oZvHg,1429 +pyarrow/tests/parquet/__init__.py,sha256=dKsXU9M-sJyz2wYIuqwsKM9meOlK_qY6qhmQzIvEpCE,931 +pyarrow/tests/parquet/__pycache__/__init__.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/common.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/conftest.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/encryption.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/test_basic.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/test_compliant_nested_type.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/test_data_types.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/test_dataset.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/test_datetime.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/test_encryption.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/test_metadata.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/test_pandas.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/test_parquet_file.cpython-310.pyc,, +pyarrow/tests/parquet/__pycache__/test_parquet_writer.cpython-310.pyc,, +pyarrow/tests/parquet/common.py,sha256=JuI1ju1WTXMgHTJtbLd4yvgzJUjupaHEwIDbjaL8UNc,5798 +pyarrow/tests/parquet/conftest.py,sha256=rdcDrC71BLXokBL_8-DU_-LMZdUdxI2L0A-YXqiHFf4,2643 +pyarrow/tests/parquet/encryption.py,sha256=Oi3QbixApvWGoGImiW7PAjR28cTQqlRXZKMI3O7E4UY,2521 +pyarrow/tests/parquet/test_basic.py,sha256=AJLGLGyP9FxiMbDX-CUHor0vvBfbemFCFeNHiSeuPE4,34145 +pyarrow/tests/parquet/test_compliant_nested_type.py,sha256=Lz7tCPrSpv9GrKPMS-eu1LehsCTwz7KdUdCYJ8tF8dE,3901 +pyarrow/tests/parquet/test_data_types.py,sha256=AaNtvq0B_f5hcBC5WMfylkV7snMSH6hZIVhr-uKtje0,15670 +pyarrow/tests/parquet/test_dataset.py,sha256=g4BZHLYZ8Xl8qrb44h8Md0uILpdpR1hc1x-yRzfsMdM,41971 +pyarrow/tests/parquet/test_datetime.py,sha256=SI9kM_Ip-uCTyVwnP46hLYr_MEmGLlOvdZ6ASCk3mks,16318 +pyarrow/tests/parquet/test_encryption.py,sha256=AxbIDPG_j0NoOsM_GCZFY9KVYwALNmeDXAk_DoDDYhY,21747 +pyarrow/tests/parquet/test_metadata.py,sha256=CptuVzyC03MC6sq8ZA_vRADyS5_D73ssOh48JIyfF48,26436 +pyarrow/tests/parquet/test_pandas.py,sha256=_Nyg08nmeDp8YSSSvFqECbK0DO-QeCdoKvmX1_lCKoY,22778 +pyarrow/tests/parquet/test_parquet_file.py,sha256=xm5ZUCf5xmpKh7s5nTIrEiis53mfv2NqZWVRiYOTfAg,9909 +pyarrow/tests/parquet/test_parquet_writer.py,sha256=gVZUykuGUIIDYL7Fba5-clfQW0E6u3W_Icfj9scEdKU,11151 +pyarrow/tests/pyarrow_cython_example.pyx,sha256=fx6zT1bUb2-cDnwKoG71K3ozpmrNJ53kKQHHJTExGz8,2115 +pyarrow/tests/read_record_batch.py,sha256=9Y0X0h03hUXwOKZz7jBBZSwgIrjxT-FkWIw6pu38Frc,953 +pyarrow/tests/strategies.py,sha256=3f8XbXrQCVJjldEE5JAHP4QAJeBvFLzKVjqZNuni6Fo,13838 +pyarrow/tests/test_acero.py,sha256=jgSkIAGhrVffShaD0ZAm50sY-f4u9jfjCimK8ezUbbA,15003 +pyarrow/tests/test_adhoc_memory_leak.py,sha256=WybazHIFcWACCJ78tJAJkWIenjlRVAmoAnHSZOuRySo,1410 
+pyarrow/tests/test_array.py,sha256=2tOCgp3BCz3xCJq7B0dZvk_3Co6okDXVCR9X5VkUdgs,129745 +pyarrow/tests/test_builder.py,sha256=VkT3v2JQey6Q0XpwJu328yeyyODE4KCZpXmZEIuxmd0,2803 +pyarrow/tests/test_cffi.py,sha256=WzK_0cu5hYbNdHIogpyEI1uATg0EPS35jI4pVysOVvA,24042 +pyarrow/tests/test_compute.py,sha256=ynkqQNS5E6X3pqLrpTUNI97AGyJF20yDY0-QnAZIfag,141744 +pyarrow/tests/test_convert_builtin.py,sha256=KSouJHkG0UX5UCV1PDpZdhLl45pOH4dIQO7ACtF-Aog,80071 +pyarrow/tests/test_cpp_internals.py,sha256=Pidht9eRmp3n3LZWvvWEcgO9znd4JvsFcsJD4RwdjBo,1762 +pyarrow/tests/test_csv.py,sha256=g7Xxx1V_-VMIu6MBqgOLPP0Z-O9EgiKNzP-wQvW54ck,76432 +pyarrow/tests/test_cuda.py,sha256=dUCje62WYRcwzfcxc0Py5GKsQFJzEbBg3oMmq6xP8YE,28023 +pyarrow/tests/test_cuda_numba_interop.py,sha256=C6y73Blh3Ht6NCNodaAMef84xf9AhqlPyok4DS8sw8M,8730 +pyarrow/tests/test_cython.py,sha256=qp0uiBMTXpnnTiRTffv2TCH1WeIhYxBdO9uL9Z0vZ2w,6953 +pyarrow/tests/test_dataset.py,sha256=MOAVcNItVqsOu2M7vkMyqZ7GrRO-6C0c5yl4MTqxLkY,207357 +pyarrow/tests/test_dataset_encryption.py,sha256=OH-j3mo3Z5Xf9x0YH-xmZAW8WPbhAhhj2o7Kpol4440,7039 +pyarrow/tests/test_deprecations.py,sha256=W_rneq4jC6zqCNoGhBDf1F28Q-0LHI7YKLgtsbV6LHM,891 +pyarrow/tests/test_dlpack.py,sha256=NkSaXejifpXWzcQJmEscVFEB2aZTe7YRGp40xUXuxKk,4708 +pyarrow/tests/test_exec_plan.py,sha256=pjOkSaWeqjN6celKxUEH3tBGXLh8kKbmSSsvKOWsbQQ,10096 +pyarrow/tests/test_extension_type.py,sha256=5bpoiZeaMCQ9GK0UnhlzSksJMRwIgADW1BYSGXWaC0Y,53902 +pyarrow/tests/test_feather.py,sha256=rWzqnnIhtuQ5enHyCoX2aNizlpriDF87Lzy-8USmIDQ,24936 +pyarrow/tests/test_flight.py,sha256=6L_HX18wDzpEzwcCia0lgZ9dETIaBTiBGtIe1YTxmp8,86293 +pyarrow/tests/test_flight_async.py,sha256=g_mNqrnNBp7GWNOWZgnVklZcVKV_vvAAChDgcQICNdo,2873 +pyarrow/tests/test_fs.py,sha256=_ulhfDDyAdmFThLeAMx049YBTerGb3d8i9MvPOK-XTM,63761 +pyarrow/tests/test_gandiva.py,sha256=AEf9ln-j5MmIMQ0JTQPhnZwbNh82ynSURsWPaKaNing,15623 +pyarrow/tests/test_gdb.py,sha256=CYbQUu3SRffTQAiL3LTLk4wFsggZHChM-ryYHIOW3Wk,44914 +pyarrow/tests/test_io.py,sha256=GtntaqUuEYAdgRFyzGNnMhH8qzLXMGG42_RuwDqVUgI,60839 +pyarrow/tests/test_ipc.py,sha256=5NZR6HFJYaUe5DcFJY0CucLMA21CL6eFdG1dIkI9Eq0,42056 +pyarrow/tests/test_json.py,sha256=AmZVlKq3LE2c20jyIptli5n-OXZDl1VOBCqe84TSD48,13057 +pyarrow/tests/test_jvm.py,sha256=L-hcFrAIx_M3Nsp1zn0Mt9OIAvSde-ZKjvlDsrrK6TY,15471 +pyarrow/tests/test_memory.py,sha256=g2t9UwU_fg42RVicRktbCx67vE82JFJJsjnjyRttwLk,8786 +pyarrow/tests/test_misc.py,sha256=oyFv5qFHA5acNLd7ZlyCyC56E0ICew5jM8JztqSGOjM,7001 +pyarrow/tests/test_orc.py,sha256=oijYMqsxPLYbpEy1NTwqlz-wiTd8aKttaZH6npXNXoY,19321 +pyarrow/tests/test_pandas.py,sha256=fSVmK4S8IjUk_Vcn9b4DgjiQcwEYHqQxHnNiweyHlZk,187578 +pyarrow/tests/test_scalars.py,sha256=LidxDKbQPpH5eQNrbQvXEpfDMSVUgrRP7ToNUsD3VsY,26804 +pyarrow/tests/test_schema.py,sha256=D7q_ekj2qNKro_Xgs8tChXPcHTWxJe973viRMmWb1oo,21729 +pyarrow/tests/test_sparse_tensor.py,sha256=kT_qci4VbF8I52fvg6L5mswsRlMwLMAku9INMzjr4UI,17436 +pyarrow/tests/test_strategies.py,sha256=ish7bxT_S9Pdi2HJK-rPI0j3LNbOM0zfa0j6NujkblI,1739 +pyarrow/tests/test_substrait.py,sha256=Rtpfa_bXgA1VQCAbp8YKB50JHw116G_wJ9J7ehmzafI,30596 +pyarrow/tests/test_table.py,sha256=oXdQa-tmNF8u7IIohd488issofXrK0g6FGRlidi9NRA,98970 +pyarrow/tests/test_tensor.py,sha256=cRwFSGDeVQ3teb-4JJz8sFpy3HaeBKGeLXwQwxoUTig,6434 +pyarrow/tests/test_types.py,sha256=BEest4GzefJ64PYZiKlXDtfKyj02c-mTyNy4aDdCgfU,41303 +pyarrow/tests/test_udf.py,sha256=Id54JGymUYmjAcxbJMxGPnDURHaUHO7Kohn_HvAImzA,28587 +pyarrow/tests/test_util.py,sha256=ozTlooHBMOP3nbX5b3dG2aanrXwxXHx1giicm0QQyPM,5030 
+pyarrow/tests/util.py,sha256=kpErnTov74-_BbpSZv6MncC6vNiXnEIblwp6Qdx34mU,14037 +pyarrow/types.pxi,sha256=VHrAkRbSbk_WSEOslJRwHzjynZqm0b2IeN5iq2iBvJw,147344 +pyarrow/types.py,sha256=Woixb8A_OzBNtolWwwFGhbEWn10gavaB7S0wGMoFakQ,7240 +pyarrow/util.py,sha256=W0LXUR7nsrA5N-l3THD283bxCibS0sM1q6WLcfbFFz8,7970 +pyarrow/vendored/__init__.py,sha256=9hdXHABrVpkbpjZgUft39kOFL2xSGeG4GEua0Hmelus,785 +pyarrow/vendored/__pycache__/__init__.cpython-310.pyc,, +pyarrow/vendored/__pycache__/docscrape.cpython-310.pyc,, +pyarrow/vendored/__pycache__/version.cpython-310.pyc,, +pyarrow/vendored/docscrape.py,sha256=phTjwuzoO5hB88QerZk3uGu9c5OrZwjFzI7vEIIbCUQ,22975 +pyarrow/vendored/version.py,sha256=5-Vo4Q3kPJrm1DSGusnMlTxuA8ynI4hAryApBd6MnpQ,14345 diff --git a/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..01d63408033ee7ee4ea53703645bf9cc0dc2c571 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_28_x86_64 + diff --git a/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..652a7f20a026b7151711b81e752207ae1bcfce96 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow-16.0.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +__dummy__ +pyarrow diff --git a/venv/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a --- /dev/null +++ b/venv/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/xxhash/__init__.py b/venv/lib/python3.10/site-packages/xxhash/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..043c486ddf9d2770d7fbee4bff1c26985ac6cd7e --- /dev/null +++ b/venv/lib/python3.10/site-packages/xxhash/__init__.py @@ -0,0 +1,63 @@ +from ._xxhash import ( + xxh32, + xxh32_digest, + xxh32_intdigest, + xxh32_hexdigest, + xxh64, + xxh64_digest, + xxh64_intdigest, + xxh64_hexdigest, + xxh3_64, + xxh3_64_digest, + xxh3_64_intdigest, + xxh3_64_hexdigest, + xxh3_128, + xxh3_128_digest, + xxh3_128_intdigest, + xxh3_128_hexdigest, + XXHASH_VERSION, +) + +from .version import VERSION, VERSION_TUPLE + + +xxh128 = xxh3_128 +xxh128_hexdigest = xxh3_128_hexdigest +xxh128_intdigest = xxh3_128_intdigest +xxh128_digest = xxh3_128_digest + +algorithms_available = set([ + "xxh32", + "xxh64", + "xxh3_64", + "xxh128", + "xxh3_128", +]) + + +__all__ = [ + "xxh32", + "xxh32_digest", + "xxh32_intdigest", + "xxh32_hexdigest", + "xxh64", + "xxh64_digest", + "xxh64_intdigest", + "xxh64_hexdigest", + "xxh3_64", + "xxh3_64_digest", + "xxh3_64_intdigest", + "xxh3_64_hexdigest", + "xxh3_128", + "xxh3_128_digest", + "xxh3_128_intdigest", + "xxh3_128_hexdigest", + "xxh128", + "xxh128_digest", + "xxh128_intdigest", + "xxh128_hexdigest", + "VERSION", + "VERSION_TUPLE", + "XXHASH_VERSION", + "algorithms_available", +] diff --git a/venv/lib/python3.10/site-packages/xxhash/__init__.pyi 
b/venv/lib/python3.10/site-packages/xxhash/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..03c62497a3aba0fc5695ee033c0a402e41ae8c44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/xxhash/__init__.pyi @@ -0,0 +1,62 @@ +import array +from typing import Union +from typing_extensions import final + +_InputType = Union[str, bytes, bytearray, memoryview, array.ArrayType[int]] + +VERSION: str +XXHASH_VERSION: str +VERSION_TUPLE: tuple[int, ...] + +algorithms_available: set[str] + +class _Hasher: + def __init__(self, input: _InputType = ..., seed: int = ...) -> None: ... + def update(self, input: _InputType) -> None: ... + def digest(self) -> bytes: ... + def hexdigest(self) -> str: ... + def intdigest(self) -> int: ... + def copy(self) -> _Hasher: ... + def reset(self) -> None: ... + @property + def digestsize(self) -> int: ... + @property + def digest_size(self) -> int: ... + @property + def block_size(self) -> int: ... + @property + def name(self) -> str: ... + @property + def seed(self) -> int: ... + +@final +class xxh32(_Hasher): ... + +@final +class xxh3_64(_Hasher): ... + +@final +class xxh3_128(_Hasher): ... + +xxh64 = xxh3_64 +xxh128 = xxh3_128 + +def xxh32_digest(args: _InputType, seed: int = ...) -> bytes: ... +def xxh32_hexdigest(args: _InputType, seed: int = ...) -> str: ... +def xxh32_intdigest(args: _InputType, seed: int = ...) -> int: ... + +def xxh3_64_digest(args: _InputType, seed: int = ...) -> bytes: ... +def xxh3_64_hexdigest(args: _InputType, seed: int = ...) -> str: ... +def xxh3_64_intdigest(args: _InputType, seed: int = ...) -> int: ... + +def xxh3_128_digest(args: _InputType, seed: int = ...) -> bytes: ... +def xxh3_128_hexdigest(args: _InputType, seed: int = ...) -> str: ... +def xxh3_128_intdigest(args: _InputType, seed: int = ...) -> int: ... + +xxh64_digest = xxh3_64_digest +xxh64_hexdigest = xxh3_64_hexdigest +xxh64_intdigest = xxh3_64_intdigest + +xxh128_digest = xxh3_128_digest +xxh128_hexdigest = xxh3_128_hexdigest +xxh128_intdigest = xxh3_128_intdigest diff --git a/venv/lib/python3.10/site-packages/xxhash/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/xxhash/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f25c020b029f5a5e0810ef7f69b6d6fee99d8877 Binary files /dev/null and b/venv/lib/python3.10/site-packages/xxhash/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/xxhash/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/xxhash/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4e84f80e710dab65c677f8dac1c2048255772ea Binary files /dev/null and b/venv/lib/python3.10/site-packages/xxhash/__pycache__/version.cpython-310.pyc differ
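The `pyarrow-16.0.0.dist-info/RECORD` entries listed above follow the standard wheel RECORD layout: `path,sha256=<urlsafe-base64 digest without padding>,<size in bytes>`, with the hash and size left empty for files generated at install time such as the `__pycache__/*.pyc` entries. Below is a minimal sketch of how such entries could be re-verified against the files on disk; the `SITE_PACKAGES` location and the `verify_record` helper are illustrative assumptions, not part of pyarrow or pip.

```python
import base64
import csv
import hashlib
from pathlib import Path

# Hypothetical install location matching the paths shown in the diff above.
SITE_PACKAGES = Path("venv/lib/python3.10/site-packages")
RECORD_PATH = SITE_PACKAGES / "pyarrow-16.0.0.dist-info" / "RECORD"


def verify_record(record_path: Path, root: Path) -> list[str]:
    """Return the RECORD paths whose hash or size no longer matches the file on disk."""
    mismatches = []
    with record_path.open(newline="") as fh:
        for path, hash_spec, size in csv.reader(fh):
            if not hash_spec:  # e.g. __pycache__/*.pyc rows carry no hash or size
                continue
            algo, _, expected = hash_spec.partition("=")
            data = (root / path).read_bytes()
            digest = hashlib.new(algo, data).digest()
            actual = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
            if actual != expected or int(size) != len(data):
                mismatches.append(path)
    return mismatches


if __name__ == "__main__":
    print(verify_record(RECORD_PATH, SITE_PACKAGES))
```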
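The vendored xxhash `__init__.py` added in this diff re-exports the C-extension hashers and aliases `xxh128` to `xxh3_128`, and the accompanying `.pyi` stub shows the hashlib-style interface (`update()`, `digest()`, `hexdigest()`, `intdigest()`) alongside one-shot module-level functions. A small usage sketch against that public API, using only the names exported in the diff above:

```python
import xxhash

# One-shot helpers take the input (and an optional seed) directly.
print(xxhash.xxh3_64_hexdigest(b"hello", seed=0))
print(xxhash.xxh128_intdigest(b"hello"))  # alias for xxh3_128_intdigest per __init__.py

# Streaming interface mirrors hashlib: update() then digest()/hexdigest()/intdigest().
h = xxhash.xxh32(seed=42)
h.update(b"chunk one")
h.update(b"chunk two")
print(h.hexdigest(), h.intdigest())

# The module advertises which algorithms this build exposes.
print(sorted(xxhash.algorithms_available))
```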