diff --git a/env-llmeval/lib/python3.10/site-packages/dill/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/dill/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fbec382c8b8ad0967d0056ae85ed281bb6a4541
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill/tests/__init__.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2018-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+to run this test suite, first build and install `dill`.
+
+  $ python -m pip install ../..
+
+
+then run the tests with:
+
+  $ python -m dill.tests
+
+
+or, if `nose` is installed:
+
+  $ nosetests
+
+"""
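Each module under dill/tests also guards its checks with an `if __name__ == '__main__'` block, so a single module can be run on its own while debugging; assuming the package layout above, something like:

  $ python -m dill.tests.test_diff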
diff --git a/env-llmeval/lib/python3.10/site-packages/dill/tests/test_dataclasses.py b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_dataclasses.py
new file mode 100644
index 0000000000000000000000000000000000000000..10dc51c50a129b7a28b4959d4a01c68e2c76a346
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_dataclasses.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Author: Anirudh Vegesana (avegesan@cs.stanford.edu)
+# Copyright (c) 2022-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+test pickling a dataclass
+"""
+
+import dill
+import dataclasses
+
+def test_dataclasses():
+    # Issue #500
+    @dataclasses.dataclass
+    class A:
+        x: int
+        y: str
+
+    @dataclasses.dataclass
+    class B:
+        a: A
+
+    a = A(1, "test")
+    before = B(a)
+    save = dill.dumps(before)
+    after = dill.loads(save)
+    assert before != after  # classes don't match
+    assert before == B(A(**dataclasses.asdict(after.a)))
+    assert dataclasses.asdict(before) == dataclasses.asdict(after)
+
+if __name__ == '__main__':
+    test_dataclasses()
diff --git a/env-llmeval/lib/python3.10/site-packages/dill/tests/test_diff.py b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_diff.py
new file mode 100644
index 0000000000000000000000000000000000000000..a175305344aeaf8a61c52ab2d16e1084671a0887
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_diff.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+
+from dill import __diff as diff
+
+import sys
+IS_PYPY = not hasattr(sys, 'getrefcount')
+
+class A:
+    pass
+
+def test_diff():
+    a = A()
+    b = A()
+    c = A()
+    a.a = b
+    b.a = c
+    diff.memorise(a)
+    assert not diff.has_changed(a)
+    c.a = 1
+    assert diff.has_changed(a)
+    diff.memorise(c, force=True)
+    assert not diff.has_changed(a)
+    c.a = 2
+    assert diff.has_changed(a)
+    changed = diff.whats_changed(a)
+    assert list(changed[0].keys()) == ["a"]
+    assert not changed[1]
+
+    a2 = []
+    b2 = [a2]
+    c2 = [b2]
+    diff.memorise(c2)
+    assert not diff.has_changed(c2)
+    a2.append(1)
+    assert diff.has_changed(c2)
+    changed = diff.whats_changed(c2)
+    assert changed[0] == {}
+    assert changed[1]
+
+    a3 = {}
+    b3 = {1: a3}
+    c3 = {1: b3}
+    diff.memorise(c3)
+    assert not diff.has_changed(c3)
+    a3[1] = 1
+    assert diff.has_changed(c3)
+    changed = diff.whats_changed(c3)
+    assert changed[0] == {}
+    assert changed[1]
+
+    if not IS_PYPY:
+        import abc
+        # make sure the "_abc_invalidation_counter" doesn't make the test fail
+        diff.memorise(abc.ABCMeta, force=True)
+        assert not diff.has_changed(abc)
+        abc.ABCMeta.zzz = 1
+        assert diff.has_changed(abc)
+        changed = diff.whats_changed(abc)
+        assert list(changed[0].keys()) == ["ABCMeta"]
+        assert not changed[1]
+
+    '''
+    import Queue
+    diff.memorise(Queue, force=True)
+    assert not diff.has_changed(Queue)
+    Queue.Queue.zzz = 1
+    assert diff.has_changed(Queue)
+    changed = diff.whats_changed(Queue)
+    assert list(changed[0].keys()) == ["Queue"]
+    assert not changed[1]
+
+    import math
+    diff.memorise(math, force=True)
+    assert not diff.has_changed(math)
+    math.zzz = 1
+    assert diff.has_changed(math)
+    changed = diff.whats_changed(math)
+    assert list(changed[0].keys()) == ["zzz"]
+    assert not changed[1]
+    '''
+
+    a = A()
+    b = A()
+    c = A()
+    a.a = b
+    b.a = c
+    diff.memorise(a)
+    assert not diff.has_changed(a)
+    c.a = 1
+    assert diff.has_changed(a)
+    diff.memorise(c, force=True)
+    assert not diff.has_changed(a)
+    del c.a
+    assert diff.has_changed(a)
+    changed = diff.whats_changed(a)
+    assert list(changed[0].keys()) == ["a"]
+    assert not changed[1]
+
+
+if __name__ == '__main__':
+    test_diff()
diff --git a/env-llmeval/lib/python3.10/site-packages/dill/tests/test_file.py b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad949f36704ae41a03c47245af3b84a85b803f59
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_file.py
@@ -0,0 +1,500 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+
+import os
+import sys
+import string
+import random
+
+import dill
+
+
+dill.settings['recurse'] = True
+
+fname = "_test_file.txt"
+rand_chars = list(string.ascii_letters) + ["\n"] * 40  # bias newline
+
+buffer_error = ValueError("invalid buffer size")
+dne_error = FileNotFoundError("[Errno 2] No such file or directory: '%s'" % fname)
+
+
+def write_randomness(number=200):
+    f = open(fname, "w")
+    for i in range(number):
+        f.write(random.choice(rand_chars))
+    f.close()
+    f = open(fname, "r")
+    contents = f.read()
+    f.close()
+    return contents
+
+
+def trunc_file():
+    open(fname, "w").close()
+
+
+def throws(op, args, exc):
+    try:
+        op(*args)
+    except type(exc):
+        return sys.exc_info()[1].args == exc.args
+    else:
+        return False
+
+
+def teardown_module():
+    if os.path.exists(fname):
+        os.remove(fname)
+
+
+def bench(strictio, fmode, skippypy):
+    import platform
+    if skippypy and platform.python_implementation() == 'PyPy':
+        # Skip for PyPy...
+        return
+
+    # file exists, with same contents
+    # read
+
+    write_randomness()
+
+    f = open(fname, "r")
+    _f = dill.loads(dill.dumps(f, fmode=fmode))  #, strictio=strictio))
+    assert _f.mode == f.mode
+    assert _f.tell() == f.tell()
+    assert _f.read() == f.read()
+    f.close()
+    _f.close()
+
+    # write
+
+    f = open(fname, "w")
+    f.write("hello")
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    f1mode = f.mode
+    ftell = f.tell()
+    f.close()
+    f2 = dill.loads(f_dumped)  #FIXME: fails due to pypy/issues/1233
+    # TypeError: expected py_object instance instead of str
+    f2mode = f2.mode
+    f2tell = f2.tell()
+    f2name = f2.name
+    f2.write(" world!")
+    f2.close()
+
+    if fmode == dill.HANDLE_FMODE:
+        assert open(fname).read() == " world!"
+        assert f2mode == f1mode
+        assert f2tell == 0
+    elif fmode == dill.CONTENTS_FMODE:
+        assert open(fname).read() == "hello world!"
+        assert f2mode == f1mode
+        assert f2tell == ftell
+        assert f2name == fname
+    elif fmode == dill.FILE_FMODE:
+        assert open(fname).read() == "hello world!"
+        assert f2mode == f1mode
+        assert f2tell == ftell
+    else:
+        raise RuntimeError("Unknown file mode '%s'" % fmode)
+
+    # append
+
+    trunc_file()
+
+    f = open(fname, "a")
+    f.write("hello")
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    f1mode = f.mode
+    ftell = f.tell()
+    f.close()
+    f2 = dill.loads(f_dumped)
+    f2mode = f2.mode
+    f2tell = f2.tell()
+    f2.write(" world!")
+    f2.close()
+
+    assert f2mode == f1mode
+    if fmode == dill.CONTENTS_FMODE:
+        assert open(fname).read() == "hello world!"
+        assert f2tell == ftell
+    elif fmode == dill.HANDLE_FMODE:
+        assert open(fname).read() == "hello world!"
+        assert f2tell == ftell
+    elif fmode == dill.FILE_FMODE:
+        assert open(fname).read() == "hello world!"
+        assert f2tell == ftell
+    else:
+        raise RuntimeError("Unknown file mode '%s'" % fmode)
+
+    # file exists, with different contents (smaller size)
+    # read
+
+    write_randomness()
+
+    f = open(fname, "r")
+    fstr = f.read()
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    f1mode = f.mode
+    ftell = f.tell()
+    f.close()
+    _flen = 150
+    _fstr = write_randomness(number=_flen)
+
+    if strictio:  # throw error if ftell > EOF
+        assert throws(dill.loads, (f_dumped,), buffer_error)
+    else:
+        f2 = dill.loads(f_dumped)
+        assert f2.mode == f1mode
+        if fmode == dill.CONTENTS_FMODE:
+            assert f2.tell() == _flen
+            assert f2.read() == ""
+            f2.seek(0)
+            assert f2.read() == _fstr
+            assert f2.tell() == _flen  # 150
+        elif fmode == dill.HANDLE_FMODE:
+            assert f2.tell() == 0
+            assert f2.read() == _fstr
+            assert f2.tell() == _flen  # 150
+        elif fmode == dill.FILE_FMODE:
+            assert f2.tell() == ftell  # 200
+            assert f2.read() == ""
+            f2.seek(0)
+            assert f2.read() == fstr
+            assert f2.tell() == ftell  # 200
+        else:
+            raise RuntimeError("Unknown file mode '%s'" % fmode)
+        f2.close()
+
+    # write
+
+    write_randomness()
+
+    f = open(fname, "w")
+    f.write("hello")
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    f1mode = f.mode
+    ftell = f.tell()
+    f.close()
+    fstr = open(fname).read()
+
+    f = open(fname, "w")
+    f.write("h")
+    _ftell = f.tell()
+    f.close()
+
+    if strictio:  # throw error if ftell > EOF
+        assert throws(dill.loads, (f_dumped,), buffer_error)
+    else:
+        f2 = dill.loads(f_dumped)
+        f2mode = f2.mode
+        f2tell = f2.tell()
+        f2.write(" world!")
+        f2.close()
+        if fmode == dill.CONTENTS_FMODE:
+            assert open(fname).read() == "h world!"
+            assert f2mode == f1mode
+            assert f2tell == _ftell
+        elif fmode == dill.HANDLE_FMODE:
+            assert open(fname).read() == " world!"
+            assert f2mode == f1mode
+            assert f2tell == 0
+        elif fmode == dill.FILE_FMODE:
+            assert open(fname).read() == "hello world!"
+            assert f2mode == f1mode
+            assert f2tell == ftell
+        else:
+            raise RuntimeError("Unknown file mode '%s'" % fmode)
+        f2.close()
+
+    # append
+
+    trunc_file()
+
+    f = open(fname, "a")
+    f.write("hello")
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    f1mode = f.mode
+    ftell = f.tell()
+    f.close()
+    fstr = open(fname).read()
+
+    f = open(fname, "w")
+    f.write("h")
+    _ftell = f.tell()
+    f.close()
+
+    if strictio:  # throw error if ftell > EOF
+        assert throws(dill.loads, (f_dumped,), buffer_error)
+    else:
+        f2 = dill.loads(f_dumped)
+        f2mode = f2.mode
+        f2tell = f2.tell()
+        f2.write(" world!")
+        f2.close()
+        assert f2mode == f1mode
+        if fmode == dill.CONTENTS_FMODE:
+            # position of writes cannot be changed on some OSs
+            assert open(fname).read() == "h world!"
+            assert f2tell == _ftell
+        elif fmode == dill.HANDLE_FMODE:
+            assert open(fname).read() == "h world!"
+            assert f2tell == _ftell
+        elif fmode == dill.FILE_FMODE:
+            assert open(fname).read() == "hello world!"
+            assert f2tell == ftell
+        else:
+            raise RuntimeError("Unknown file mode '%s'" % fmode)
+        f2.close()
+
+    # file does not exist
+    # read
+
+    write_randomness()
+
+    f = open(fname, "r")
+    fstr = f.read()
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    f1mode = f.mode
+    ftell = f.tell()
+    f.close()
+
+    os.remove(fname)
+
+    if strictio:  # throw error if file DNE
+        assert throws(dill.loads, (f_dumped,), dne_error)
+    else:
+        f2 = dill.loads(f_dumped)
+        assert f2.mode == f1mode
+        if fmode == dill.CONTENTS_FMODE:
+            # FIXME: this fails on systems where f2.tell() always returns 0
+            # assert f2.tell() == ftell  # 200
+            assert f2.read() == ""
+            f2.seek(0)
+            assert f2.read() == ""
+            assert f2.tell() == 0
+        elif fmode == dill.FILE_FMODE:
+            assert f2.tell() == ftell  # 200
+            assert f2.read() == ""
+            f2.seek(0)
+            assert f2.read() == fstr
+            assert f2.tell() == ftell  # 200
+        elif fmode == dill.HANDLE_FMODE:
+            assert f2.tell() == 0
+            assert f2.read() == ""
+            assert f2.tell() == 0
+        else:
+            raise RuntimeError("Unknown file mode '%s'" % fmode)
+        f2.close()
+
+    # write
+
+    write_randomness()
+
+    f = open(fname, "w+")
+    f.write("hello")
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    ftell = f.tell()
+    f1mode = f.mode
+    f.close()
+
+    os.remove(fname)
+
+    if strictio:  # throw error if file DNE
+        assert throws(dill.loads, (f_dumped,), dne_error)
+    else:
+        f2 = dill.loads(f_dumped)
+        f2mode = f2.mode
+        f2tell = f2.tell()
+        f2.write(" world!")
+        f2.close()
+        if fmode == dill.CONTENTS_FMODE:
+            assert open(fname).read() == " world!"
+            assert f2mode == 'w+'
+            assert f2tell == 0
+        elif fmode == dill.HANDLE_FMODE:
+            assert open(fname).read() == " world!"
+            assert f2mode == f1mode
+            assert f2tell == 0
+        elif fmode == dill.FILE_FMODE:
+            assert open(fname).read() == "hello world!"
+            assert f2mode == f1mode
+            assert f2tell == ftell
+        else:
+            raise RuntimeError("Unknown file mode '%s'" % fmode)
+
+    # append
+
+    trunc_file()
+
+    f = open(fname, "a")
+    f.write("hello")
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    ftell = f.tell()
+    f1mode = f.mode
+    f.close()
+
+    os.remove(fname)
+
+    if strictio:  # throw error if file DNE
+        assert throws(dill.loads, (f_dumped,), dne_error)
+    else:
+        f2 = dill.loads(f_dumped)
+        f2mode = f2.mode
+        f2tell = f2.tell()
+        f2.write(" world!")
+        f2.close()
+        assert f2mode == f1mode
+        if fmode == dill.CONTENTS_FMODE:
+            assert open(fname).read() == " world!"
+            assert f2tell == 0
+        elif fmode == dill.HANDLE_FMODE:
+            assert open(fname).read() == " world!"
+            assert f2tell == 0
+        elif fmode == dill.FILE_FMODE:
+            assert open(fname).read() == "hello world!"
+            assert f2tell == ftell
+        else:
+            raise RuntimeError("Unknown file mode '%s'" % fmode)
+
+    # file exists, with different contents (larger size)
+    # read
+
+    write_randomness()
+
+    f = open(fname, "r")
+    fstr = f.read()
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    f1mode = f.mode
+    ftell = f.tell()
+    f.close()
+    _flen = 250
+    _fstr = write_randomness(number=_flen)
+
+    # XXX: no safe_file: no way to be 'safe'?
+
+    f2 = dill.loads(f_dumped)
+    assert f2.mode == f1mode
+    if fmode == dill.CONTENTS_FMODE:
+        assert f2.tell() == ftell  # 200
+        assert f2.read() == _fstr[ftell:]
+        f2.seek(0)
+        assert f2.read() == _fstr
+        assert f2.tell() == _flen  # 250
+    elif fmode == dill.HANDLE_FMODE:
+        assert f2.tell() == 0
+        assert f2.read() == _fstr
+        assert f2.tell() == _flen  # 250
+    elif fmode == dill.FILE_FMODE:
+        assert f2.tell() == ftell  # 200
+        assert f2.read() == ""
+        f2.seek(0)
+        assert f2.read() == fstr
+        assert f2.tell() == ftell  # 200
+    else:
+        raise RuntimeError("Unknown file mode '%s'" % fmode)
+    f2.close()  # XXX: other alternatives?
+
+    # write
+
+    f = open(fname, "w")
+    f.write("hello")
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    f1mode = f.mode
+    ftell = f.tell()
+
+    fstr = open(fname).read()
+
+    f.write(" and goodbye!")
+    _ftell = f.tell()
+    f.close()
+
+    # XXX: no safe_file: no way to be 'safe'?
+
+    f2 = dill.loads(f_dumped)
+    f2mode = f2.mode
+    f2tell = f2.tell()
+    f2.write(" world!")
+    f2.close()
+    if fmode == dill.CONTENTS_FMODE:
+        assert open(fname).read() == "hello world!odbye!"
+        assert f2mode == f1mode
+        assert f2tell == ftell
+    elif fmode == dill.HANDLE_FMODE:
+        assert open(fname).read() == " world!"
+        assert f2mode == f1mode
+        assert f2tell == 0
+    elif fmode == dill.FILE_FMODE:
+        assert open(fname).read() == "hello world!"
+        assert f2mode == f1mode
+        assert f2tell == ftell
+    else:
+        raise RuntimeError("Unknown file mode '%s'" % fmode)
+    f2.close()
+
+    # append
+
+    trunc_file()
+
+    f = open(fname, "a")
+    f.write("hello")
+    f_dumped = dill.dumps(f, fmode=fmode)  #, strictio=strictio)
+    f1mode = f.mode
+    ftell = f.tell()
+    fstr = open(fname).read()
+
+    f.write(" and goodbye!")
+    _ftell = f.tell()
+    f.close()
+
+    # XXX: no safe_file: no way to be 'safe'?
+
+    f2 = dill.loads(f_dumped)
+    f2mode = f2.mode
+    f2tell = f2.tell()
+    f2.write(" world!")
+    f2.close()
+    assert f2mode == f1mode
+    if fmode == dill.CONTENTS_FMODE:
+        assert open(fname).read() == "hello and goodbye! world!"
+        assert f2tell == ftell
+    elif fmode == dill.HANDLE_FMODE:
+        assert open(fname).read() == "hello and goodbye! world!"
+        assert f2tell == _ftell
+    elif fmode == dill.FILE_FMODE:
+        assert open(fname).read() == "hello world!"
+        assert f2tell == ftell
+    else:
+        raise RuntimeError("Unknown file mode '%s'" % fmode)
+    f2.close()
+
+
+def test_nostrictio_handlefmode():
+    bench(False, dill.HANDLE_FMODE, False)
+    teardown_module()
+
+
+def test_nostrictio_filefmode():
+    bench(False, dill.FILE_FMODE, False)
+    teardown_module()
+
+
+def test_nostrictio_contentsfmode():
+    bench(False, dill.CONTENTS_FMODE, True)
+    teardown_module()
+
+
+#bench(True, dill.HANDLE_FMODE, False)
+#bench(True, dill.FILE_FMODE, False)
+#bench(True, dill.CONTENTS_FMODE, True)
+
+
+if __name__ == '__main__':
+    test_nostrictio_handlefmode()
+    test_nostrictio_filefmode()
+    test_nostrictio_contentsfmode()
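The three fmode values exercised by bench() differ in what is captured at dump time: HANDLE_FMODE saves only the file handle, CONTENTS_FMODE saves the handle plus its position against the on-disk contents, and FILE_FMODE snapshots the entire contents into the pickle. A minimal sketch of the API under test (the path is illustrative):

    import dill
    f = open("/tmp/example.txt", "w")
    f.write("hello")
    payload = dill.dumps(f, fmode=dill.FILE_FMODE)  # contents travel inside the pickle
    f.close()
    f2 = dill.loads(payload)  # restored handle resumes at the saved offset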
diff --git a/env-llmeval/lib/python3.10/site-packages/dill/tests/test_module.py b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_module.py
new file mode 100644
index 0000000000000000000000000000000000000000..b696d728eb83cb2f146100b3bff1eb33f88440a7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_module.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+
+import sys
+import dill
+import test_mixins as module
+from importlib import reload
+dill.settings['recurse'] = True
+
+cached = (module.__cached__ if hasattr(module, "__cached__")
+          else module.__file__.split(".", 1)[0] + ".pyc")
+
+module.a = 1234
+
+pik_mod = dill.dumps(module)
+
+module.a = 0
+
+# remove module
+del sys.modules[module.__name__]
+del module
+
+module = dill.loads(pik_mod)
+def test_attributes():
+    #assert hasattr(module, "a") and module.a == 1234  #FIXME: -m dill.tests
+    assert module.double_add(1, 2, 3) == 2 * module.fx
+
+# Restart, and test use_diff
+
+reload(module)
+
+try:
+    dill.use_diff()
+
+    module.a = 1234
+
+    pik_mod = dill.dumps(module)
+
+    module.a = 0
+
+    # remove module
+    del sys.modules[module.__name__]
+    del module
+
+    module = dill.loads(pik_mod)
+    def test_diff_attributes():
+        assert hasattr(module, "a") and module.a == 1234
+        assert module.double_add(1, 2, 3) == 2 * module.fx
+
+except AttributeError:
+    def test_diff_attributes():
+        pass
+
+# clean up
+import os
+if os.path.exists(cached):
+    os.remove(cached)
+pycache = os.path.join(os.path.dirname(module.__file__), "__pycache__")
+if os.path.exists(pycache) and not os.listdir(pycache):
+    os.removedirs(pycache)
+
+
+# test when module is None
+import math
+
+def get_lambda(str, **kwarg):
+    return eval(str, kwarg, None)
+
+obj = get_lambda('lambda x: math.exp(x)', math=math)
+
+def test_module_is_none():
+    assert obj.__module__ is None
+    assert dill.copy(obj)(3) == obj(3)
+
+
+if __name__ == '__main__':
+    test_attributes()
+    test_diff_attributes()
+    test_module_is_none()
diff --git a/env-llmeval/lib/python3.10/site-packages/dill/tests/test_objects.py b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..7db288de0fc313d52f73f70e99a636242f45cc57
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_objects.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+demonstrate dill's ability to pickle different python types
+test pickling of all Python Standard Library objects (currently: CH 1-14 @ 2.7)
+"""
+
+import dill as pickle
+pickle.settings['recurse'] = True
+#pickle.detect.trace(True)
+#import pickle
+
+# get all objects for testing
+from dill import load_types, objects, extend
+load_types(pickleable=True, unpickleable=False)
+
+# uncomment the next two lines to test cloudpickle
+#extend(False)
+#import cloudpickle as pickle
+
+# helper objects
+class _class:
+    def _method(self):
+        pass
+
+# objects that *fail* if imported
+special = {}
+special['LambdaType'] = _lambda = lambda x: lambda y: x
+special['MethodType'] = _method = _class()._method
+special['UnboundMethodType'] = _class._method
+objects.update(special)
+
+def pickles(name, exact=False, verbose=True):
+    """quick check if object pickles with dill"""
+    obj = objects[name]
+    try:
+        pik = pickle.loads(pickle.dumps(obj))
+        if exact:
+            try:
+                assert pik == obj
+            except AssertionError:
+                assert type(obj) == type(pik)
+                if verbose: print("weak: %s %s" % (name, type(obj)))
+        else:
+            assert type(obj) == type(pik)
+    except Exception:
+        if verbose: print("fails: %s %s" % (name, type(obj)))
+
+
+def test_objects(verbose=True):
+    for member in objects.keys():
+        #pickles(member, exact=True, verbose=verbose)
+        pickles(member, exact=False, verbose=verbose)
+
+if __name__ == '__main__':
+    import warnings
+    warnings.simplefilter('ignore')
+    test_objects(verbose=False)
diff --git a/env-llmeval/lib/python3.10/site-packages/dill/tests/test_properties.py b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_properties.py
new file mode 100644
index 0000000000000000000000000000000000000000..df3f5b58fac06d7dad962916eccd96f18d835190
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_properties.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+
+import sys
+
+import dill
+dill.settings['recurse'] = True
+
+
+class Foo(object):
+    def __init__(self):
+        self._data = 1
+
+    def _get_data(self):
+        return self._data
+
+    def _set_data(self, x):
+        self._data = x
+
+    data = property(_get_data, _set_data)
+
+
+def test_data_not_none():
+    FooS = dill.copy(Foo)
+    assert FooS.data.fget is not None
+    assert FooS.data.fset is not None
+    assert FooS.data.fdel is None
+
+
+def test_data_unchanged():
+    FooS = dill.copy(Foo)
+    try:
+        res = FooS().data
+    except Exception:
+        e = sys.exc_info()[1]
+        raise AssertionError(str(e))
+    else:
+        assert res == 1
+
+
+def test_data_changed():
+    FooS = dill.copy(Foo)
+    try:
+        f = FooS()
+        f.data = 1024
+        res = f.data
+    except Exception:
+        e = sys.exc_info()[1]
+        raise AssertionError(str(e))
+    else:
+        assert res == 1024
+
+
+if __name__ == '__main__':
+    test_data_not_none()
+    test_data_unchanged()
+    test_data_changed()
diff --git a/env-llmeval/lib/python3.10/site-packages/dill/tests/test_recursive.py b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_recursive.py
new file mode 100644
index 0000000000000000000000000000000000000000..b84f19e4bd5fecadaf4ac1fe63dd08a583e88358
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_recursive.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2019-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+
+import dill
+from functools import partial
+import warnings
+
+
+def copy(obj, byref=False, recurse=False):
+    if byref:
+        try:
+            return dill.copy(obj, byref=byref, recurse=recurse)
+        except Exception:
+            pass
+        else:
+            raise AssertionError('Copy of %s with byref=True should have given a warning!' % (obj,))
+
+        warnings.simplefilter('ignore')
+        val = dill.copy(obj, byref=byref, recurse=recurse)
+        warnings.simplefilter('error')
+        return val
+    else:
+        return dill.copy(obj, byref=byref, recurse=recurse)
+
+
+class obj1(object):
+    def __init__(self):
+        super(obj1, self).__init__()
+
+class obj2(object):
+    def __init__(self):
+        super(obj2, self).__init__()
+
+class obj3(object):
+    super_ = super
+    def __init__(self):
+        obj3.super_(obj3, self).__init__()
+
+
+def test_super():
+    assert copy(obj1(), byref=True)
+    assert copy(obj1(), byref=True, recurse=True)
+    assert copy(obj1(), recurse=True)
+    assert copy(obj1())
+
+    assert copy(obj2(), byref=True)
+    assert copy(obj2(), byref=True, recurse=True)
+    assert copy(obj2(), recurse=True)
+    assert copy(obj2())
+
+    assert copy(obj3(), byref=True)
+    assert copy(obj3(), byref=True, recurse=True)
+    assert copy(obj3(), recurse=True)
+    assert copy(obj3())
+
+
+def get_trigger(model):
+    pass
+
+class Machine(object):
+    def __init__(self):
+        self.child = Model()
+        self.trigger = partial(get_trigger, self)
+        self.child.trigger = partial(get_trigger, self.child)
+
+class Model(object):
+    pass
+
+
+
+def test_partial():
+    assert copy(Machine(), byref=True)
+    assert copy(Machine(), byref=True, recurse=True)
+    assert copy(Machine(), recurse=True)
+    assert copy(Machine())
+
+
+class Machine2(object):
+    def __init__(self):
+        self.go = partial(self.member, self)
+    def member(self, model):
+        pass
+
+
+class SubMachine(Machine2):
+    def __init__(self):
+        super(SubMachine, self).__init__()
+
+
+def test_partials():
+    assert copy(SubMachine(), byref=True)
+    assert copy(SubMachine(), byref=True, recurse=True)
+    assert copy(SubMachine(), recurse=True)
+    assert copy(SubMachine())
+
+
+class obj4(object):
+    def __init__(self):
+        super(obj4, self).__init__()
+        a = self
+        class obj5(object):
+            def __init__(self):
+                super(obj5, self).__init__()
+                self.a = a
+        self.b = obj5()
+
+
+def test_circular_reference():
+    assert copy(obj4())
+    obj4_copy = dill.loads(dill.dumps(obj4()))
+    assert type(obj4_copy) is type(obj4_copy).__init__.__closure__[0].cell_contents
+    assert type(obj4_copy.b) is type(obj4_copy.b).__init__.__closure__[0].cell_contents
+
+
+def f():
+    def g():
+        return g
+    return g
+
+
+def test_function_cells():
+    assert copy(f())
+
+
+def fib(n):
+    assert n >= 0
+    if n <= 1:
+        return n
+    else:
+        return fib(n-1) + fib(n-2)
+
+
+def test_recursive_function():
+    global fib
+    fib2 = copy(fib, recurse=True)
+    fib3 = copy(fib)
+    fib4 = fib
+    del fib
+    assert fib2(5) == 5
+    for _fib in (fib3, fib4):
+        try:
+            _fib(5)
+        except Exception:
+            # This is expected to fail because fib no longer exists
+            pass
+        else:
+            raise AssertionError("Function fib shouldn't have been found")
+    fib = fib4
+
+
+def collection_function_recursion():
+    d = {}
+    def g():
+        return d
+    d['g'] = g
+    return g
+
+
+def test_collection_function_recursion():
+    g = copy(collection_function_recursion())
+    assert g()['g'] is g
+
+
+if __name__ == '__main__':
+    with warnings.catch_warnings():
+        warnings.simplefilter('error')
+        test_super()
+        test_partial()
+        test_partials()
+        test_circular_reference()
+        test_function_cells()
+        test_recursive_function()
+        test_collection_function_recursion()
diff --git a/env-llmeval/lib/python3.10/site-packages/dill/tests/test_temp.py b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_temp.py
new file mode 100644
index 0000000000000000000000000000000000000000..30ae35a31b0c6f468a9602387f22cddd93ead4f0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_temp.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+
+import sys
+from dill.temp import dump, dump_source, dumpIO, dumpIO_source
+from dill.temp import load, load_source, loadIO, loadIO_source
+WINDOWS = sys.platform[:3] == 'win'
+
+
+f = lambda x: x**2
+x = [1,2,3,4,5]
+
+# source code to tempfile
+def test_code_to_tempfile():
+    if not WINDOWS:  #see: https://bugs.python.org/issue14243
+        pyfile = dump_source(f, alias='_f')
+        _f = load_source(pyfile)
+        assert _f(4) == f(4)
+
+# source code to stream
+def test_code_to_stream():
+    pyfile = dumpIO_source(f, alias='_f')
+    _f = loadIO_source(pyfile)
+    assert _f(4) == f(4)
+
+# pickle to tempfile
+def test_pickle_to_tempfile():
+    if not WINDOWS:  #see: https://bugs.python.org/issue14243
+        dumpfile = dump(x)
+        _x = load(dumpfile)
+        assert _x == x
+
+# pickle to stream
+def test_pickle_to_stream():
+    dumpfile = dumpIO(x)
+    _x = loadIO(dumpfile)
+    assert _x == x
+
+### now testing the objects ###
+f = lambda x: x**2
+def g(x): return f(x) - x
+
+def h(x):
+    def g(x): return x
+    return g(x) - x
+
+class Foo(object):
+    def bar(self, x):
+        return x*x+x
+_foo = Foo()
+
+def add(x,y):
+    return x+y
+
+# yes, same as 'f', but things are tricky when it comes to pointers
+squared = lambda x: x**2
+
+class Bar:
+    pass
+_bar = Bar()
+
+
+# test function-type objects that take 2 args
+def test_two_arg_functions():
+    for obj in [add]:
+        pyfile = dumpIO_source(obj, alias='_obj')
+        _obj = loadIO_source(pyfile)
+        assert _obj(4,2) == obj(4,2)
+
+# test function-type objects that take 1 arg
+def test_one_arg_functions():
+    for obj in [g, h, squared]:
+        pyfile = dumpIO_source(obj, alias='_obj')
+        _obj = loadIO_source(pyfile)
+        assert _obj(4) == obj(4)
+
+# test instance-type objects
+#for obj in [_bar, _foo]:
+#    pyfile = dumpIO_source(obj, alias='_obj')
+#    _obj = loadIO_source(pyfile)
+#    assert type(_obj) == type(obj)
+
+# test the rest of the objects
+def test_the_rest():
+    for obj in [Bar, Foo, Foo.bar, _foo.bar]:
+        pyfile = dumpIO_source(obj, alias='_obj')
+        _obj = loadIO_source(pyfile)
+        assert _obj.__name__ == obj.__name__
+
+
+if __name__ == '__main__':
+    test_code_to_tempfile()
+    test_code_to_stream()
+    test_pickle_to_tempfile()
+    test_pickle_to_stream()
+    test_two_arg_functions()
+    test_one_arg_functions()
+    test_the_rest()
diff --git a/env-llmeval/lib/python3.10/site-packages/dill/tests/test_weakref.py b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_weakref.py
new file mode 100644
index 0000000000000000000000000000000000000000..df5cbce9309e45b56a84b1d605f69586b68876cf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill/tests/test_weakref.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+
+import dill
+dill.settings['recurse'] = True
+import weakref
+
+class _class:
+    def _method(self):
+        pass
+
+class _callable_class:
+    def __call__(self):
+        pass
+
+def _function():
+    pass
+
+
+def test_weakref():
+    o = _class()
+    oc = _callable_class()
+    f = _function
+    x = _class
+
+    # ReferenceType
+    r = weakref.ref(o)
+    d_r = weakref.ref(_class())
+    fr = weakref.ref(f)
+    xr = weakref.ref(x)
+
+    # ProxyType
+    p = weakref.proxy(o)
+    d_p = weakref.proxy(_class())
+
+    # CallableProxyType
+    cp = weakref.proxy(oc)
+    d_cp = weakref.proxy(_callable_class())
+    fp = weakref.proxy(f)
+    xp = weakref.proxy(x)
+
+    objlist = [r,d_r,fr,xr, p,d_p, cp,d_cp,fp,xp]
+    #dill.detect.trace(True)
+
+    for obj in objlist:
+        res = dill.detect.errors(obj)
+        if res:
+            print("%r:\n  %s" % (obj, res))
+        # else:
+        #     print("PASS: %s" % obj)
+        assert not res
+
+def test_dictproxy():
+    from dill._dill import DictProxyType
+    try:
+        m = DictProxyType({"foo": "bar"})
+    except Exception:
+        m = type.__dict__
+    mp = dill.copy(m)
+    assert mp.items() == m.items()
+
+
+if __name__ == '__main__':
+    test_weakref()
+    from dill._dill import IS_PYPY
+    if not IS_PYPY:
+        test_dictproxy()
diff --git a/env-llmeval/lib/python3.10/site-packages/numexpr/__init__.py b/env-llmeval/lib/python3.10/site-packages/numexpr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7946f8522d7c5e536d6def85e69fd7b84aa0373d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/numexpr/__init__.py
@@ -0,0 +1,68 @@
+###################################################################
+#  Numexpr - Fast numerical array expression evaluator for NumPy.
+#
+#      License: MIT
+#      Author:  See AUTHORS.txt
+#
+#  See LICENSE.txt and LICENSES/*.txt for details about copyright and
+#  rights to use.
+####################################################################
+
+"""
+Numexpr is a fast numerical expression evaluator for NumPy. With it,
+expressions that operate on arrays (like "3*a+4*b") are accelerated
+and use less memory than doing the same calculation in Python.
+
+See:
+
+https://github.com/pydata/numexpr
+
+for more info about it.
+
+"""
+
+from numexpr.interpreter import MAX_THREADS, use_vml, __BLOCK_SIZE1__
+
+is_cpu_amd_intel = False  # DEPRECATION WARNING: WILL BE REMOVED IN FUTURE RELEASE
+
+# cpuinfo imports were moved into the test submodule function that calls them
+# to improve import times.
+
+import os, os.path
+import platform
+from numexpr.expressions import E
+from numexpr.necompiler import (NumExpr, disassemble, evaluate, re_evaluate,
+                                validate)
+
+from numexpr.utils import (_init_num_threads,
+                           get_vml_version, set_vml_accuracy_mode, set_vml_num_threads,
+                           set_num_threads, get_num_threads,
+                           detect_number_of_cores, detect_number_of_threads)
+
+# Detect the number of cores
+ncores = detect_number_of_cores()
+# Initialize the number of threads to be used
+nthreads = _init_num_threads()
+# The default for VML is 1 thread (see #39)
+# set_vml_num_threads(1)
+
+from . import version
+__version__ = version.version
+
+def print_versions():
+    """Print the versions of software that numexpr relies on."""
+    try:
+        import numexpr.tests
+        return numexpr.tests.print_versions()
+    except ImportError:
+        # To maintain Python 2.6 compatibility we have simple error handling
+        raise ImportError('`numexpr.tests` could not be imported, likely it was excluded from the distribution.')
+
+def test(verbosity=1):
+    """Run all the tests in the test suite."""
+    try:
+        import numexpr.tests
+        return numexpr.tests.test(verbosity=verbosity)
+    except ImportError:
+        # To maintain Python 2.6 compatibility we have simple error handling
+        raise ImportError('`numexpr.tests` could not be imported, likely it was excluded from the distribution.')
\ No newline at end of file
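The docstring's "3*a+4*b" example maps directly onto the `evaluate` function imported above; a typical use (the array names are illustrative) is:

    import numpy as np
    import numexpr as ne
    a = np.arange(1e6)
    b = np.arange(1e6)
    c = ne.evaluate("3*a+4*b")  # evaluates the whole expression without Python-level temporaries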
diff --git a/env-llmeval/lib/python3.10/site-packages/numexpr/cpuinfo.py b/env-llmeval/lib/python3.10/site-packages/numexpr/cpuinfo.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a57d3cb03fd2851d5ac886d953080409fc4725c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/numexpr/cpuinfo.py
@@ -0,0 +1,859 @@
+###################################################################
+#  cpuinfo - Get information about CPU
+#
+#      License: BSD
+#      Author:  Pearu Peterson
+#
+#  See LICENSES/cpuinfo.txt for details about copyright and
+#  rights to use.
+####################################################################
+
+"""
+cpuinfo
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+Pearu Peterson
+"""
+
+__all__ = ['cpu']
+
+import sys, re, types
+import os
+import subprocess
+import warnings
+import platform
+import inspect
+
+is_cpu_amd_intel = False  # DEPRECATION WARNING: WILL BE REMOVED IN FUTURE RELEASE
+
+def getoutput(cmd, successful_status=(0,), stacklevel=1):
+    try:
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+        output, _ = p.communicate()
+        status = p.returncode
+    except EnvironmentError as e:
+        warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
+        return False, ''
+    if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
+        return True, output
+    return False, output
+
+
+def command_info(successful_status=(0,), stacklevel=1, **kw):
+    info = {}
+    for key in kw:
+        ok, output = getoutput(kw[key], successful_status=successful_status,
+                               stacklevel=stacklevel + 1)
+        if ok:
+            info[key] = output.strip()
+    return info
+
+
+def command_by_line(cmd, successful_status=(0,), stacklevel=1):
+    ok, output = getoutput(cmd, successful_status=successful_status,
+                           stacklevel=stacklevel + 1)
+    if not ok:
+        return
+
+    # XXX: check
+    output = output.decode('ascii')
+
+    for line in output.splitlines():
+        yield line.strip()
+
+
+def key_value_from_command(cmd, sep, successful_status=(0,),
+                           stacklevel=1):
+    d = {}
+    for line in command_by_line(cmd, successful_status=successful_status,
+                                stacklevel=stacklevel + 1):
+        l = [s.strip() for s in line.split(sep, 1)]
+        if len(l) == 2:
+            d[l[0]] = l[1]
+    return d
+
+
+class CPUInfoBase(object):
+    """Holds CPU information and provides methods for requiring
+    the availability of various CPU features.
+ """ + + def _try_call(self, func): + try: + return func() + except: + pass + + def __getattr__(self, name): + if not name.startswith('_'): + if hasattr(self, '_' + name): + attr = getattr(self, '_' + name) + if inspect.ismethod(attr): + return lambda func=self._try_call, attr=attr: func(attr) + else: + return lambda: None + raise AttributeError(name) + + def _getNCPUs(self): + return 1 + + def __get_nbits(self): + abits = platform.architecture()[0] + nbits = re.compile(r'(\d+)bit').search(abits).group(1) + return nbits + + def _is_32bit(self): + return self.__get_nbits() == '32' + + def _is_64bit(self): + return self.__get_nbits() == '64' + + +class LinuxCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = [{}] + ok, output = getoutput(['uname', '-m']) + if ok: + info[0]['uname_m'] = output.strip() + try: + fo = open('/proc/cpuinfo') + except EnvironmentError as e: + warnings.warn(str(e), UserWarning) + else: + for line in fo: + name_value = [s.strip() for s in line.split(':', 1)] + if len(name_value) != 2: + continue + name, value = name_value + if not info or name in info[-1]: # next processor + info.append({}) + info[-1][name] = value + fo.close() + self.__class__.info = info + + def _not_impl(self): + pass + + # Athlon + + def _is_AMD(self): + return self.info[0]['vendor_id'] == 'AuthenticAMD' + + def _is_AthlonK6_2(self): + return self._is_AMD() and self.info[0]['model'] == '2' + + def _is_AthlonK6_3(self): + return self._is_AMD() and self.info[0]['model'] == '3' + + def _is_AthlonK6(self): + return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None + + def _is_AthlonK7(self): + return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None + + def _is_AthlonMP(self): + return re.match(r'.*?Athlon\(tm\) MP\b', + self.info[0]['model name']) is not None + + def _is_AMD64(self): + return self.is_AMD() and self.info[0]['family'] == '15' + + def _is_Athlon64(self): + return re.match(r'.*?Athlon\(tm\) 64\b', + self.info[0]['model name']) is not None + + def _is_AthlonHX(self): + return re.match(r'.*?Athlon HX\b', + self.info[0]['model name']) is not None + + def _is_Opteron(self): + return re.match(r'.*?Opteron\b', + self.info[0]['model name']) is not None + + def _is_Hammer(self): + return re.match(r'.*?Hammer\b', + self.info[0]['model name']) is not None + + # Alpha + + def _is_Alpha(self): + return self.info[0]['cpu'] == 'Alpha' + + def _is_EV4(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' + + def _is_EV5(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' + + def _is_EV56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' + + def _is_PCA56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' + + # Intel + + #XXX + _is_i386 = _not_impl + + def _is_Intel(self): + return self.info[0]['vendor_id'] == 'GenuineIntel' + + def _is_i486(self): + return self.info[0]['cpu'] == 'i486' + + def _is_i586(self): + return self.is_Intel() and self.info[0]['cpu family'] == '5' + + def _is_i686(self): + return self.is_Intel() and self.info[0]['cpu family'] == '6' + + def _is_Celeron(self): + return re.match(r'.*?Celeron', + self.info[0]['model name']) is not None + + def _is_Pentium(self): + return re.match(r'.*?Pentium', + self.info[0]['model name']) is not None + + def _is_PentiumII(self): + return re.match(r'.*?Pentium.*?II\b', + self.info[0]['model name']) is not None + + def _is_PentiumPro(self): + return re.match(r'.*?PentiumPro\b', + 
self.info[0]['model name']) is not None + + def _is_PentiumMMX(self): + return re.match(r'.*?Pentium.*?MMX\b', + self.info[0]['model name']) is not None + + def _is_PentiumIII(self): + return re.match(r'.*?Pentium.*?III\b', + self.info[0]['model name']) is not None + + def _is_PentiumIV(self): + return re.match(r'.*?Pentium.*?(IV|4)\b', + self.info[0]['model name']) is not None + + def _is_PentiumM(self): + return re.match(r'.*?Pentium.*?M\b', + self.info[0]['model name']) is not None + + def _is_Prescott(self): + return self.is_PentiumIV() and self.has_sse3() + + def _is_Nocona(self): + return (self.is_Intel() and + self.info[0]['cpu family'] in ('6', '15') and + # two s sse3; three s ssse3 not the same thing, this is fine + (self.has_sse3() and not self.has_ssse3()) and + re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) + + def _is_Core2(self): + return (self.is_64bit() and self.is_Intel() and + re.match(r'.*?Core\(TM\)2\b', + self.info[0]['model name']) is not None) + + def _is_Itanium(self): + return re.match(r'.*?Itanium\b', + self.info[0]['family']) is not None + + def _is_XEON(self): + return re.match(r'.*?XEON\b', + self.info[0]['model name'], re.IGNORECASE) is not None + + _is_Xeon = _is_XEON + + # Power + def _is_Power(self): + return re.match(r'.*POWER.*', + self.info[0]['cpu']) is not None + + def _is_Power7(self): + return re.match(r'.*POWER7.*', + self.info[0]['cpu']) is not None + + def _is_Power8(self): + return re.match(r'.*POWER8.*', + self.info[0]['cpu']) is not None + + def _is_Power9(self): + return re.match(r'.*POWER9.*', + self.info[0]['cpu']) is not None + + def _has_Altivec(self): + return re.match(r'.*altivec\ supported.*', + self.info[0]['cpu']) is not None + + # Varia + + def _is_singleCPU(self): + return len(self.info) == 1 + + def _getNCPUs(self): + return len(self.info) + + def _has_fdiv_bug(self): + return self.info[0]['fdiv_bug'] == 'yes' + + def _has_f00f_bug(self): + return self.info[0]['f00f_bug'] == 'yes' + + def _has_mmx(self): + return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None + + def _has_sse(self): + return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None + + def _has_sse2(self): + return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None + + def _has_sse3(self): + return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None + + def _has_ssse3(self): + return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None + + def _has_3dnow(self): + return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None + + def _has_3dnowext(self): + return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None + + +class IRIXCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = key_value_from_command('sysconf', sep=' ', + successful_status=(0, 1)) + self.__class__.info = info + + def _not_impl(self): + pass + + def _is_singleCPU(self): + return self.info.get('NUM_PROCESSORS') == '1' + + def _getNCPUs(self): + return int(self.info.get('NUM_PROCESSORS', 1)) + + def __cputype(self, n): + return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) + + def _is_r2000(self): + return self.__cputype(2000) + + def _is_r3000(self): + return self.__cputype(3000) + + def _is_r3900(self): + return self.__cputype(3900) + + def _is_r4000(self): + return self.__cputype(4000) + + def _is_r4100(self): + return self.__cputype(4100) + + def _is_r4300(self): + return self.__cputype(4300) + + def _is_r4400(self): + return self.__cputype(4400) + + def 
_is_r4600(self): + return self.__cputype(4600) + + def _is_r4650(self): + return self.__cputype(4650) + + def _is_r5000(self): + return self.__cputype(5000) + + def _is_r6000(self): + return self.__cputype(6000) + + def _is_r8000(self): + return self.__cputype(8000) + + def _is_r10000(self): + return self.__cputype(10000) + + def _is_r12000(self): + return self.__cputype(12000) + + def _is_rorion(self): + return self.__cputype('orion') + + def get_ip(self): + try: + return self.info.get('MACHINE') + except: + pass + + def __machine(self, n): + return self.info.get('MACHINE').lower() == 'ip%s' % (n) + + def _is_IP19(self): + return self.__machine(19) + + def _is_IP20(self): + return self.__machine(20) + + def _is_IP21(self): + return self.__machine(21) + + def _is_IP22(self): + return self.__machine(22) + + def _is_IP22_4k(self): + return self.__machine(22) and self._is_r4000() + + def _is_IP22_5k(self): + return self.__machine(22) and self._is_r5000() + + def _is_IP24(self): + return self.__machine(24) + + def _is_IP25(self): + return self.__machine(25) + + def _is_IP26(self): + return self.__machine(26) + + def _is_IP27(self): + return self.__machine(27) + + def _is_IP28(self): + return self.__machine(28) + + def _is_IP30(self): + return self.__machine(30) + + def _is_IP32(self): + return self.__machine(32) + + def _is_IP32_5k(self): + return self.__machine(32) and self._is_r5000() + + def _is_IP32_10k(self): + return self.__machine(32) and self._is_r10000() + + +class DarwinCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + machine='machine') + info['sysctl_hw'] = key_value_from_command(['sysctl', 'hw'], sep='=') + self.__class__.info = info + + def _not_impl(self): pass + + def _getNCPUs(self): + return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) + + def _is_Power_Macintosh(self): + return self.info['sysctl_hw']['hw.machine'] == 'Power Macintosh' + + def _is_i386(self): + return self.info['arch'] == 'i386' + + def _is_ppc(self): + return self.info['arch'] == 'ppc' + + def __machine(self, n): + return self.info['machine'] == 'ppc%s' % n + + def _is_ppc601(self): return self.__machine(601) + + def _is_ppc602(self): return self.__machine(602) + + def _is_ppc603(self): return self.__machine(603) + + def _is_ppc603e(self): return self.__machine('603e') + + def _is_ppc604(self): return self.__machine(604) + + def _is_ppc604e(self): return self.__machine('604e') + + def _is_ppc620(self): return self.__machine(620) + + def _is_ppc630(self): return self.__machine(630) + + def _is_ppc740(self): return self.__machine(740) + + def _is_ppc7400(self): return self.__machine(7400) + + def _is_ppc7450(self): return self.__machine(7450) + + def _is_ppc750(self): return self.__machine(750) + + def _is_ppc403(self): return self.__machine(403) + + def _is_ppc505(self): return self.__machine(505) + + def _is_ppc801(self): return self.__machine(801) + + def _is_ppc821(self): return self.__machine(821) + + def _is_ppc823(self): return self.__machine(823) + + def _is_ppc860(self): return self.__machine(860) + +class NetBSDCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = {} + info['sysctl_hw'] = key_value_from_command(['sysctl', 'hw'], sep='=') + info['arch'] = info['sysctl_hw'].get('hw.machine_arch', 1) + info['machine'] = info['sysctl_hw'].get('hw.machine', 1) + self.__class__.info = info + + def _not_impl(self): pass + + def _getNCPUs(self): + return 
int(self.info['sysctl_hw'].get('hw.ncpu', 1)) + + def _is_Intel(self): + if self.info['sysctl_hw'].get('hw.model', "")[0:5] == 'Intel': + return True + return False + + def _is_AMD(self): + if self.info['sysctl_hw'].get('hw.model', "")[0:3] == 'AMD': + return True + return False + +class SunOSCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + mach='mach', + uname_i=['uname', '-i'], + isainfo_b=['isainfo', '-b'], + isainfo_n=['isainfo', '-n'], + ) + info['uname_X'] = key_value_from_command(['uname', '-X'], sep='=') + for line in command_by_line(['psrinfo', '-v', '0']): + m = re.match(r'\s*The (?P
<p>
[\w\d]+) processor operates at', line) + if m: + info['processor'] = m.group('p') + break + self.__class__.info = info + + def _not_impl(self): + pass + + def _is_i386(self): + return self.info['isainfo_n'] == 'i386' + + def _is_sparc(self): + return self.info['isainfo_n'] == 'sparc' + + def _is_sparcv9(self): + return self.info['isainfo_n'] == 'sparcv9' + + def _getNCPUs(self): + return int(self.info['uname_X'].get('NumCPU', 1)) + + def _is_sun4(self): + return self.info['arch'] == 'sun4' + + def _is_SUNW(self): + return re.match(r'SUNW', self.info['uname_i']) is not None + + def _is_sparcstation5(self): + return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None + + def _is_ultra1(self): + return re.match(r'.*Ultra-1', self.info['uname_i']) is not None + + def _is_ultra250(self): + return re.match(r'.*Ultra-250', self.info['uname_i']) is not None + + def _is_ultra2(self): + return re.match(r'.*Ultra-2', self.info['uname_i']) is not None + + def _is_ultra30(self): + return re.match(r'.*Ultra-30', self.info['uname_i']) is not None + + def _is_ultra4(self): + return re.match(r'.*Ultra-4', self.info['uname_i']) is not None + + def _is_ultra5_10(self): + return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None + + def _is_ultra5(self): + return re.match(r'.*Ultra-5', self.info['uname_i']) is not None + + def _is_ultra60(self): + return re.match(r'.*Ultra-60', self.info['uname_i']) is not None + + def _is_ultra80(self): + return re.match(r'.*Ultra-80', self.info['uname_i']) is not None + + def _is_ultraenterprice(self): + return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None + + def _is_ultraenterprice10k(self): + return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None + + def _is_sunfire(self): + return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None + + def _is_ultra(self): + return re.match(r'.*Ultra', self.info['uname_i']) is not None + + def _is_cpusparcv7(self): + return self.info['processor'] == 'sparcv7' + + def _is_cpusparcv8(self): + return self.info['processor'] == 'sparcv8' + + def _is_cpusparcv9(self): + return self.info['processor'] == 'sparcv9' + + +class Win32CPUInfo(CPUInfoBase): + info = None + pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" + # XXX: what does the value of + # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 + # mean? + + def __init__(self): + try: + import _winreg + except ImportError: # Python 3 + import winreg as _winreg + + if self.info is not None: + return + info = [] + try: + #XXX: Bad style to use so long `try:...except:...`. Fix it! 
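+            # The registry "Identifier" value is a short description string,
+            # e.g. something like "x86 Family 6 Model 142 Stepping 10"; the
+            # regex below extracts the Family/Model/Stepping numbers from it.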
+            prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
+                              r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
+            chnd = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, self.pkey)
+            pnum = 0
+            while 1:
+                try:
+                    proc = _winreg.EnumKey(chnd, pnum)
+                except _winreg.error:
+                    break
+                else:
+                    pnum += 1
+                    info.append({"Processor": proc})
+                    phnd = _winreg.OpenKey(chnd, proc)
+                    pidx = 0
+                    while True:
+                        try:
+                            name, value, vtpe = _winreg.EnumValue(phnd, pidx)
+                        except _winreg.error:
+                            break
+                        else:
+                            pidx = pidx + 1
+                            info[-1][name] = value
+                            if name == "Identifier":
+                                srch = prgx.search(value)
+                                if srch:
+                                    info[-1]["Family"] = int(srch.group("FML"))
+                                    info[-1]["Model"] = int(srch.group("MDL"))
+                                    info[-1]["Stepping"] = int(srch.group("STP"))
+        except Exception:
+            # sys.exc_info()[1] is the current exception instance; the
+            # Python 2-only ``sys.exc_value`` does not exist on Python 3
+            print(sys.exc_info()[1], '(ignoring)')
+        self.__class__.info = info
+
+    def _not_impl(self):
+        pass
+
+    # Athlon
+
+    def _is_AMD(self):
+        return self.info[0]['VendorIdentifier'] == 'AuthenticAMD'
+
+    def _is_Am486(self):
+        return self.is_AMD() and self.info[0]['Family'] == 4
+
+    def _is_Am5x86(self):
+        return self.is_AMD() and self.info[0]['Family'] == 4
+
+    def _is_AMDK5(self):
+        return (self.is_AMD() and self.info[0]['Family'] == 5 and
+                self.info[0]['Model'] in [0, 1, 2, 3])
+
+    def _is_AMDK6(self):
+        return (self.is_AMD() and self.info[0]['Family'] == 5 and
+                self.info[0]['Model'] in [6, 7])
+
+    def _is_AMDK6_2(self):
+        return (self.is_AMD() and self.info[0]['Family'] == 5 and
+                self.info[0]['Model'] == 8)
+
+    def _is_AMDK6_3(self):
+        return (self.is_AMD() and self.info[0]['Family'] == 5 and
+                self.info[0]['Model'] == 9)
+
+    def _is_AMDK7(self):
+        return self.is_AMD() and self.info[0]['Family'] == 6
+
+    # To reliably distinguish between the different types of AMD64 chips
+    # (Athlon64, Opteron, Athlon64 X2, Sempron, Turion 64, etc.) would
+    # require looking at the 'brand' from cpuid
+
+    def _is_AMD64(self):
+        return self.is_AMD() and self.info[0]['Family'] == 15
+
+    # Intel
+
+    def _is_Intel(self):
+        return self.info[0]['VendorIdentifier'] == 'GenuineIntel'
+
+    def _is_i386(self):
+        return self.info[0]['Family'] == 3
+
+    def _is_i486(self):
+        return self.info[0]['Family'] == 4
+
+    def _is_i586(self):
+        return self.is_Intel() and self.info[0]['Family'] == 5
+
+    def _is_i686(self):
+        return self.is_Intel() and self.info[0]['Family'] == 6
+
+    def _is_Pentium(self):
+        return self.is_Intel() and self.info[0]['Family'] == 5
+
+    def _is_PentiumMMX(self):
+        return (self.is_Intel() and self.info[0]['Family'] == 5 and
+                self.info[0]['Model'] == 4)
+
+    def _is_PentiumPro(self):
+        return (self.is_Intel() and self.info[0]['Family'] == 6 and
+                self.info[0]['Model'] == 1)
+
+    def _is_PentiumII(self):
+        return (self.is_Intel() and self.info[0]['Family'] == 6 and
+                self.info[0]['Model'] in [3, 5, 6])
+
+    def _is_PentiumIII(self):
+        return (self.is_Intel() and self.info[0]['Family'] == 6 and
+                self.info[0]['Model'] in [7, 8, 9, 10, 11])
+
+    def _is_PentiumIV(self):
+        return self.is_Intel() and self.info[0]['Family'] == 15
+
+    def _is_PentiumM(self):
+        return (self.is_Intel() and self.info[0]['Family'] == 6 and
+                self.info[0]['Model'] in [9, 13, 14])
+
+    def _is_Core2(self):
+        return (self.is_Intel() and self.info[0]['Family'] == 6 and
+                self.info[0]['Model'] in [15, 16, 17])
+
+    # Varia
+
+    def _is_singleCPU(self):
+        return len(self.info) == 1
+
+    def _getNCPUs(self):
+        return len(self.info)
+
+    def _has_mmx(self):
+        if self.is_Intel():
+            return ((self.info[0]['Family'] == 5 and
+                     self.info[0]['Model'] == 4) or
+                    (self.info[0]['Family'] in [6, 15]))
+        elif self.is_AMD():
+            return self.info[0]['Family'] in [5, 6, 15]
+        else:
+            return False
+
+    def _has_sse(self):
+        if self.is_Intel():
+            return ((self.info[0]['Family'] == 6 and
+                     self.info[0]['Model'] in [7, 8, 9, 10, 11]) or
+                    self.info[0]['Family'] == 15)
+        elif self.is_AMD():
+            return ((self.info[0]['Family'] == 6 and
+                     self.info[0]['Model'] in [6, 7, 8, 10]) or
+                    self.info[0]['Family'] == 15)
+        else:
+            return False
+
+    def _has_sse2(self):
+        if self.is_Intel():
+            # ``is_PentiumIV`` is the probe defined above; there is no
+            # ``_is_Pentium4``, so calling ``is_Pentium4()`` would silently
+            # return None via ``__getattr__``
+            return self.is_PentiumIV() or self.is_PentiumM() or self.is_Core2()
+        elif self.is_AMD():
+            return self.is_AMD64()
+        else:
+            return False
+
+    def _has_3dnow(self):
+        return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15]
+
+    def _has_3dnowext(self):
+        return self.is_AMD() and self.info[0]['Family'] in [6, 15]
+
+
+if sys.platform.startswith('linux'):  # variations: linux2, linux-i386 (any others?)
+    cpuinfo = LinuxCPUInfo
+elif sys.platform.startswith('irix'):
+    cpuinfo = IRIXCPUInfo
+elif sys.platform == 'darwin':
+    cpuinfo = DarwinCPUInfo
+elif sys.platform[0:6] == 'netbsd':
+    cpuinfo = NetBSDCPUInfo
+elif sys.platform.startswith('sunos'):
+    cpuinfo = SunOSCPUInfo
+elif sys.platform.startswith('win32'):
+    cpuinfo = Win32CPUInfo
+elif sys.platform.startswith('cygwin'):
+    cpuinfo = LinuxCPUInfo
+#XXX: other OSes, e.g. use _winreg on Win32, or os.uname on unices.
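+# NOTE: since Python 3.3, sys.platform is plain 'linux' (no '2' suffix),
+# which the startswith('linux') test above still matches.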
+else: + cpuinfo = CPUInfoBase + +cpu = cpuinfo() + +if __name__ == "__main__": + + cpu.is_blaa() + cpu.is_Intel() + cpu.is_Alpha() + + info = [] + for name in dir(cpuinfo): + if name[0] == '_' and name[1] != '_': + r = getattr(cpu, name[1:])() + if r: + if r != 1: + info.append('%s=%s' % (name[1:], r)) + else: + info.append(name[1:]) + print('CPU information: ' + ' '.join(info)) diff --git a/env-llmeval/lib/python3.10/site-packages/numexpr/expressions.py b/env-llmeval/lib/python3.10/site-packages/numexpr/expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..419d7dccd4522ec47b4147c391543897c209949e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/numexpr/expressions.py @@ -0,0 +1,523 @@ +################################################################### +# Numexpr - Fast numerical array expression evaluator for NumPy. +# +# License: MIT +# Author: See AUTHORS.txt +# +# See LICENSE.txt and LICENSES/*.txt for details about copyright and +# rights to use. +#################################################################### + +__all__ = ['E'] + +import operator +import sys +import threading + +import numpy + +# Declare a double type that does not exist in Python space +double = numpy.double + +# The default kind for undeclared variables +default_kind = 'double' +int_ = numpy.int32 +long_ = numpy.int64 + +type_to_kind = {bool: 'bool', int_: 'int', long_: 'long', float: 'float', + double: 'double', complex: 'complex', bytes: 'bytes', str: 'str'} +kind_to_type = {'bool': bool, 'int': int_, 'long': long_, 'float': float, + 'double': double, 'complex': complex, 'bytes': bytes, 'str': str} +kind_rank = ('bool', 'int', 'long', 'float', 'double', 'complex', 'none') +scalar_constant_types = [bool, int_, int, float, double, complex, bytes, str] + +scalar_constant_types = tuple(scalar_constant_types) + +from numexpr import interpreter + +class Expression(): + + def __getattr__(self, name): + if name.startswith('_'): + try: + return self.__dict__[name] + except KeyError: + raise AttributeError + else: + return VariableNode(name, default_kind) + + +E = Expression() + + +class Context(threading.local): + + def get(self, value, default): + return self.__dict__.get(value, default) + + def get_current_context(self): + return self.__dict__ + + def set_new_context(self, dict_): + self.__dict__.update(dict_) + +# This will be called each time the local object is used in a separate thread +_context = Context() + + +def get_optimization(): + return _context.get('optimization', 'none') + + +# helper functions for creating __magic__ methods +def ophelper(f): + def func(*args): + args = list(args) + for i, x in enumerate(args): + if isConstant(x): + args[i] = x = ConstantNode(x) + if not isinstance(x, ExpressionNode): + raise TypeError("unsupported object type: %s" % type(x)) + return f(*args) + + func.__name__ = f.__name__ + func.__doc__ = f.__doc__ + func.__dict__.update(f.__dict__) + return func + + +def allConstantNodes(args): + "returns True if args are all ConstantNodes." + for x in args: + if not isinstance(x, ConstantNode): + return False + return True + + +def isConstant(ex): + "Returns True if ex is a constant scalar of an allowed type." 
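+    # e.g. isConstant(3.0) and isConstant(b'abc') are True; a numpy.ndarray
+    # or an ExpressionNode is not a scalar constant, so both return False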
+ return isinstance(ex, scalar_constant_types) + + +def commonKind(nodes): + node_kinds = [node.astKind for node in nodes] + str_count = node_kinds.count('bytes') + node_kinds.count('str') + if 0 < str_count < len(node_kinds): # some args are strings, but not all + raise TypeError("strings can only be operated with strings") + if str_count > 0: # if there are some, all of them must be + return 'bytes' + n = -1 + for x in nodes: + n = max(n, kind_rank.index(x.astKind)) + return kind_rank[n] + + +max_int32 = 2147483647 +min_int32 = -max_int32 - 1 + + +def bestConstantType(x): + # ``numpy.string_`` is a subclass of ``bytes`` + if isinstance(x, (bytes, str)): + return bytes + # Numeric conversion to boolean values is not tried because + # ``bool(1) == True`` (same for 0 and False), so 0 and 1 would be + # interpreted as booleans when ``False`` and ``True`` are already + # supported. + if isinstance(x, (bool, numpy.bool_)): + return bool + # ``long`` objects are kept as is to allow the user to force + # promotion of results by using long constants, e.g. by operating + # a 32-bit array with a long (64-bit) constant. + if isinstance(x, (long_, numpy.int64)): + return long_ + # ``double`` objects are kept as is to allow the user to force + # promotion of results by using double constants, e.g. by operating + # a float (32-bit) array with a double (64-bit) constant. + if isinstance(x, double): + return double + if isinstance(x, numpy.float32): + return float + if isinstance(x, (int, numpy.integer)): + # Constants needing more than 32 bits are always + # considered ``long``, *regardless of the platform*, so we + # can clearly tell 32- and 64-bit constants apart. + if not (min_int32 <= x <= max_int32): + return long_ + return int_ + # The duality of float and double in Python avoids that we have to list + # ``double`` too. + for converter in float, complex: + try: + y = converter(x) + except Exception as err: + continue + if y == x or numpy.isnan(y): + return converter + + +def getKind(x): + converter = bestConstantType(x) + return type_to_kind[converter] + + +def binop(opname, reversed=False, kind=None): + # Getting the named method from self (after reversal) does not + # always work (e.g. int constants do not have a __lt__ method). + opfunc = getattr(operator, "__%s__" % opname) + + @ophelper + def operation(self, other): + if reversed: + self, other = other, self + if allConstantNodes([self, other]): + return ConstantNode(opfunc(self.value, other.value)) + else: + return OpNode(opname, (self, other), kind=kind) + + return operation + + +def func(func, minkind=None, maxkind=None): + @ophelper + def function(*args): + if allConstantNodes(args): + return ConstantNode(func(*[x.value for x in args])) + kind = commonKind(args) + if kind in ('int', 'long'): + # Exception for following NumPy casting rules + #FIXME: this is not always desirable. 
The following + # functions which return ints (for int inputs) on numpy + # but not on numexpr: copy, abs, fmod, ones_like + kind = 'double' + else: + # Apply regular casting rules + if minkind and kind_rank.index(minkind) > kind_rank.index(kind): + kind = minkind + if maxkind and kind_rank.index(maxkind) < kind_rank.index(kind): + kind = maxkind + return FuncNode(func.__name__, args, kind) + + return function + + +@ophelper +def where_func(a, b, c): + if isinstance(a, ConstantNode): + return b if a.value else c + if allConstantNodes([a, b, c]): + return ConstantNode(numpy.where(a, b, c)) + return FuncNode('where', [a, b, c]) + + +def encode_axis(axis): + if isinstance(axis, ConstantNode): + axis = axis.value + if axis is None: + axis = interpreter.allaxes + else: + if axis < 0: + raise ValueError("negative axis are not supported") + if axis > 254: + raise ValueError("cannot encode axis") + return RawNode(axis) + + +def gen_reduce_axis_func(name): + def _func(a, axis=None): + axis = encode_axis(axis) + if isinstance(a, ConstantNode): + return a + if isinstance(a, (bool, int_, long_, float, double, complex)): + a = ConstantNode(a) + return FuncNode(name, [a, axis], kind=a.astKind) + return _func + + +@ophelper +def contains_func(a, b): + return FuncNode('contains', [a, b], kind='bool') + + +@ophelper +def div_op(a, b): + if get_optimization() in ('moderate', 'aggressive'): + if (isinstance(b, ConstantNode) and + (a.astKind == b.astKind) and + a.astKind in ('float', 'double', 'complex')): + return OpNode('mul', [a, ConstantNode(1. / b.value)]) + return OpNode('div', [a, b]) + + +@ophelper +def truediv_op(a, b): + if get_optimization() in ('moderate', 'aggressive'): + if (isinstance(b, ConstantNode) and + (a.astKind == b.astKind) and + a.astKind in ('float', 'double', 'complex')): + return OpNode('mul', [a, ConstantNode(1. / b.value)]) + kind = commonKind([a, b]) + if kind in ('bool', 'int', 'long'): + kind = 'double' + return OpNode('div', [a, b], kind=kind) + + +@ophelper +def rtruediv_op(a, b): + return truediv_op(b, a) + + +@ophelper +def pow_op(a, b): + + if isinstance(b, ConstantNode): + x = b.value + if ( a.astKind in ('int', 'long') and + b.astKind in ('int', 'long') and x < 0) : + raise ValueError( + 'Integers to negative integer powers are not allowed.') + if get_optimization() == 'aggressive': + RANGE = 50 # Approximate break even point with pow(x,y) + # Optimize all integral and half integral powers in [-RANGE, RANGE] + # Note: for complex numbers RANGE could be larger. 
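+            # This builds an exponentiation-by-squaring chain: scan the bits
+            # of n = |x| from least significant upward, squaring p each step
+            # and multiplying it into the result for every set bit, so the
+            # number of 'mul' OpNodes grows as O(log n); a half-integral
+            # exponent tacks one sqrt(a) onto the chain.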
+ if (int(2 * x) == 2 * x) and (-RANGE <= abs(x) <= RANGE): + n = int_(abs(x)) + ishalfpower = int_(abs(2 * x)) % 2 + + def multiply(x, y): + if x is None: return y + return OpNode('mul', [x, y]) + + r = None + p = a + mask = 1 + while True: + if (n & mask): + r = multiply(r, p) + mask <<= 1 + if mask > n: + break + p = OpNode('mul', [p, p]) + if ishalfpower: + kind = commonKind([a]) + if kind in ('int', 'long'): + kind = 'double' + r = multiply(r, OpNode('sqrt', [a], kind)) + if r is None: + r = OpNode('ones_like', [a]) + if x < 0: + # Issue #428 + r = truediv_op(ConstantNode(1), r) + return r + if get_optimization() in ('moderate', 'aggressive'): + if x == -1: + return OpNode('div', [ConstantNode(1), a]) + if x == 0: + return OpNode('ones_like', [a]) + if x == 0.5: + kind = a.astKind + if kind in ('int', 'long'): kind = 'double' + return FuncNode('sqrt', [a], kind=kind) + if x == 1: + return a + if x == 2: + return OpNode('mul', [a, a]) + return OpNode('pow', [a, b]) + +# The functions and the minimum and maximum types accepted +numpy.expm1x = numpy.expm1 +functions = { + 'copy': func(numpy.copy), + 'ones_like': func(numpy.ones_like), + 'sqrt': func(numpy.sqrt, 'float'), + + 'sin': func(numpy.sin, 'float'), + 'cos': func(numpy.cos, 'float'), + 'tan': func(numpy.tan, 'float'), + 'arcsin': func(numpy.arcsin, 'float'), + 'arccos': func(numpy.arccos, 'float'), + 'arctan': func(numpy.arctan, 'float'), + + 'sinh': func(numpy.sinh, 'float'), + 'cosh': func(numpy.cosh, 'float'), + 'tanh': func(numpy.tanh, 'float'), + 'arcsinh': func(numpy.arcsinh, 'float'), + 'arccosh': func(numpy.arccosh, 'float'), + 'arctanh': func(numpy.arctanh, 'float'), + + 'fmod': func(numpy.fmod, 'float'), + 'arctan2': func(numpy.arctan2, 'float'), + + 'log': func(numpy.log, 'float'), + 'log1p': func(numpy.log1p, 'float'), + 'log10': func(numpy.log10, 'float'), + 'exp': func(numpy.exp, 'float'), + 'expm1': func(numpy.expm1, 'float'), + + 'abs': func(numpy.absolute, 'float'), + 'ceil': func(numpy.ceil, 'float', 'double'), + 'floor': func(numpy.floor, 'float', 'double'), + + 'where': where_func, + + 'real': func(numpy.real, 'double', 'double'), + 'imag': func(numpy.imag, 'double', 'double'), + 'complex': func(complex, 'complex'), + 'conj': func(numpy.conj, 'complex'), + + 'sum': gen_reduce_axis_func('sum'), + 'prod': gen_reduce_axis_func('prod'), + 'min': gen_reduce_axis_func('min'), + 'max': gen_reduce_axis_func('max'), + 'contains': contains_func, +} + + +class ExpressionNode(): + """ + An object that represents a generic number object. + + This implements the number special methods so that we can keep + track of how this object has been used. 
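+
+    For example, ``E.a + 2 * E.b`` evaluates nothing; it simply returns an
+    OpNode('add', ...) tree that records the operations for later compilation.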
+ """ + astType = 'generic' + + def __init__(self, value=None, kind=None, children=None): + self.value = value + if kind is None: + kind = 'none' + self.astKind = kind + if children is None: + self.children = () + else: + self.children = tuple(children) + + def get_real(self): + if self.astType == 'constant': + return ConstantNode(complex(self.value).real) + return OpNode('real', (self,), 'double') + + real = property(get_real) + + def get_imag(self): + if self.astType == 'constant': + return ConstantNode(complex(self.value).imag) + return OpNode('imag', (self,), 'double') + + imag = property(get_imag) + + def __str__(self): + return '%s(%s, %s, %s)' % (self.__class__.__name__, self.value, + self.astKind, self.children) + + def __repr__(self): + return self.__str__() + + def __neg__(self): + return OpNode('neg', (self,)) + + def __invert__(self): + return OpNode('invert', (self,)) + + def __pos__(self): + return self + + # The next check is commented out. See #24 for more info. + + def __bool__(self): + raise TypeError("You can't use Python's standard boolean operators in " + "NumExpr expressions. You should use their bitwise " + "counterparts instead: '&' instead of 'and', " + "'|' instead of 'or', and '~' instead of 'not'.") + + __add__ = __radd__ = binop('add') + __sub__ = binop('sub') + __rsub__ = binop('sub', reversed=True) + __mul__ = __rmul__ = binop('mul') + __truediv__ = truediv_op + __rtruediv__ = rtruediv_op + __pow__ = pow_op + __rpow__ = binop('pow', reversed=True) + __mod__ = binop('mod') + __rmod__ = binop('mod', reversed=True) + + __lshift__ = binop('lshift') + __rlshift__ = binop('lshift', reversed=True) + __rshift__ = binop('rshift') + __rrshift__ = binop('rshift', reversed=True) + + # boolean operations + + __and__ = binop('and', kind='bool') + __or__ = binop('or', kind='bool') + + __gt__ = binop('gt', kind='bool') + __ge__ = binop('ge', kind='bool') + __eq__ = binop('eq', kind='bool') + __ne__ = binop('ne', kind='bool') + __lt__ = binop('gt', reversed=True, kind='bool') + __le__ = binop('ge', reversed=True, kind='bool') + + +class LeafNode(ExpressionNode): + leafNode = True + + +class VariableNode(LeafNode): + astType = 'variable' + + def __init__(self, value=None, kind=None, children=None): + LeafNode.__init__(self, value=value, kind=kind) + + +class RawNode(): + """ + Used to pass raw integers to interpreter. + For instance, for selecting what function to use in func1. + Purposely don't inherit from ExpressionNode, since we don't wan't + this to be used for anything but being walked. 
+ """ + astType = 'raw' + astKind = 'none' + + def __init__(self, value): + self.value = value + self.children = () + + def __str__(self): + return 'RawNode(%s)' % (self.value,) + + __repr__ = __str__ + + +class ConstantNode(LeafNode): + astType = 'constant' + + def __init__(self, value=None, children=None): + kind = getKind(value) + # Python float constants are double precision by default + if kind == 'float' and isinstance(value, float): + kind = 'double' + LeafNode.__init__(self, value=value, kind=kind) + + def __neg__(self): + return ConstantNode(-self.value) + + def __invert__(self): + return ConstantNode(~self.value) + + +class OpNode(ExpressionNode): + astType = 'op' + + def __init__(self, opcode=None, args=None, kind=None): + if (kind is None) and (args is not None): + kind = commonKind(args) + ExpressionNode.__init__(self, value=opcode, kind=kind, children=args) + + +class FuncNode(OpNode): + def __init__(self, opcode=None, args=None, kind=None): + if (kind is None) and (args is not None): + kind = commonKind(args) + OpNode.__init__(self, opcode, args, kind) diff --git a/env-llmeval/lib/python3.10/site-packages/numexpr/necompiler.py b/env-llmeval/lib/python3.10/site-packages/numexpr/necompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..5126bd73f7e9bce79d28c1970f558931004e32c4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/numexpr/necompiler.py @@ -0,0 +1,1007 @@ +################################################################### +# Numexpr - Fast numerical array expression evaluator for NumPy. +# +# License: MIT +# Author: See AUTHORS.txt +# +# See LICENSE.txt and LICENSES/*.txt for details about copyright and +# rights to use. +#################################################################### + +from typing import Optional, Dict +import __future__ +import sys +import os +import threading +import re + +import numpy + +is_cpu_amd_intel = False # DEPRECATION WARNING: WILL BE REMOVED IN FUTURE RELEASE +from numexpr import interpreter, expressions, use_vml +from numexpr.utils import CacheDict + +# Declare a double type that does not exist in Python space +double = numpy.double +double = numpy.double + +int_ = numpy.int32 +long_ = numpy.int64 + +typecode_to_kind = {'b': 'bool', 'i': 'int', 'l': 'long', 'f': 'float', 'd': 'double', + 'c': 'complex', 'n': 'none', 's': 'str'} +kind_to_typecode = {'bool': 'b', 'int': 'i', 'long': 'l', 'float': 'f', 'double': 'd', + 'complex': 'c', 'bytes': 's', 'str': 's', 'none': 'n'} +type_to_typecode = {bool: 'b', int_: 'i', long_: 'l', float: 'f', + double: 'd', complex: 'c', bytes: 's', str: 's'} +type_to_kind = expressions.type_to_kind +kind_to_type = expressions.kind_to_type +default_type = kind_to_type[expressions.default_kind] +scalar_constant_kinds = list(kind_to_typecode.keys()) + +# VML functions that are implemented in numexpr +vml_functions = [ + "div", # interp_body.cpp + "inv", # interp_body.cpp + "pow", # interp_body.cpp + # Keep the rest of this list in sync with the ones listed in functions.hpp + "sqrt", + "sin", + "cos", + "tan", + "arcsin", + "arccos", + "arctan", + "sinh", + "cosh", + "tanh", + "arcsinh", + "arccosh", + "arctanh", + "log", + "log1p", + "log10", + "exp", + "expm1", + "absolute", + "conjugate", + "arctan2", + "fmod", + "ceil", + "floor" + ] + + +class ASTNode(): + """Abstract Syntax Tree node. + + Members: + + astType -- type of node (op, constant, variable, raw, or alias) + astKind -- the type of the result (bool, float, etc.) + value -- value associated with this node. 
An opcode, numerical value, a variable name, etc.
+    children -- the children below this node
+    reg -- the register assigned to the result for this node.
+    """
+    cmpnames = ['astType', 'astKind', 'value', 'children']
+
+    def __init__(self, astType='generic', astKind='unknown', value=None, children=()):
+        self.astType = astType
+        self.astKind = astKind
+        self.value = value
+        self.children = tuple(children)
+        self.reg = None
+
+    def __eq__(self, other):
+        if self.astType == 'alias':
+            self = self.value
+        if other.astType == 'alias':
+            other = other.value
+        if not isinstance(other, ASTNode):
+            return False
+        for name in self.cmpnames:
+            if getattr(self, name) != getattr(other, name):
+                return False
+        return True
+
+    def __lt__(self, other):
+        # RAM: this is a fix for issue #88 whereby sorting on constants
+        # that may be of astKind == 'complex' but type(self.value) == int or float
+        # Here we let NumPy sort as it will cast data properly for comparison
+        # when the Python built-ins will raise an error.
+        if self.astType == 'constant':
+            if self.astKind == other.astKind:
+                return numpy.array(self.value) < numpy.array(other.value)
+            return self.astKind < other.astKind
+        else:
+            raise TypeError('Sorting not implemented for astType: %s' % self.astType)
+
+    def __hash__(self):
+        if self.astType == 'alias':
+            self = self.value
+        return hash((self.astType, self.astKind, self.value, self.children))
+
+    def __str__(self):
+        return 'AST(%s, %s, %s, %s, %s)' % (self.astType, self.astKind,
+                                            self.value, self.children, self.reg)
+
+    def __repr__(self):
+        return '<AST object at %s>' % id(self)
+
+    def key(self):
+        return (self.astType, self.astKind, self.value, self.children)
+
+    def typecode(self):
+        return kind_to_typecode[self.astKind]
+
+    def postorderWalk(self):
+        for c in self.children:
+            for w in c.postorderWalk():
+                yield w
+        yield self
+
+    def allOf(self, *astTypes):
+        astTypes = set(astTypes)
+        for w in self.postorderWalk():
+            if w.astType in astTypes:
+                yield w
+
+
+def expressionToAST(ex):
+    """Take an expression tree made out of expressions.ExpressionNode,
+    and convert to an AST tree.
+
+    This is necessary as ExpressionNode overrides many methods to act
+    like a number.
+    """
+    return ASTNode(ex.astType, ex.astKind, ex.value,
+                   [expressionToAST(c) for c in ex.children])
+
+
+def sigPerms(s):
+    """Generate all possible signatures derived by upcasting the given
+    signature.
+    """
+    codes = 'bilfdc'
+    if not s:
+        yield ''
+    elif s[0] in codes:
+        start = codes.index(s[0])
+        for x in codes[start:]:
+            for y in sigPerms(s[1:]):
+                yield x + y
+    elif s[0] == 's':  # numbers shall not be cast to strings
+        for y in sigPerms(s[1:]):
+            yield 's' + y
+    else:
+        yield s
+
+
+def typeCompileAst(ast):
+    """Assign appropriate types to each node in the AST.
+
+    Will convert opcodes and functions to appropriate upcast version,
+    and add "cast" ops if needed.
+    """
+    children = list(ast.children)
+    if ast.astType == 'op':
+        retsig = ast.typecode()
+        basesig = ''.join(x.typecode() for x in list(ast.children))
+        # Find some operation that will work on an acceptable casting of args.
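+        # sigPerms() yields candidate signatures in upcast order along
+        # 'bilfdc' (bool -> int -> long -> float -> double -> complex), so
+        # the first opcode found is the cheapest legal cast for these args.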
+ for sig in sigPerms(basesig): + value = (ast.value + '_' + retsig + sig).encode('ascii') + if value in interpreter.opcodes: + break + else: + for sig in sigPerms(basesig): + funcname = (ast.value + '_' + retsig + sig).encode('ascii') + if funcname in interpreter.funccodes: + value = ('func_%sn' % (retsig + sig)).encode('ascii') + children += [ASTNode('raw', 'none', + interpreter.funccodes[funcname])] + break + else: + raise NotImplementedError( + "couldn't find matching opcode for '%s'" + % (ast.value + '_' + retsig + basesig)) + # First just cast constants, then cast variables if necessary: + for i, (have, want) in enumerate(zip(basesig, sig)): + if have != want: + kind = typecode_to_kind[want] + if children[i].astType == 'constant': + children[i] = ASTNode('constant', kind, children[i].value) + else: + opname = "cast" + children[i] = ASTNode('op', kind, opname, [children[i]]) + else: + value = ast.value + children = ast.children + return ASTNode(ast.astType, ast.astKind, value, + [typeCompileAst(c) for c in children]) + + +class Register(): + """Abstraction for a register in the VM. + + Members: + node -- the AST node this corresponds to + temporary -- True if this isn't an input or output + immediate -- not a register, but an immediate value + n -- the physical register number. + None if no number assigned yet. + """ + + def __init__(self, astnode, temporary=False): + self.node = astnode + self.temporary = temporary + self.immediate = False + self.n = None + + def __str__(self): + if self.temporary: + name = 'Temporary' + else: + name = 'Register' + return '%s(%s, %s, %s)' % (name, self.node.astType, + self.node.astKind, self.n,) + + def __repr__(self): + return self.__str__() + + +class Immediate(Register): + """Representation of an immediate (integer) operand, instead of + a register. + """ + + def __init__(self, astnode): + Register.__init__(self, astnode) + self.immediate = True + + def __str__(self): + return 'Immediate(%d)' % (self.node.value,) + + +_flow_pat = r'[\;\[\:]' +_dunder_pat = r'(^|[^\w])__[\w]+__($|[^\w])' +_attr_pat = r'\.\b(?!(real|imag|(\d*[eE]?[+-]?\d+)|\d*j)\b)' +_blacklist_re = re.compile(f'{_flow_pat}|{_dunder_pat}|{_attr_pat}') + +def stringToExpression(s, types, context, sanitize: bool=True): + """Given a string, convert it to a tree of ExpressionNode's. + """ + # sanitize the string for obvious attack vectors that NumExpr cannot + # parse into its homebrew AST. This is to protect the call to `eval` below. + # We forbid `;`, `:`. `[` and `__`, and attribute access via '.'. + # We cannot ban `.real` or `.imag` however... + # We also cannot ban `.\d*j`, where `\d*` is some digits (or none), e.g. 
1.5j, 1.j
+    if sanitize:
+        no_whitespace = re.sub(r'\s+', '', s)
+        skip_quotes = re.sub(r'(\'[^\']*\')', '', no_whitespace)
+        if _blacklist_re.search(skip_quotes) is not None:
+            raise ValueError(f'Expression {s} has forbidden control characters.')
+
+    old_ctx = expressions._context.get_current_context()
+    try:
+        expressions._context.set_new_context(context)
+        # first compile to a code object to determine the names
+        if context.get('truediv', False):
+            flags = __future__.division.compiler_flag
+        else:
+            flags = 0
+        c = compile(s, '<expr>', 'eval', flags)
+        # make VariableNode's for the names
+        names = {}
+        for name in c.co_names:
+            if name == "None":
+                names[name] = None
+            elif name == "True":
+                names[name] = True
+            elif name == "False":
+                names[name] = False
+            else:
+                t = types.get(name, default_type)
+                names[name] = expressions.VariableNode(name, type_to_kind[t])
+        names.update(expressions.functions)
+
+        # now build the expression
+        ex = eval(c, names)
+
+        if expressions.isConstant(ex):
+            ex = expressions.ConstantNode(ex, expressions.getKind(ex))
+        elif not isinstance(ex, expressions.ExpressionNode):
+            raise TypeError("unsupported expression type: %s" % type(ex))
+    finally:
+        expressions._context.set_new_context(old_ctx)
+    return ex
+
+
+def isReduction(ast):
+    prefixes = (b'sum_', b'prod_', b'min_', b'max_')
+    return any(ast.value.startswith(p) for p in prefixes)
+
+
+def getInputOrder(ast, input_order=None):
+    """
+    Derive the input order of the variables in an expression.
+    """
+    variables = {}
+    for a in ast.allOf('variable'):
+        variables[a.value] = a
+    variable_names = set(variables.keys())
+
+    if input_order:
+        if variable_names != set(input_order):
+            raise ValueError(
+                "input names (%s) don't match those found in expression (%s)"
+                % (input_order, variable_names))
+
+        ordered_names = input_order
+    else:
+        ordered_names = list(variable_names)
+        ordered_names.sort()
+    ordered_variables = [variables[v] for v in ordered_names]
+    return ordered_variables
+
+
+def convertConstantToKind(x, kind):
+    # Exception for 'float' types that will return the NumPy float32 type
+    if kind == 'float':
+        return numpy.float32(x)
+    elif isinstance(x, str):
+        return x.encode('ascii')
+    return kind_to_type[kind](x)
+
+
+def getConstants(ast):
+    """
+    RAM: implemented magic method __lt__ for ASTNode to fix issues
+    #88 and #209. The following test code works now, as does the test suite.
+
+        import numexpr as ne
+        a = 1 + 3j; b = 5.0
+        ne.evaluate('a*2 + 15j - b')
+    """
+    constant_registers = set([node.reg for node in ast.allOf("constant")])
+    constants_order = sorted([r.node for r in constant_registers])
+    constants = [convertConstantToKind(a.value, a.astKind)
+                 for a in constants_order]
+    return constants_order, constants
+
+
+def sortNodesByOrder(nodes, order):
+    order_map = {}
+    for i, (_, v, _) in enumerate(order):
+        order_map[v] = i
+    dec_nodes = [(order_map[n.value], n) for n in nodes]
+    dec_nodes.sort()
+    return [a[1] for a in dec_nodes]
+
+
+def assignLeafRegisters(inodes, registerMaker):
+    """
+    Assign new registers to each of the leaf nodes.
+    """
+    leafRegisters = {}
+    for node in inodes:
+        key = node.key()
+        if key in leafRegisters:
+            node.reg = leafRegisters[key]
+        else:
+            node.reg = leafRegisters[key] = registerMaker(node)
+
+
+def assignBranchRegisters(inodes, registerMaker):
+    """
+    Assign temporary registers to each of the branch nodes.
+    """
+    for node in inodes:
+        node.reg = registerMaker(node, temporary=True)
+
+
+def collapseDuplicateSubtrees(ast):
+    """
+    Common subexpression elimination.
+ """ + seen = {} + aliases = [] + for a in ast.allOf('op'): + if a in seen: + target = seen[a] + a.astType = 'alias' + a.value = target + a.children = () + aliases.append(a) + else: + seen[a] = a + # Set values and registers so optimizeTemporariesAllocation + # doesn't get confused + for a in aliases: + while a.value.astType == 'alias': + a.value = a.value.value + return aliases + + +def optimizeTemporariesAllocation(ast): + """ + Attempt to minimize the number of temporaries needed, by reusing old ones. + """ + nodes = [n for n in ast.postorderWalk() if n.reg.temporary] + users_of = dict((n.reg, set()) for n in nodes) + + node_regs = dict((n, set(c.reg for c in n.children if c.reg.temporary)) + for n in nodes) + if nodes and nodes[-1] is not ast: + nodes_to_check = nodes + [ast] + else: + nodes_to_check = nodes + for n in nodes_to_check: + for c in n.children: + if c.reg.temporary: + users_of[c.reg].add(n) + + unused = dict([(tc, set()) for tc in scalar_constant_kinds]) + for n in nodes: + for c in n.children: + reg = c.reg + if reg.temporary: + users = users_of[reg] + users.discard(n) + if not users: + unused[reg.node.astKind].add(reg) + if unused[n.astKind]: + reg = unused[n.astKind].pop() + users_of[reg] = users_of[n.reg] + n.reg = reg + + +def setOrderedRegisterNumbers(order, start): + """ + Given an order of nodes, assign register numbers. + """ + for i, node in enumerate(order): + node.reg.n = start + i + return start + len(order) + + +def setRegisterNumbersForTemporaries(ast, start): + """ + Assign register numbers for temporary registers, keeping track of + aliases and handling immediate operands. + """ + seen = 0 + signature = '' + aliases = [] + for node in ast.postorderWalk(): + if node.astType == 'alias': + aliases.append(node) + node = node.value + if node.reg.immediate: + node.reg.n = node.value + continue + reg = node.reg + if reg.n is None: + reg.n = start + seen + seen += 1 + signature += reg.node.typecode() + for node in aliases: + node.reg = node.value.reg + return start + seen, signature + + +def convertASTtoThreeAddrForm(ast): + """ + Convert an AST to a three address form. + + Three address form is (op, reg1, reg2, reg3), where reg1 is the + destination of the result of the instruction. + + I suppose this should be called three register form, but three + address form is found in compiler theory. + """ + return [(node.value, node.reg) + tuple([c.reg for c in node.children]) + for node in ast.allOf('op')] + + +def compileThreeAddrForm(program): + """ + Given a three address form of the program, compile it a string that + the VM understands. 
+ """ + + def nToChr(reg): + if reg is None: + return b'\xff' + elif reg.n < 0: + raise ValueError("negative value for register number %s" % reg.n) + else: + return bytes([reg.n]) + + def quadrupleToString(opcode, store, a1=None, a2=None): + cop = chr(interpreter.opcodes[opcode]).encode('ascii') + cs = nToChr(store) + ca1 = nToChr(a1) + ca2 = nToChr(a2) + return cop + cs + ca1 + ca2 + + def toString(args): + while len(args) < 4: + args += (None,) + opcode, store, a1, a2 = args[:4] + s = quadrupleToString(opcode, store, a1, a2) + l = [s] + args = args[4:] + while args: + s = quadrupleToString(b'noop', *args[:3]) + l.append(s) + args = args[3:] + return b''.join(l) + + prog_str = b''.join([toString(t) for t in program]) + return prog_str + + +context_info = [ + ('optimization', ('none', 'moderate', 'aggressive'), 'aggressive'), + ('truediv', (False, True, 'auto'), 'auto') +] + + +def getContext(kwargs, _frame_depth=1): + d = kwargs.copy() + context = {} + for name, allowed, default in context_info: + value = d.pop(name, default) + if value in allowed: + context[name] = value + else: + raise ValueError("'%s' must be one of %s" % (name, allowed)) + + if d: + raise ValueError("Unknown keyword argument '%s'" % d.popitem()[0]) + if context['truediv'] == 'auto': + caller_globals = sys._getframe(_frame_depth + 1).f_globals + context['truediv'] = caller_globals.get('division', None) == __future__.division + + return context + + +def precompile(ex, signature=(), context={}, sanitize: bool=True): + """ + Compile the expression to an intermediate form. + """ + types = dict(signature) + input_order = [name for (name, type_) in signature] + + if isinstance(ex, str): + ex = stringToExpression(ex, types, context, sanitize) + + # the AST is like the expression, but the node objects don't have + # any odd interpretations + + ast = expressionToAST(ex) + + if ex.astType != 'op': + ast = ASTNode('op', value='copy', astKind=ex.astKind, children=(ast,)) + + ast = typeCompileAst(ast) + + aliases = collapseDuplicateSubtrees(ast) + + assignLeafRegisters(ast.allOf('raw'), Immediate) + assignLeafRegisters(ast.allOf('variable', 'constant'), Register) + assignBranchRegisters(ast.allOf('op'), Register) + + # assign registers for aliases + for a in aliases: + a.reg = a.value.reg + + input_order = getInputOrder(ast, input_order) + constants_order, constants = getConstants(ast) + + if isReduction(ast): + ast.reg.temporary = False + + optimizeTemporariesAllocation(ast) + + ast.reg.temporary = False + r_output = 0 + ast.reg.n = 0 + + r_inputs = r_output + 1 + r_constants = setOrderedRegisterNumbers(input_order, r_inputs) + r_temps = setOrderedRegisterNumbers(constants_order, r_constants) + r_end, tempsig = setRegisterNumbersForTemporaries(ast, r_temps) + + threeAddrProgram = convertASTtoThreeAddrForm(ast) + input_names = tuple([a.value for a in input_order]) + signature = ''.join(type_to_typecode[types.get(x, default_type)] + for x in input_names) + return threeAddrProgram, signature, tempsig, constants, input_names + + +def NumExpr(ex, signature=(), sanitize: bool=True, **kwargs): + """ + Compile an expression built using E. variables to a function. + + ex can also be specified as a string "2*a+3*b". + + The order of the input variables and their types can be specified using the + signature parameter, which is a list of (name, type) pairs. + + Returns a `NumExpr` object containing the compiled function. 
+ """ + + # In that case _frame_depth is wrong (it should be 2) but it doesn't matter + # since it will not be used (because truediv='auto' has already been + # translated to either True or False). + _frame_depth = 1 + context = getContext(kwargs, _frame_depth=_frame_depth) + threeAddrProgram, inputsig, tempsig, constants, input_names = precompile(ex, signature, context, sanitize=sanitize) + program = compileThreeAddrForm(threeAddrProgram) + return interpreter.NumExpr(inputsig.encode('ascii'), + tempsig.encode('ascii'), + program, constants, input_names) + + +def disassemble(nex): + """ + Given a NumExpr object, return a list which is the program disassembled. + """ + rev_opcodes = {} + for op in interpreter.opcodes: + rev_opcodes[interpreter.opcodes[op]] = op + r_constants = 1 + len(nex.signature) + r_temps = r_constants + len(nex.constants) + + def parseOp(op): + name, sig = [*op.rsplit(b'_', 1), ''][:2] + return name, sig + + def getArg(pc, offset): + arg = nex.program[pc + (offset if offset < 4 else offset+1)] + _, sig = parseOp(rev_opcodes.get(nex.program[pc])) + try: + code = sig[offset - 1] + except IndexError: + return None + + code = bytes([code]) + + if arg == 255: + return None + if code != b'n': + if arg == 0: + return b'r0' + elif arg < r_constants: + return ('r%d[%s]' % (arg, nex.input_names[arg - 1])).encode('ascii') + elif arg < r_temps: + return ('c%d[%s]' % (arg, nex.constants[arg - r_constants])).encode('ascii') + else: + return ('t%d' % (arg,)).encode('ascii') + else: + return arg + + source = [] + for pc in range(0, len(nex.program), 4): + op = rev_opcodes.get(nex.program[pc]) + _, sig = parseOp(op) + parsed = [op] + for i in range(len(sig)): + parsed.append(getArg(pc, 1 + i)) + while len(parsed) < 4: + parsed.append(None) + source.append(parsed) + return source + + +def getType(a): + kind = a.dtype.kind + if kind == 'b': + return bool + if kind in 'iu': + if a.dtype.itemsize > 4: + return long_ # ``long`` is for integers of more than 32 bits + if kind == 'u' and a.dtype.itemsize == 4: + return long_ # use ``long`` here as an ``int`` is not enough + return int_ + if kind == 'f': + if a.dtype.itemsize > 4: + return double # ``double`` is for floats of more than 32 bits + return float + if kind == 'c': + return complex + if kind == 'S': + return bytes + if kind == 'U': + raise ValueError('NumExpr 2 does not support Unicode as a dtype.') + raise ValueError("unknown type %s" % a.dtype.name) + + +def getExprNames(text, context, sanitize: bool=True): + ex = stringToExpression(text, {}, context, sanitize) + ast = expressionToAST(ex) + input_order = getInputOrder(ast, None) + #try to figure out if vml operations are used by expression + if not use_vml: + ex_uses_vml = False + else: + for node in ast.postorderWalk(): + if node.astType == 'op' and node.value in vml_functions: + ex_uses_vml = True + break + else: + ex_uses_vml = False + + return [a.value for a in input_order], ex_uses_vml + + +def getArguments(names, local_dict=None, global_dict=None, _frame_depth: int=2): + """ + Get the arguments based on the names. + """ + call_frame = sys._getframe(_frame_depth) + + clear_local_dict = False + if local_dict is None: + local_dict = call_frame.f_locals + clear_local_dict = True + try: + frame_globals = call_frame.f_globals + if global_dict is None: + global_dict = frame_globals + + # If `call_frame` is the top frame of the interpreter we can't clear its + # `local_dict`, because it is actually the `global_dict`. 
+ clear_local_dict = clear_local_dict and not frame_globals is local_dict + + arguments = [] + for name in names: + try: + a = local_dict[name] + except KeyError: + a = global_dict[name] + arguments.append(numpy.asarray(a)) + finally: + # If we generated local_dict via an explicit reference to f_locals, + # clear the dict to prevent creating extra ref counts in the caller's scope + # See https://github.com/pydata/numexpr/issues/310 + if clear_local_dict: + local_dict.clear() + + return arguments + + +# Dictionaries for caching variable names and compiled expressions +_names_cache = CacheDict(256) +_numexpr_cache = CacheDict(256) +_numexpr_last = {} +evaluate_lock = threading.Lock() + +# MAYBE: decorate this function to add attributes instead of having the +# _numexpr_last dictionary? +def validate(ex: str, + local_dict: Optional[Dict] = None, + global_dict: Optional[Dict] = None, + out: numpy.ndarray = None, + order: str = 'K', + casting: str = 'safe', + _frame_depth: int = 2, + sanitize: Optional[bool] = None, + **kwargs) -> Optional[Exception]: + r""" + Validate a NumExpr expression with the given `local_dict` or `locals()`. + Returns `None` on success and the Exception object if one occurs. Note that + you can proceed directly to call `re_evaluate()` if you use `validate()` + to sanitize your expressions and variables in advance. + + Parameters + ---------- + ex: str + a string forming an expression, like "2*a+3*b". The values for "a" + and "b" will by default be taken from the calling function's frame + (through use of sys._getframe()). Alternatively, they can be specified + using the 'local_dict' or 'global_dict' arguments. + + local_dict: dictionary, optional + A dictionary that replaces the local operands in current frame. + + global_dict: dictionary, optional + A dictionary that replaces the global operands in current frame. + + out: NumPy array, optional + An existing array where the outcome is going to be stored. Care is + required so that this array has the same shape and type than the + actual outcome of the computation. Useful for avoiding unnecessary + new array allocations. + + order: {'C', 'F', 'A', or 'K'}, optional + Controls the iteration order for operands. 'C' means C order, 'F' + means Fortran order, 'A' means 'F' order if all the arrays are + Fortran contiguous, 'C' order otherwise, and 'K' means as close to + the order the array elements appear in memory as possible. For + efficient computations, typically 'K'eep order (the default) is + desired. + + casting: {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when making a copy or + buffering. Setting this to 'unsafe' is not recommended, as it can + adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + sanitize: Optional[bool] + Both `validate` and by extension `evaluate` call `eval(ex)`, which is + potentially dangerous on unsanitized inputs. As such, NumExpr by default + performs simple sanitization, banning the character ':;[', the + dunder '__[\w+]__', and attribute access to all but '.real' and '.imag'. + + Using `None` defaults to `True` unless the environment variable + `NUMEXPR_SANITIZE=0` is set, in which case the default is `False`. 
+ Nominally this can be set via `os.environ` before `import numexpr`. + + _frame_depth: int + The calling frame depth. Unless you are a NumExpr developer you should + not set this value. + + Note + ---- + + """ + global _numexpr_last + + try: + + if not isinstance(ex, str): + raise ValueError("must specify expression as a string") + + if sanitize is None: + if 'NUMEXPR_SANITIZE' in os.environ: + sanitize = bool(int(os.environ['NUMEXPR_SANITIZE'])) + else: + sanitize = True + + # Get the names for this expression + context = getContext(kwargs) + expr_key = (ex, tuple(sorted(context.items()))) + if expr_key not in _names_cache: + _names_cache[expr_key] = getExprNames(ex, context, sanitize=sanitize) + names, ex_uses_vml = _names_cache[expr_key] + arguments = getArguments(names, local_dict, global_dict, _frame_depth=_frame_depth) + + # Create a signature + signature = [(name, getType(arg)) for (name, arg) in + zip(names, arguments)] + + # Look up numexpr if possible. + numexpr_key = expr_key + (tuple(signature),) + try: + compiled_ex = _numexpr_cache[numexpr_key] + except KeyError: + compiled_ex = _numexpr_cache[numexpr_key] = NumExpr(ex, signature, sanitize=sanitize, **context) + kwargs = {'out': out, 'order': order, 'casting': casting, + 'ex_uses_vml': ex_uses_vml} + _numexpr_last = dict(ex=compiled_ex, argnames=names, kwargs=kwargs) + except Exception as e: + return e + return None + +def evaluate(ex: str, + local_dict: Optional[Dict] = None, + global_dict: Optional[Dict] = None, + out: numpy.ndarray = None, + order: str = 'K', + casting: str = 'safe', + sanitize: Optional[bool] = None, + _frame_depth: int = 3, + **kwargs) -> numpy.ndarray: + r""" + Evaluate a simple array expression element-wise using the virtual machine. + + Parameters + ---------- + ex: str + a string forming an expression, like "2*a+3*b". The values for "a" + and "b" will by default be taken from the calling function's frame + (through use of sys._getframe()). Alternatively, they can be specified + using the 'local_dict' or 'global_dict' arguments. + + local_dict: dictionary, optional + A dictionary that replaces the local operands in current frame. + + global_dict: dictionary, optional + A dictionary that replaces the global operands in current frame. + + out: NumPy array, optional + An existing array where the outcome is going to be stored. Care is + required so that this array has the same shape and type than the + actual outcome of the computation. Useful for avoiding unnecessary + new array allocations. + + order: {'C', 'F', 'A', or 'K'}, optional + Controls the iteration order for operands. 'C' means C order, 'F' + means Fortran order, 'A' means 'F' order if all the arrays are + Fortran contiguous, 'C' order otherwise, and 'K' means as close to + the order the array elements appear in memory as possible. For + efficient computations, typically 'K'eep order (the default) is + desired. + + casting: {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when making a copy or + buffering. Setting this to 'unsafe' is not recommended, as it can + adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. 
+
+    sanitize: Optional[bool]
+        Both `validate` and by extension `evaluate` call `eval(ex)`, which is
+        potentially dangerous on unsanitized inputs. As such, NumExpr by default
+        performs simple sanitization, banning the characters ':', ';' and '[',
+        the dunder '__[\w+]__', and attribute access to all but '.real' and
+        '.imag'.
+
+        Using `None` defaults to `True` unless the environment variable
+        `NUMEXPR_SANITIZE=0` is set, in which case the default is `False`.
+        Nominally this can be set via `os.environ` before `import numexpr`.
+
+    _frame_depth: int
+        The calling frame depth. Unless you are a NumExpr developer you should
+        not set this value.
+
+    Note
+    ----
+    Both `validate` and by extension `evaluate` call `eval(ex)`, which is
+    potentially dangerous on unsanitized inputs. As such, NumExpr performs some
+    sanitization, banning the characters ':', ';' and '[', the dunder '__', and
+    attribute access to all but '.real' and '.imag' on complex numbers.
+    """
+    # We could avoid code duplication if we called validate and then re_evaluate
+    # here, but then we would have difficulties with the `sys._getframe(2)` call
+    # in `getArguments`.
+    e = validate(ex, local_dict=local_dict, global_dict=global_dict,
+                 out=out, order=order, casting=casting,
+                 _frame_depth=_frame_depth, sanitize=sanitize, **kwargs)
+    if e is None:
+        return re_evaluate(local_dict=local_dict, global_dict=global_dict, _frame_depth=_frame_depth)
+    else:
+        raise e
+
+
+def re_evaluate(local_dict: Optional[Dict] = None,
+                global_dict: Optional[Dict] = None,
+                _frame_depth: int = 2) -> numpy.ndarray:
+    """
+    Re-evaluate the previously executed array expression without any check.
+
+    This is meant for accelerating loops that are re-evaluating the same
+    expression repeatedly without changing anything other than the operands.
+    If unsure, use evaluate() which is safer.
+
+    Parameters
+    ----------
+    local_dict: dictionary, optional
+        A dictionary that replaces the local operands in the current frame.
+    global_dict: dictionary, optional
+        A dictionary that replaces the global operands in the current frame.
+    _frame_depth: int
+        The calling frame depth. Unless you are a NumExpr developer you should
+        not set this value.
+    """
+    global _numexpr_last
+
+    try:
+        compiled_ex = _numexpr_last['ex']
+    except KeyError:
+        raise RuntimeError("A previous evaluate() execution was not found, please call `validate` or `evaluate` once before `re_evaluate`")
+    argnames = _numexpr_last['argnames']
+    args = getArguments(argnames, local_dict, global_dict, _frame_depth=_frame_depth)
+    kwargs = _numexpr_last['kwargs']
+    with evaluate_lock:
+        return compiled_ex(*args, **kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/numexpr/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/numexpr/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fff4117bc23d630dcba2fb87aad5b08cef5b839
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/numexpr/tests/__init__.py
@@ -0,0 +1,14 @@
+###################################################################
+# Numexpr - Fast numerical array expression evaluator for NumPy.
+#
+# License: MIT
+# Author: See AUTHORS.txt
+#
+# See LICENSE.txt and LICENSES/*.txt for details about copyright and
+# rights to use.
+#################################################################### + +from numexpr.tests.test_numexpr import test, print_versions + +if __name__ == '__main__': + test() diff --git a/env-llmeval/lib/python3.10/site-packages/numexpr/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/numexpr/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac9e2fdffcfceb9dfcf150293c07534eedca54c9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/numexpr/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/numexpr/tests/__pycache__/test_numexpr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/numexpr/tests/__pycache__/test_numexpr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fa5616d7e8e923241a67a8481440f450ca43725 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/numexpr/tests/__pycache__/test_numexpr.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/numexpr/tests/test_numexpr.py b/env-llmeval/lib/python3.10/site-packages/numexpr/tests/test_numexpr.py new file mode 100644 index 0000000000000000000000000000000000000000..bb5b177682e3c614dba00a91bcabde1eef286941 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/numexpr/tests/test_numexpr.py @@ -0,0 +1,1348 @@ + +################################################################### +# Numexpr - Fast numerical array expression evaluator for NumPy. +# +# License: MIT +# Author: See AUTHORS.txt +# +# See LICENSE.txt and LICENSES/*.txt for details about copyright and +# rights to use. +#################################################################### + + +import os +import sys +import platform +import warnings +from contextlib import contextmanager +import subprocess + +import numpy as np +from numpy import ( + array, arange, empty, zeros, int32, int64, uint16, cdouble, float64, rec, + copy, ones_like, where, all as alltrue, linspace, + sum, prod, sqrt, fmod, floor, ceil, + sin, cos, tan, arcsin, arccos, arctan, arctan2, + sinh, cosh, tanh, arcsinh, arccosh, arctanh, + log, log1p, log10, exp, expm1, conj) +import numpy +from numpy.testing import (assert_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose) +from numpy import shape, allclose, array_equal, ravel, isnan, isinf + +import numexpr +from numexpr import E, NumExpr, evaluate, re_evaluate, validate, disassemble, use_vml +from numexpr.expressions import ConstantNode +from numexpr.utils import detect_number_of_cores + +import unittest + +TestCase = unittest.TestCase + +double = np.double +long = int + + +class test_numexpr(TestCase): + """Testing with 1 thread""" + nthreads = 1 + + def setUp(self): + numexpr.set_num_threads(self.nthreads) + + def test_simple(self): + ex = 2.0 * E.a + 3.0 * E.b * E.c + sig = [('a', double), ('b', double), ('c', double)] + func = NumExpr(ex, signature=sig) + x = func(array([1., 2, 3]), array([4., 5, 6]), array([7., 8, 9])) + assert_array_equal(x, array([86., 124., 168.])) + + def test_simple_expr_small_array(self): + func = NumExpr(E.a) + x = arange(100.0) + y = func(x) + assert_array_equal(x, y) + + def test_simple_expr(self): + func = NumExpr(E.a) + x = arange(1e6) + y = func(x) + assert_array_equal(x, y) + + def test_rational_expr(self): + func = NumExpr((E.a + 2.0 * E.b) / (1 + E.a + 4 * E.b * E.b)) + a = arange(1e6) + b = arange(1e6) * 0.1 + x = (a + 2 * b) / (1 + a + 4 * b * b) + 
y = func(a, b) + assert_array_almost_equal(x, y) + + def test_reductions(self): + # Check that they compile OK. + assert_equal(disassemble( + NumExpr("sum(x**2+2, axis=None)", [('x', double)])), + [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'), + (b'add_ddd', b't3', b't3', b'c2[2.0]'), + (b'sum_ddn', b'r0', b't3', None)]) + assert_equal(disassemble( + NumExpr("sum(x**2+2, axis=1)", [('x', double)])), + [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'), + (b'add_ddd', b't3', b't3', b'c2[2.0]'), + (b'sum_ddn', b'r0', b't3', 1)]) + assert_equal(disassemble( + NumExpr("prod(x**2+2, axis=2)", [('x', double)])), + [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'), + (b'add_ddd', b't3', b't3', b'c2[2.0]'), + (b'prod_ddn', b'r0', b't3', 2)]) + # Check that full reductions work. + x = zeros(100000) + .01 # checks issue #41 + assert_allclose(evaluate("sum(x+2,axis=None)"), sum(x + 2, axis=None)) + assert_allclose(evaluate("sum(x+2,axis=0)"), sum(x + 2, axis=0)) + assert_allclose(evaluate("prod(x,axis=0)"), prod(x, axis=0)) + assert_allclose(evaluate("min(x)"), np.min(x)) + assert_allclose(evaluate("max(x,axis=0)"), np.max(x, axis=0)) + + # Fix for #277, array with leading singleton dimension + x = np.arange(10).reshape(1,10) + assert_allclose(evaluate("sum(x,axis=None)"), sum(x, axis=None) ) + assert_allclose(evaluate("sum(x,axis=0)"), sum(x, axis=0) ) + assert_allclose(evaluate("sum(x,axis=1)"), sum(x, axis=1) ) + + x = arange(10.0) + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0)) + + x = arange(100.0) + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0)) + assert_allclose(evaluate("min(x-1,axis=0)"), np.min(x - 1, axis=0)) + assert_allclose(evaluate("max(x-1,axis=0)"), np.max(x - 1, axis=0)) + x = linspace(0.1, 1.0, 2000) + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0)) + assert_allclose(evaluate("min(x-1,axis=0)"), np.min(x - 1, axis=0)) + assert_allclose(evaluate("max(x-1,axis=0)"), np.max(x - 1, axis=0)) + + # Check that reductions along an axis work + y = arange(9.0).reshape(3, 3) + assert_allclose(evaluate("sum(y**2, axis=1)"), sum(y ** 2, axis=1)) + assert_allclose(evaluate("sum(y**2, axis=0)"), sum(y ** 2, axis=0)) + assert_allclose(evaluate("sum(y**2, axis=None)"), sum(y ** 2, axis=None)) + assert_allclose(evaluate("prod(y**2, axis=1)"), prod(y ** 2, axis=1)) + assert_allclose(evaluate("prod(y**2, axis=0)"), prod(y ** 2, axis=0)) + assert_allclose(evaluate("prod(y**2, axis=None)"), prod(y ** 2, axis=None)) + assert_allclose(evaluate("min(y**2, axis=1)"), np.min(y ** 2, axis=1)) + assert_allclose(evaluate("min(y**2, axis=0)"), np.min(y ** 2, axis=0)) + assert_allclose(evaluate("min(y**2, axis=None)"), np.min(y ** 2, axis=None)) + assert_allclose(evaluate("max(y**2, axis=1)"), np.max(y ** 2, axis=1)) + assert_allclose(evaluate("max(y**2, axis=0)"), np.max(y ** 2, axis=0)) + assert_allclose(evaluate("max(y**2, axis=None)"), np.max(y ** 2, axis=None)) + # Check integers + x = arange(10.) 
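+        # Editor's note (a sketch; `y` is an arbitrary example): NumExpr
+        # reductions accept a non-negative integer axis or axis=None only;
+        # negative axes raise ValueError (see test_axis below):
+        #
+        #   y = arange(9.0).reshape(3, 3)
+        #   evaluate('sum(y, axis=1)')     # fine
+        #   evaluate('sum(y, axis=-1)')    # ValueError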
+ x = x.astype(int) + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0)) + # Check longs + x = x.astype(int) + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0)) + # Check complex + x = x + .1j + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0)) + + def test_in_place(self): + x = arange(10000.).reshape(1000, 10) + evaluate("x + 3", out=x) + assert_equal(x, arange(10000.).reshape(1000, 10) + 3) + y = arange(10) + evaluate("(x - 3) * y + (x - 3)", out=x) + assert_equal(x, arange(10000.).reshape(1000, 10) * (arange(10) + 1)) + + def test_axis(self): + y = arange(9.0).reshape(3, 3) + try: + evaluate("sum(y, axis=2)") + except ValueError: + pass + else: + raise ValueError("should raise exception!") + try: + evaluate("sum(y, axis=-3)") + except ValueError: + pass + else: + raise ValueError("should raise exception!") + try: + # Negative axis are not supported + evaluate("sum(y, axis=-1)") + except ValueError: + pass + else: + raise ValueError("should raise exception!") + + def test_r0_reuse(self): + assert_equal(disassemble(NumExpr("x * x + 2", [('x', double)])), + [(b'mul_ddd', b'r0', b'r1[x]', b'r1[x]'), + (b'add_ddd', b'r0', b'r0', b'c2[2.0]')]) + + def test_str_contains_basic0(self): + res = evaluate('contains(b"abc", b"ab")') + assert_equal(res, True) + + def test_str_contains_basic1(self): + haystack = array([b'abc', b'def', b'xyz', b'x11', b'za']) + res = evaluate('contains(haystack, b"ab")') + assert_equal(res, [True, False, False, False, False]) + + def test_str_contains_basic2(self): + haystack = array([b'abc', b'def', b'xyz', b'x11', b'za']) + res = evaluate('contains(b"abcd", haystack)') + assert_equal(res, [True, False, False, False, False]) + + def test_str_contains_basic3(self): + haystacks = array( + [b'abckkk', b'adef', b'xyz', b'x11abcp', b'za', b'abc']) + needles = array( + [b'abc', b'def', b'aterr', b'oot', b'zu', b'ab']) + res = evaluate('contains(haystacks, needles)') + assert_equal(res, [True, True, False, False, False, True]) + + def test_str_contains_basic4(self): + needles = array( + [b'abc', b'def', b'aterr', b'oot', b'zu', b'ab c', b' abc', + b'abc ']) + res = evaluate('contains(b"test abc here", needles)') + assert_equal(res, [True, False, False, False, False, False, True, True]) + + def test_str_contains_basic5(self): + needles = array( + [b'abc', b'ab c', b' abc', b' abc ', b'\tabc', b'c h']) + res = evaluate('contains(b"test abc here", needles)') + assert_equal(res, [True, False, True, True, False, True]) + + # Compare operation of Python 'in' operator with 'contains' using a + # product of two lists of strings. 
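+    # Editor's sketch of the equivalence exercised below (arbitrary names;
+    # `contains` mirrors the Python `in` operator element-wise):
+    #
+    #   import numpy as np, numexpr as ne
+    #   hay = np.array([b'spam and eggs'])
+    #   assert bool(ne.evaluate('contains(hay, b"and")')[0]) == (b'and' in hay[0])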
+ + def test_str_contains_listproduct(self): + from itertools import product + + small = [ + 'It w', 'as th', 'e Whit', 'e Rab', 'bit,', ' tro', 'tting', + ' sl', 'owly', ' back ', 'again,', ' and', ' lo', 'okin', 'g a', + 'nxious', 'ly a', 'bou', 't a', 's it w', 'ent,', ' as i', 'f it', + ' had l', 'ost', ' some', 'thi', 'ng; a', 'nd ', 'she ', 'heard ', + 'it mut', 'terin', 'g to ', 'its', 'elf ', "'The", + ' Duch', 'ess! T', 'he ', 'Duches', 's! Oh ', 'my dea', 'r paws', + '! Oh ', 'my f', 'ur ', 'and ', 'whiske', 'rs! ', 'She', "'ll g", + 'et me', ' ex', 'ecu', 'ted, ', 'as su', 're a', 's f', 'errets', + ' are f', 'errets', '! Wh', 'ere ', 'CAN', ' I hav', 'e d', + 'roppe', 'd t', 'hem,', ' I wo', 'nder?', "' A", 'lice', + ' gu', 'essed', ' in a', ' mom', 'ent ', 'tha', 't it w', 'as ', + 'looki', 'ng f', 'or ', 'the fa', 'n and ', 'the', ' pai', + 'r of w', 'hit', 'e kid', ' glo', 'ves', ', and ', 'she ', + 'very g', 'ood', '-na', 'turedl', 'y be', 'gan h', 'unt', 'ing', + ' about', ' for t', 'hem', ', but', ' they ', 'wer', 'e nowh', + 'ere to', ' be', ' se', 'en--', 'ever', 'ythin', 'g seem', 'ed ', + 'to ', 'have c', 'hang', 'ed ', 'since', ' he', 'r swim', ' in', + ' the', ' pool,', ' and', ' the g', 'reat ', 'hal', 'l, w', 'ith', + ' th', 'e gl', 'ass t', 'abl', 'e and ', 'the', ' li', 'ttle', + ' doo', 'r, ha', 'd v', 'ani', 'shed c', 'omp', 'lete', 'ly.'] + big = [ + 'It wa', 's the', ' W', 'hit', 'e ', 'Ra', 'bb', 'it, t', 'ro', + 'tting s', 'lowly', ' back ', 'agai', 'n, and', ' l', 'ookin', + 'g ', 'an', 'xiously', ' about ', 'as it w', 'ent, as', ' if ', + 'it had', ' los', 't ', 'so', 'mething', '; and', ' she h', + 'eard ', 'it ', 'mutteri', 'ng to', ' itself', " 'The ", + 'Duchess', '! ', 'Th', 'e ', 'Duchess', '! Oh m', 'y de', + 'ar paws', '! ', 'Oh my ', 'fu', 'r and w', 'hiskers', "! She'", + 'll ', 'get', ' me ', 'execute', 'd,', ' a', 's ', 'su', 're as ', + 'fe', 'rrets', ' are f', 'errets!', ' Wher', 'e CAN', ' I ha', + 've dro', 'pped t', 'hem', ', I ', 'won', "der?' 
A", + 'lice g', 'uess', 'ed ', 'in a m', 'omen', 't that', ' i', + 't was l', 'ook', 'ing f', 'or th', 'e ', 'fan and', ' th', 'e p', + 'air o', 'f whit', 'e ki', 'd glove', 's, and ', 'she v', 'ery ', + 'good-na', 'tu', 'redl', 'y be', 'gan hun', 'ti', 'ng abou', + 't for t', 'he', 'm, bu', 't t', 'hey ', 'were n', 'owhere', + ' to b', 'e s', 'een-', '-eve', 'rythi', 'ng see', 'me', 'd ', + 'to ha', 've', ' c', 'hanged', ' sinc', 'e her s', 'wim ', + 'in the ', 'pool,', ' an', 'd the g', 'rea', 't h', 'all, wi', + 'th the ', 'glas', 's t', 'able an', 'd th', 'e littl', 'e door,', + ' had va', 'ni', 'shed co', 'mpletel', 'y.'] + p = list(product(small, big)) + python_in = [x[0] in x[1] for x in p] + a = [x[0].encode() for x in p] + b = [x[1].encode() for x in p] + res = [bool(x) for x in evaluate('contains(b, a)')] + assert_equal(res, python_in) + + def test_str_contains_withemptystr1(self): + withemptystr = array([b'abc', b'def', b'']) + res = evaluate('contains(b"abcd", withemptystr)') + assert_equal(res, [True, False, True]) + + def test_str_contains_withemptystr2(self): + withemptystr = array([b'abc', b'def', b'']) + res = evaluate('contains(withemptystr, b"")') + assert_equal(res, [True, True, True]) + + def test_str_contains_long_needle(self): + a = b'1' + b'a' * 40 + b = b'a' * 40 + res = evaluate('contains(a, b)') + assert_equal(res, True) + + def test_where_scalar_bool(self): + a = True + b = array([1, 2]) + c = array([3, 4]) + res = evaluate('where(a, b, c)') + assert_array_equal(res, b) + a = False + res = evaluate('where(a, b, c)') + assert_array_equal(res, c) + + @unittest.skipIf(hasattr(sys, "pypy_version_info"), + "PyPy does not have sys.getrefcount()") + def test_refcount(self): + # Regression test for issue #310 + a = array([1]) + assert sys.getrefcount(a) == 2 + evaluate('1') + assert sys.getrefcount(a) == 2 + + def test_locals_clears_globals(self): + # Check for issue #313, whereby clearing f_locals also clear f_globals + # if in the top-frame. This cannot be done inside `unittest` as it is always + # executing code in a child frame. + script = r';'.join([ + r"import numexpr as ne", + r"a=10", + r"ne.evaluate('1')", + r"a += 1", + r"ne.evaluate('2', local_dict={})", + r"a += 1", + r"ne.evaluate('3', global_dict={})", + r"a += 1", + r"ne.evaluate('4', local_dict={}, global_dict={})", + r"a += 1", + ]) + # Raises CalledProcessError on a non-normal exit + check = subprocess.check_call([sys.executable, '-c', script]) + # Ideally this test should also be done against ipython but it's not + # a requirement. 
+ + + +class test_numexpr2(test_numexpr): + """Testing with 2 threads""" + nthreads = 2 + + +class test_evaluate(TestCase): + def test_simple(self): + a = array([1., 2., 3.]) + b = array([4., 5., 6.]) + c = array([7., 8., 9.]) + x = evaluate("2*a + 3*b*c") + assert_array_equal(x, array([86., 124., 168.])) + + def test_simple_expr_small_array(self): + x = arange(100.0) + y = evaluate("x") + assert_array_equal(x, y) + + def test_simple_expr(self): + x = arange(1e6) + y = evaluate("x") + assert_array_equal(x, y) + + def test_re_evaluate(self): + a = array([1., 2., 3.]) + b = array([4., 5., 6.]) + c = array([7., 8., 9.]) + x = evaluate("2*a + 3*b*c") + x = re_evaluate() + assert_array_equal(x, array([86., 124., 168.])) + + def test_re_evaluate_dict(self): + a1 = array([1., 2., 3.]) + b1 = array([4., 5., 6.]) + c1 = array([7., 8., 9.]) + local_dict={'a': a1, 'b': b1, 'c': c1} + x = evaluate("2*a + 3*b*c", local_dict=local_dict) + x = re_evaluate(local_dict=local_dict) + assert_array_equal(x, array([86., 124., 168.])) + + def test_validate(self): + a = array([1., 2., 3.]) + b = array([4., 5., 6.]) + c = array([7., 8., 9.]) + retval = validate("2*a + 3*b*c") + assert(retval is None) + x = re_evaluate() + assert_array_equal(x, array([86., 124., 168.])) + + def test_validate_missing_var(self): + a = array([1., 2., 3.]) + b = array([4., 5., 6.]) + retval = validate("2*a + 3*b*c") + assert(isinstance(retval, KeyError)) + + def test_validate_syntax(self): + retval = validate("2+") + assert(isinstance(retval, SyntaxError)) + + def test_validate_dict(self): + a1 = array([1., 2., 3.]) + b1 = array([4., 5., 6.]) + c1 = array([7., 8., 9.]) + local_dict={'a': a1, 'b': b1, 'c': c1} + retval = validate("2*a + 3*b*c", local_dict=local_dict) + assert(retval is None) + x = re_evaluate(local_dict=local_dict) + assert_array_equal(x, array([86., 124., 168.])) + + # Test for issue #22 + def test_true_div(self): + x = arange(10, dtype='i4') + assert_array_equal(evaluate("x/2"), x / 2) + assert_array_equal(evaluate("x/2", truediv=False), x / 2) + assert_array_equal(evaluate("x/2", truediv='auto'), x / 2) + assert_array_equal(evaluate("x/2", truediv=True), x / 2.0) + + def test_left_shift(self): + x = arange(10, dtype='i4') + assert_array_equal(evaluate("x<<2"), x << 2) + + def test_right_shift(self): + x = arange(10, dtype='i4') + assert_array_equal(evaluate("x>>2"), x >> 2) + + # PyTables uses __nonzero__ among ExpressionNode objects internally + # so this should be commented out for the moment. See #24. 
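+    # Editor's note: NumExpr rejects the Python keywords `and`/`or`, as the
+    # test below checks; the supported boolean spelling uses the bitwise
+    # operators instead (a sketch, arbitrary names):
+    #
+    #   x = np.arange(10)
+    #   mask = ne.evaluate('(x > 1) & (x < 9)')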
+ def test_boolean_operator(self): + x = arange(10, dtype='i4') + try: + evaluate("(x > 1) and (x < 9)") + except TypeError: + pass + else: + raise ValueError("should raise exception!") + + def test_rational_expr(self): + a = arange(1e6) + b = arange(1e6) * 0.1 + x = (a + 2 * b) / (1 + a + 4 * b * b) + y = evaluate("(a + 2*b) / (1 + a + 4*b*b)") + assert_array_almost_equal(x, y) + + def test_complex_expr(self): + def complex(a, b): + c = zeros(a.shape, dtype=cdouble) + c.real = a + c.imag = b + return c + + a = arange(1e4) + b = arange(1e4) ** 1e-5 + z = a + 1j * b + x = z.imag + x = sin(complex(a, b)).real + z.imag + y = evaluate("sin(complex(a, b)).real + z.imag") + assert_array_almost_equal(x, y) + + def test_complex_strides(self): + a = arange(100).reshape(10, 10)[::2] + b = arange(50).reshape(5, 10) + assert_array_equal(evaluate("a+b"), a + b) + c = empty([10], dtype=[('c1', int32), ('c2', uint16)]) + c['c1'] = arange(10) + c['c2'].fill(0xaaaa) + c1 = c['c1'] + a0 = a[0] + assert_array_equal(evaluate("c1"), c1) + assert_array_equal(evaluate("a0+c1"), a0 + c1) + + def test_recarray_strides(self): + a = arange(100) + b = arange(100,200) + recarr = np.rec.array(None, formats='f4,f4', shape=(100,)) + recarr['f0'] = a + recarr['f1'] = b + c = recarr['f1'] + assert_array_almost_equal(evaluate("sqrt(c) > 1."), sqrt(c) > 1.) + assert_array_almost_equal(evaluate("log10(c)"), log10(c)) + + def test_broadcasting(self): + a = arange(100).reshape(10, 10)[::2] + c = arange(10) + d = arange(5).reshape(5, 1) + assert_array_equal(evaluate("a+c"), a + c) + assert_array_equal(evaluate("a+d"), a + d) + expr = NumExpr("2.0*a+3.0*c", [('a', double), ('c', double)]) + assert_array_equal(expr(a, c), 2.0 * a + 3.0 * c) + + def test_all_scalar(self): + a = 3. + b = 4. + assert_allclose(evaluate("a+b"), a + b) + expr = NumExpr("2*a+3*b", [('a', double), ('b', double)]) + assert_equal(expr(a, b), 2 * a + 3 * b) + + def test_run(self): + a = arange(100).reshape(10, 10)[::2] + b = arange(10) + expr = NumExpr("2*a+3*b", [('a', double), ('b', double)]) + assert_array_equal(expr(a, b), expr.run(a, b)) + + def test_illegal_value(self): + a = arange(3) + try: + evaluate("a < [0, 0, 0]") + except (ValueError, TypeError): + pass + else: + self.fail() + + def test_sanitize(self): + with _environment('NUMEXPR_SANITIZE', '1'): + # Forbid dunder + try: + evaluate('__builtins__') + except ValueError: + pass + else: + self.fail() + + # Forbid colon for lambda funcs + try: + evaluate('lambda x: x') + except ValueError: + pass + else: + self.fail() + + # Forbid indexing + try: + evaluate('locals()["evaluate"]') + except ValueError: + pass + else: + self.fail() + + # Forbid semicolon + try: + evaluate('import os;') + except ValueError: + pass + else: + self.fail() + + # Attribute access with spaces + try: + evaluate('os. cpu_count()') + except ValueError: + pass + else: + self.fail() + + # Attribute access with funny unicode characters that eval translates + # into ASCII. 
+ try: + evaluate("(3+1).ᵇit_length()") + except ValueError: + pass + else: + self.fail() + + # Pass decimal points including scientific notation + a = 3.0 + evaluate('a*2.e-5') + evaluate('a*2.e+5') + evaluate('a*2e-5') + evaluate('a*2e+5') + evaluate('a*2E-5') + evaluate('a*2.0e5') + evaluate('a*2.2e5') + evaluate('2.+a') + + # pass .real and .imag + c = 2.5 + 1.5j + evaluate('c.real') + evaluate('c.imag') + + # pass imaginary unit j + evaluate('1.5j') + evaluate('3.j') + + # pass forbidden characters within quotes + x = np.array(['a', 'b'], dtype=bytes) + evaluate("x == 'b:'") + + + def test_no_sanitize(self): + try: # Errors on compile() after eval() + evaluate('import os;', sanitize=False) + except SyntaxError: + pass + else: + self.fail() + + with _environment('NUMEXPR_SANITIZE', '0'): + try: # Errors on compile() after eval() + evaluate('import os;', sanitize=None) + except SyntaxError: + pass + else: + self.fail() + + def test_disassemble(self): + assert_equal(disassemble(NumExpr( + "where(m, a, -1)", [('m', bool), ('a', float)])), + [[b'where_fbff', b'r0', b'r1[m]', b'r2[a]', b'c3[-1.0]'], + [b'noop', None, None, None]]) + + def test_constant_deduplication(self): + assert_equal(NumExpr("(a + 1)*(a - 1)", [('a', np.int32)]).constants, (1,)) + + def test_nan_constant(self): + assert_equal(str(ConstantNode(float("nan")).value), 'nan') + + # check de-duplication works for nan + _nan = ConstantNode(float("nan")) + expr = (E.a + _nan)*(E.b + _nan) + assert_equal(NumExpr(expr, [('a', double), ('b', double)]).constants, (float("nan"),)) + + + def test_f32_constant(self): + assert_equal(ConstantNode(numpy.float32(1)).astKind, "float") + assert_equal(ConstantNode(numpy.float32("nan")).astKind, "float") + assert_equal(ConstantNode(numpy.float32(3)).value.dtype, numpy.dtype("float32")) + assert_array_equal(NumExpr(ConstantNode(numpy.float32(1))).run(), + numpy.array(1, dtype="float32")) + + def test_unaligned_singleton(self): + # Test for issue #397 whether singletons outputs assigned to consts must be + # aligned or not. + a = np.empty(5, dtype=np.uint8)[1:].view(np.int32) + evaluate('3', out=a) + assert_equal(a, 3) + + def test_negative_mod(self): + # Test for issue #413, modulus of negative integers. C modulus is + # actually remainder op, and hence different from Python modulus. + a = np.array([-500, -135, 0, 0, 135, 500], dtype=np.int32) + n = np.array([-360, -360, -360, 360, 360, 360], dtype=np.int32) + out_i = evaluate('a % n') + assert_equal(out_i, np.mod(a, n)) + + b = a.astype(np.int64) + m = n.astype(np.int64) + out_l = evaluate('b % m') + assert_equal(out_l, np.mod(b, m)) + + def test_negative_power_scalar(self): + # Test for issue #428, where the power is negative and the base is an + # integer. 
This was running afoul in the precomputation in `expressions.py:pow_op()` + base = np.array([-2, -1, 1, 2, 3], dtype=np.int32) + out_i = evaluate('base ** -1.0') + assert_equal(out_i, np.power(base, -1.0)) + + base = np.array([-2, -1, 1, 2, 3], dtype=np.int64) + out_l = evaluate('base ** -1.0') + assert_equal(out_l, np.power(base, -1.0)) + + + def test_ex_uses_vml(self): + vml_funcs = [ "sin", "cos", "tan", "arcsin", "arccos", "arctan", + "sinh", "cosh", "tanh", "arcsinh", "arccosh", "arctanh", + "log", "log1p","log10", "exp", "expm1", "abs", "conj", + "arctan2", "fmod"] + for func in vml_funcs: + strexpr = func+'(a)' + _, ex_uses_vml = numexpr.necompiler.getExprNames(strexpr, {}) + assert_equal(ex_uses_vml, use_vml, strexpr) + + if 'sparc' not in platform.machine(): + # Execution order set here so as to not use too many threads + # during the rest of the execution. See #33 for details. + def test_changing_nthreads_00_inc(self): + a = linspace(-1, 1, 1000000) + b = ((.25 * a + .75) * a - 1.5) * a - 2 + for nthreads in range(1, 7): + numexpr.set_num_threads(nthreads) + c = evaluate("((.25*a + .75)*a - 1.5)*a - 2") + assert_array_almost_equal(b, c) + + def test_changing_nthreads_01_dec(self): + a = linspace(-1, 1, 1000000) + b = ((.25 * a + .75) * a - 1.5) * a - 2 + for nthreads in range(6, 1, -1): + numexpr.set_num_threads(nthreads) + c = evaluate("((.25*a + .75)*a - 1.5)*a - 2") + assert_array_almost_equal(b, c) + + +tests = [ + ('MISC', ['b*c+d*e', + '2*a+3*b', + '-a', + 'sinh(a)', + '2*a + (cos(3)+5)*sinh(cos(b))', + '2*a + arctan2(a, b)', + 'arcsin(0.5)', + 'where(a != 0.0, 2, a)', + 'where(a > 10, b < a, b > a)', + 'where((a-10).real != 0.0, a, 2)', + '0.25 * (a < 5) + 0.33 * (a >= 5)', + 'cos(1+1)', + '1+1', + '1', + 'cos(a2)', + ])] + +optests = [] +for op in list('+-*/%') + ['**']: + optests.append("(a+1) %s (b+3)" % op) + optests.append("3 %s (b+3)" % op) + optests.append("(a+1) %s 4" % op) + optests.append("2 %s (b+3)" % op) + optests.append("(a+1) %s 2" % op) + optests.append("(a+1) %s -1" % op) + optests.append("(a+1) %s 0.5" % op) + # Check divisions and modulus by zero (see ticket #107) + optests.append("(a+1) %s 0" % op) +tests.append(('OPERATIONS', optests)) + +cmptests = [] +for op in ['<', '<=', '==', '>=', '>', '!=']: + cmptests.append("a/2+5 %s b" % op) + cmptests.append("a/2+5 %s 7" % op) + cmptests.append("7 %s b" % op) + cmptests.append("7.0 %s 5" % op) +tests.append(('COMPARISONS', cmptests)) + +func1tests = [] +for func in ['copy', 'ones_like', 'sqrt', + 'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan', + 'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh', + 'log', 'log1p', 'log10', 'exp', 'expm1', 'abs', 'conj', + 'ceil', 'floor']: + func1tests.append("a + %s(b+c)" % func) +tests.append(('1_ARG_FUNCS', func1tests)) + +func2tests = [] +for func in ['arctan2', 'fmod']: + func2tests.append("a + %s(b+c, d+1)" % func) + func2tests.append("a + %s(b+c, 1)" % func) + func2tests.append("a + %s(1, d+1)" % func) +tests.append(('2_ARG_FUNCS', func2tests)) + +powtests = [] +# n = -1, 0.5, 2, 4 already handled in section "OPERATIONS" +for n in (-7, -2.5, -1.5, -1.3, -.5, 0, 0.0, 1, 2.3, 2.5, 3): + powtests.append("(a+1)**%s" % n) +tests.append(('POW_TESTS', powtests)) + + +def equal(a, b, exact): + if array_equal(a, b): + return True + + if hasattr(a, 'dtype') and a.dtype in ['f4', 'f8']: + nnans = isnan(a).sum() + if nnans > 0: + # For results containing NaNs, just check that the number + # of NaNs is the same in both arrays. 
This check could be + # made more exhaustive, but checking element by element in + # python space is very expensive in general. + return nnans == isnan(b).sum() + ninfs = isinf(a).sum() + if ninfs > 0: + # Ditto for Inf's + return ninfs == isinf(b).sum() + if exact: + return (shape(a) == shape(b)) and alltrue(ravel(a) == ravel(b), axis=0) + else: + if hasattr(a, 'dtype') and a.dtype == 'f4': + atol = 1e-5 # Relax precision for special opcodes, like fmod + else: + atol = 1e-8 + return (shape(a) == shape(b) and + allclose(ravel(a), ravel(b), atol=atol)) + + +class Skip(Exception): pass + + +def test_expressions(): + test_no = [0] + + def make_test_method(a, a2, b, c, d, e, x, expr, + test_scalar, dtype, optimization, exact, section): + this_locals = locals() + + def method(): + try: + # We don't want to listen at RuntimeWarnings like + # "overflows" or "divide by zero" in plain eval(). + warnings.simplefilter("ignore") + npval = eval(expr, globals(), this_locals) + warnings.simplefilter("always") + npval = eval(expr, globals(), this_locals) + except Exception as ex: + # just store the exception in a variable + # compatibility with numpy v1.12 + # see also https://github.com/pydata/numexpr/issues/239 + np_exception = ex + npval = None + else: + np_exception = None + + try: + neval = evaluate(expr, local_dict=this_locals, + optimization=optimization) + except AssertionError: + raise + except NotImplementedError: + print('%r not implemented for %s (scalar=%d, opt=%s)' + % (expr, dtype.__name__, test_scalar, optimization)) + except Exception as ne_exception: + same_exc_type = issubclass(type(ne_exception), + type(np_exception)) + if np_exception is None or not same_exc_type: + print('numexpr error for expression %r' % (expr,)) + raise + except: + print('numexpr error for expression %r' % (expr,)) + raise + else: + msg = ('expected numexpr error not raised for expression ' + '%r' % (expr,)) + assert np_exception is None, msg + + assert equal(npval, neval, exact), """%r +(test_scalar=%r, dtype=%r, optimization=%r, exact=%r, + npval=%r (%r - %r)\n neval=%r (%r - %r))""" % (expr, test_scalar, dtype.__name__, + optimization, exact, + npval, type(npval), shape(npval), + neval, type(neval), shape(neval)) + + method.description = ('test_expressions(%s, test_scalar=%r, ' + 'dtype=%r, optimization=%r, exact=%r)') % (expr, test_scalar, dtype.__name__, optimization, exact) + test_no[0] += 1 + method.__name__ = 'test_scalar%d_%s_%s_%s_%04d' % (test_scalar, + dtype.__name__, + optimization.encode('ascii'), + section.encode('ascii'), + test_no[0]) + return method + + x = None + for test_scalar in (0, 1, 2): + for dtype in (int, int, np.float32, double, complex): + array_size = 100 + a = arange(2 * array_size, dtype=dtype)[::2] + a2 = zeros([array_size, array_size], dtype=dtype) + b = arange(array_size, dtype=dtype) / array_size + c = arange(array_size, dtype=dtype) + d = arange(array_size, dtype=dtype) + e = arange(array_size, dtype=dtype) + if dtype == complex: + a = a.real + for x in [a2, b, c, d, e]: + x += 1j + x *= 1 + 1j + if test_scalar == 1: + a = a[array_size // 2] + if test_scalar == 2: + b = b[array_size // 2] + for optimization, exact in [ + ('none', False), ('moderate', False), ('aggressive', False)]: + for section_name, section_tests in tests: + for expr in section_tests: + if (dtype == complex and + ('<' in expr or '>' in expr or '%' in expr + or "arctan2" in expr or "fmod" in expr + or "floor" in expr or "ceil" in expr)): + # skip complex comparisons or functions not + # defined in complex 
domain. + continue + if (dtype in (int, int) and test_scalar and + expr == '(a+1) ** -1'): + continue + + m = make_test_method(a, a2, b, c, d, e, x, + expr, test_scalar, dtype, + optimization, exact, + section_name) + yield m + + +class test_int64(TestCase): + def test_neg(self): + a = array([2 ** 31 - 1, 2 ** 31, 2 ** 32, 2 ** 63 - 1], dtype=int64) + res = evaluate('-a') + assert_array_equal(res, [1 - 2 ** 31, -(2 ** 31), -(2 ** 32), 1 - 2 ** 63]) + self.assertEqual(res.dtype.name, 'int64') + + +class test_int32_int64(TestCase): + + def test_small_int(self): + # Small ints (32-bit ones) should not be promoted to longs. + res = evaluate('2') + assert_array_equal(res, 2) + self.assertEqual(res.dtype.name, 'int32') + + def test_big_int(self): + # Big ints should be promoted to longs. + res = evaluate('2**40') + assert_array_equal(res, 2 ** 40) + self.assertEqual(res.dtype.name, 'int64') + + def test_long_constant_promotion(self): + int32array = arange(100, dtype='int32') + itwo = np.int32(2) + ltwo = np.int64(2) + res = int32array * 2 + res32 = evaluate('int32array * itwo') + res64 = evaluate('int32array * ltwo') + assert_array_equal(res, res32) + assert_array_equal(res, res64) + self.assertEqual(res32.dtype.name, 'int32') + self.assertEqual(res64.dtype.name, 'int64') + + def test_int64_array_promotion(self): + int32array = arange(100, dtype='int32') + int64array = arange(100, dtype='int64') + respy = int32array * int64array + resnx = evaluate('int32array * int64array') + assert_array_equal(respy, resnx) + self.assertEqual(resnx.dtype.name, 'int64') + + +class test_uint32_int64(TestCase): + def test_small_uint32(self): + # Small uint32 should not be downgraded to ints. + a = np.uint32(42) + res = evaluate('a') + assert_array_equal(res, 42) + self.assertEqual(res.dtype.name, 'int64') + + def test_uint32_constant_promotion(self): + int32array = arange(100, dtype='int32') + stwo = np.int32(2) + utwo = np.uint32(2) + res = int32array * utwo + res32 = evaluate('int32array * stwo') + res64 = evaluate('int32array * utwo') + assert_array_equal(res, res32) + assert_array_equal(res, res64) + self.assertEqual(res32.dtype.name, 'int32') + self.assertEqual(res64.dtype.name, 'int64') + + def test_int64_array_promotion(self): + uint32array = arange(100, dtype='uint32') + int64array = arange(100, dtype='int64') + respy = uint32array * int64array + resnx = evaluate('uint32array * int64array') + assert_array_equal(respy, resnx) + self.assertEqual(resnx.dtype.name, 'int64') + + +class test_strings(TestCase): + BLOCK_SIZE1 = 128 + BLOCK_SIZE2 = 8 + str_list1 = [b'foo', b'bar', b'', b' '] + str_list2 = [b'foo', b'', b'x', b' '] + str_nloops = len(str_list1) * (BLOCK_SIZE1 + BLOCK_SIZE2 + 1) + str_array1 = array(str_list1 * str_nloops) + str_array2 = array(str_list2 * str_nloops) + str_constant = b'doodoo' + + def test_null_chars(self): + str_list = [ + b'\0\0\0', b'\0\0foo\0', b'\0\0foo\0b', b'\0\0foo\0b\0', + b'foo\0', b'foo\0b', b'foo\0b\0', b'foo\0bar\0baz\0\0'] + for s in str_list: + r = evaluate('s') + self.assertEqual(s, r.tobytes()) # check *all* stored data + + def test_compare_copy(self): + sarr = self.str_array1 + expr = 'sarr' + res1 = eval(expr) + res2 = evaluate(expr) + assert_array_equal(res1, res2) + + def test_compare_array(self): + sarr1 = self.str_array1 + sarr2 = self.str_array2 + expr = 'sarr1 >= sarr2' + res1 = eval(expr) + res2 = evaluate(expr) + assert_array_equal(res1, res2) + + def test_compare_variable(self): + sarr = self.str_array1 + svar = self.str_constant + expr = 'sarr >= svar' + 
res1 = eval(expr) + res2 = evaluate(expr) + assert_array_equal(res1, res2) + + def test_compare_constant(self): + sarr = self.str_array1 + expr = 'sarr >= %r' % self.str_constant + res1 = eval(expr) + res2 = evaluate(expr) + assert_array_equal(res1, res2) + + def test_add_string_array(self): + sarr1 = self.str_array1 + sarr2 = self.str_array2 + expr = 'sarr1 + sarr2' + self.assert_missing_op('add_sss', expr, locals()) + + def test_empty_string1(self): + a = np.array([b"", b"pepe"]) + b = np.array([b"pepe2", b""]) + res = evaluate("(a == b'') & (b == b'pepe2')") + assert_array_equal(res, np.array([True, False])) + res2 = evaluate("(a == b'pepe') & (b == b'')") + assert_array_equal(res2, np.array([False, True])) + + def test_empty_string2(self): + a = np.array([b"p", b"pepe"]) + b = np.array([b"pepe2", b""]) + res = evaluate("(a == b'') & (b == b'pepe2')") + assert_array_equal(res, np.array([False, False])) + res2 = evaluate("(a == b'pepe') & (b == b'')") + assert_array_equal(res, np.array([False, False])) + + def test_add_numeric_array(self): + sarr = self.str_array1 + narr = arange(len(sarr), dtype='int32') + expr = 'sarr >= narr' + self.assert_missing_op('ge_bsi', expr, locals()) + + def assert_missing_op(self, op, expr, local_dict): + msg = "expected NotImplementedError regarding '%s'" % op + try: + evaluate(expr, local_dict) + except NotImplementedError as nie: + if "'%s'" % op not in nie.args[0]: + self.fail(msg) + else: + self.fail(msg) + + def test_compare_prefix(self): + # Check comparing two strings where one is a prefix of the + # other. + for s1, s2 in [(b'foo', b'foobar'), (b'foo', b'foo\0bar'), + (b'foo\0a', b'foo\0bar')]: + self.assertTrue(evaluate('s1 < s2')) + self.assertTrue(evaluate('s1 <= s2')) + self.assertTrue(evaluate('~(s1 == s2)')) + self.assertTrue(evaluate('~(s1 >= s2)')) + self.assertTrue(evaluate('~(s1 > s2)')) + + # Check for NumPy array-style semantics in string equality. + s1, s2 = b'foo', b'foo\0\0' + self.assertTrue(evaluate('s1 == s2')) + + +# Case for testing selections in fields which are aligned but whose +# data length is not an exact multiple of the length of the record. +# The following test exposes the problem only in 32-bit machines, +# because in 64-bit machines 'c2' is unaligned. However, this should +# check most platforms where, while not unaligned, 'len(datatype) > +# boundary_alignment' is fullfilled. +class test_irregular_stride(TestCase): + def test_select(self): + f0 = arange(10, dtype=int32) + f1 = arange(10, dtype=float64) + + irregular = rec.fromarrays([f0, f1]) + + f0 = irregular['f0'] + f1 = irregular['f1'] + + i0 = evaluate('f0 < 5') + i1 = evaluate('f1 < 5') + + assert_array_equal(f0[i0], arange(5, dtype=int32)) + assert_array_equal(f1[i1], arange(5, dtype=float64)) + + +# Cases for testing arrays with dimensions that can be zero. 
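+# Editor's sketch of the behaviour covered below (illustrative names):
+#
+#   import numpy as np, numexpr as ne
+#   a = np.array([], dtype=np.int32)
+#   b = np.array([], dtype=np.float64)
+#   ne.evaluate('a + b')    # -> array([], dtype=float64)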
+class test_zerodim(TestCase): + def test_zerodim1d(self): + a0 = array([], dtype=int32) + a1 = array([], dtype=float64) + + r0 = evaluate('a0 + a1') + r1 = evaluate('a0 * a1') + + assert_array_equal(r0, a1) + assert_array_equal(r1, a1) + + def test_zerodim3d(self): + a0 = array([], dtype=int32).reshape(0, 2, 4) + a1 = array([], dtype=float64).reshape(0, 2, 4) + + r0 = evaluate('a0 + a1') + r1 = evaluate('a0 * a1') + + assert_array_equal(r0, a1) + assert_array_equal(r1, a1) + + +@contextmanager +def _environment(key, value): + old = os.environ.get(key) + os.environ[key] = value + try: + yield + finally: + if old: + os.environ[key] = old + else: + del os.environ[key] + +# Test cases for the threading configuration +class test_threading_config(TestCase): + def test_max_threads_unset(self): + # Has to be done in a subprocess as `importlib.reload` doesn't let us + # re-initialize the threadpool + script = '\n'.join([ + "import os", + "if 'NUMEXPR_MAX_THREADS' in os.environ: os.environ.pop('NUMEXPR_MAX_THREADS')", + "if 'OMP_NUM_THREADS' in os.environ: os.environ.pop('OMP_NUM_THREADS')", + "import numexpr", + "assert(numexpr.nthreads <= 8)", + "exit(0)"]) + subprocess.check_call([sys.executable, '-c', script]) + + def test_max_threads_set(self): + # Has to be done in a subprocess as `importlib.reload` doesn't let us + # re-initialize the threadpool + script = '\n'.join([ + "import os", + "os.environ['NUMEXPR_MAX_THREADS'] = '4'", + "import numexpr", + "assert(numexpr.MAX_THREADS == 4)", + "exit(0)"]) + subprocess.check_call([sys.executable, '-c', script]) + + def test_numexpr_num_threads(self): + with _environment('OMP_NUM_THREADS', '5'): + # NUMEXPR_NUM_THREADS has priority + with _environment('NUMEXPR_NUM_THREADS', '3'): + if 'sparc' in platform.machine(): + self.assertEqual(1, numexpr._init_num_threads()) + else: + self.assertEqual(3, numexpr._init_num_threads()) + + def test_omp_num_threads(self): + with _environment('OMP_NUM_THREADS', '5'): + if 'sparc' in platform.machine(): + self.assertEqual(1, numexpr._init_num_threads()) + else: + self.assertEqual(5, numexpr._init_num_threads()) + + def test_omp_num_threads_empty_string(self): + with _environment('OMP_NUM_THREADS', ''): + if 'sparc' in platform.machine(): + self.assertEqual(1, numexpr._init_num_threads()) + else: + self.assertEqual(detect_number_of_cores(), numexpr._init_num_threads()) + + def test_numexpr_max_threads_empty_string(self): + with _environment('NUMEXPR_MAX_THREADS', ''): + if 'sparc' in platform.machine(): + self.assertEqual(1, numexpr._init_num_threads()) + else: + self.assertEqual(detect_number_of_cores(), numexpr._init_num_threads()) + + def test_vml_threads_round_trip(self): + n_threads = 3 + if use_vml: + numexpr.utils.set_vml_num_threads(n_threads) + set_threads = numexpr.utils.get_vml_num_threads() + self.assertEqual(n_threads, set_threads) + else: + self.assertIsNone(numexpr.utils.set_vml_num_threads(n_threads)) + self.assertIsNone(numexpr.utils.get_vml_num_threads()) + + +# Case test for threads +class test_threading(TestCase): + + def test_thread(self): + import threading + + class ThreadTest(threading.Thread): + def run(self): + a = arange(3) + assert_array_equal(evaluate('a**3'), array([0, 1, 8])) + + test = ThreadTest() + test.start() + test.join() + + def test_multithread(self): + import threading + + # Running evaluate() from multiple threads shouldn't crash + def work(n): + a = arange(n) + evaluate('a+a') + + work(10) # warm compilation cache + + nthreads = 30 + threads = [threading.Thread(target=work, 
args=(1e5,)) + for i in range(nthreads)] + for t in threads: + t.start() + for t in threads: + t.join() + + +# The worker function for the subprocess (needs to be here because Windows +# has problems pickling nested functions with the multiprocess module :-/) +def _worker(qout=None): + ra = np.arange(1e3) + rows = evaluate('ra > 0') + #print "Succeeded in evaluation!\n" + if qout is not None: + qout.put("Done") + + +# Case test for subprocesses (via multiprocessing module) +class test_subprocess(TestCase): + def test_multiprocess(self): + try: + import multiprocessing as mp + except ImportError: + return + # Check for two threads at least + numexpr.set_num_threads(2) + #print "**** Running from main process:" + _worker() + #print "**** Running from subprocess:" + qout = mp.Queue() + ps = mp.Process(target=_worker, args=(qout,)) + ps.daemon = True + ps.start() + + result = qout.get() + #print result + + +def print_versions(): + """Print the versions of software that numexpr relies on.""" + # from pkg_resources import parse_version + from numexpr.cpuinfo import cpu + import platform + + print('-=' * 38) + print('Numexpr version: %s' % numexpr.__version__) + print('NumPy version: %s' % np.__version__) + print('Python version: %s' % sys.version) + (sysname, nodename, release, os_version, machine, processor) = platform.uname() + print('Platform: %s-%s-%s' % (sys.platform, machine, os_version)) + try: + # cpuinfo doesn't work on OSX well it seems, so protect these outputs + # with a try block + cpu_info = cpu.info[0] + print('CPU vendor: %s' % cpu_info.get('VendorIdentifier', '')) + print('CPU model: %s' % cpu_info.get('ProcessorNameString', '')) + print('CPU clock speed: %s MHz' % cpu_info.get('~MHz','')) + except KeyError: + pass + print('VML available? %s' % use_vml) + if use_vml: + print('VML/MKL version: %s' % numexpr.get_vml_version()) + print('Number of threads used by default: %d ' + '(out of %d detected cores)' % (numexpr.nthreads, numexpr.ncores)) + print('Maximum number of threads: %s' % numexpr.MAX_THREADS) + print('-=' * 38) + + +def test(verbosity=1): + """ + Run all the tests in the test suite. + """ + print_versions() + # For some reason, NumPy issues all kinds of warnings when using Python3. + # Ignoring them in tests should be ok, as all results are checked out. + # See https://github.com/pydata/numexpr/issues/183 for details. 
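+    # Editor's aside (a sketch): a scoped alternative that restores the NumPy
+    # error state afterwards would be:
+    #
+    #   with np.errstate(divide='ignore', invalid='ignore',
+    #                    over='ignore', under='ignore'):
+    #       unittest.TextTestRunner(verbosity=verbosity).run(suite())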
+ np.seterr(divide='ignore', invalid='ignore', over='ignore', under='ignore') + return unittest.TextTestRunner(verbosity=verbosity).run(suite()) + + +test.__test__ = False + + +def suite(): + import unittest + import platform as pl + + theSuite = unittest.TestSuite() + niter = 1 + + class TestExpressions(TestCase): + pass + + def add_method(func): + def method(self): + return func() + + setattr(TestExpressions, func.__name__, + method.__get__(None, TestExpressions)) + + for func in test_expressions(): + add_method(func) + + for n in range(niter): + theSuite.addTest(unittest.makeSuite(test_numexpr)) + if 'sparc' not in platform.machine(): + theSuite.addTest(unittest.makeSuite(test_numexpr2)) + theSuite.addTest(unittest.makeSuite(test_evaluate)) + theSuite.addTest(unittest.makeSuite(TestExpressions)) + theSuite.addTest(unittest.makeSuite(test_int32_int64)) + theSuite.addTest(unittest.makeSuite(test_uint32_int64)) + theSuite.addTest(unittest.makeSuite(test_strings)) + theSuite.addTest( + unittest.makeSuite(test_irregular_stride)) + theSuite.addTest(unittest.makeSuite(test_zerodim)) + theSuite.addTest(unittest.makeSuite(test_threading_config)) + + # multiprocessing module is not supported on Hurd/kFreeBSD + if (pl.system().lower() not in ('gnu', 'gnu/kfreebsd')): + theSuite.addTest(unittest.makeSuite(test_subprocess)) + + # I need to put this test after test_subprocess because + # if not, the test suite locks immediately before test_subproces. + # This only happens with Windows, so I suspect of a subtle bad + # interaction with threads and subprocess :-/ + theSuite.addTest(unittest.makeSuite(test_threading)) + + return theSuite + + +if __name__ == '__main__': + print_versions() + unittest.main(defaultTest='suite') +# suite = suite() +# unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/env-llmeval/lib/python3.10/site-packages/numexpr/utils.py b/env-llmeval/lib/python3.10/site-packages/numexpr/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..073b879e12656eb7961e105b1284b3705d76e30b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/numexpr/utils.py @@ -0,0 +1,228 @@ +################################################################### +# Numexpr - Fast numerical array expression evaluator for NumPy. +# +# License: MIT +# Author: See AUTHORS.txt +# +# See LICENSE.txt and LICENSES/*.txt for details about copyright and +# rights to use. +#################################################################### + +import logging +log = logging.getLogger(__name__) + +import os +import subprocess + +from numexpr.interpreter import _set_num_threads, _get_num_threads, MAX_THREADS +from numexpr import use_vml +from . import version + +if use_vml: + from numexpr.interpreter import ( + _get_vml_version, _set_vml_accuracy_mode, _set_vml_num_threads, + _get_vml_num_threads) + + +def get_vml_version(): + """ + Get the VML/MKL library version. + """ + if use_vml: + return _get_vml_version() + else: + return None + + +def set_vml_accuracy_mode(mode): + """ + Set the accuracy mode for VML operations. + + The `mode` parameter can take the values: + - 'high': high accuracy mode (HA), <1 least significant bit + - 'low': low accuracy mode (LA), typically 1-2 least significant bits + - 'fast': enhanced performance mode (EP) + - None: mode settings are ignored + + This call is equivalent to the `vmlSetMode()` in the VML library. + See: + + http://www.intel.com/software/products/mkl/docs/webhelp/vml/vml_DataTypesAccuracyModes.html + + for more info on the accuracy modes. 
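+
+    A usage sketch (an editor's illustration; only meaningful on a
+    VML-enabled build, where `use_vml` is True)::
+
+        import numexpr
+        old = numexpr.utils.set_vml_accuracy_mode('low')
+        # ... run lower-accuracy, faster transcendental functions ...
+        numexpr.utils.set_vml_accuracy_mode(old)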
+
+    Returns the old accuracy settings.
+    """
+    if use_vml:
+        acc_dict = {None: 0, 'low': 1, 'high': 2, 'fast': 3}
+        acc_reverse_dict = {1: 'low', 2: 'high', 3: 'fast'}
+        if mode not in list(acc_dict.keys()):
+            raise ValueError(
+                "mode argument must be one of: None, 'high', 'low', 'fast'")
+        retval = _set_vml_accuracy_mode(acc_dict.get(mode, 0))
+        return acc_reverse_dict.get(retval)
+    else:
+        return None
+
+
+def set_vml_num_threads(nthreads):
+    """
+    Suggests a maximum number of threads to be used in VML operations.
+
+    This function is equivalent to the call
+    `mkl_domain_set_num_threads(nthreads, MKL_DOMAIN_VML)` in the MKL
+    library. See:
+
+    http://www.intel.com/software/products/mkl/docs/webhelp/support/functn_mkl_domain_set_num_threads.html
+
+    for more info about it.
+    """
+    if use_vml:
+        _set_vml_num_threads(nthreads)
+
+
+def get_vml_num_threads():
+    """
+    Gets the maximum number of threads to be used in VML operations.
+
+    This function is equivalent to the call
+    `mkl_domain_get_max_threads(MKL_DOMAIN_VML)` in the MKL
+    library. See:
+
+    http://software.intel.com/en-us/node/522118
+
+    for more info about it.
+    """
+    if use_vml:
+        return _get_vml_num_threads()
+    return None
+
+
+def set_num_threads(nthreads):
+    """
+    Sets the number of threads to be used in operations.
+
+    DEPRECATED: returns the previous setting for the number of threads.
+
+    During initialization time NumExpr sets this number to the number
+    of detected cores in the system (see `detect_number_of_cores()`).
+    """
+    old_nthreads = _set_num_threads(nthreads)
+    return old_nthreads
+
+
+def get_num_threads():
+    """
+    Gets the number of threads currently in use for operations.
+    """
+    return _get_num_threads()
+
+
+def _init_num_threads():
+    """
+    Reads the environment variable 'NUMEXPR_MAX_THREADS' to set the threadpool
+    size, and, if necessary, the slightly redundant 'NUMEXPR_NUM_THREADS' or
+    'OMP_NUM_THREADS' env vars to set the initial number of threads used by
+    the virtual machine.
+    """
+    # Any platform-specific short-circuits
+    if 'sparc' in version.platform_machine:
+        log.warning('The number of threads has been set to 1 because problems '
+                    'related to threading have been reported on some SPARC '
+                    'machines. The number of threads can be changed using the '
+                    '"set_num_threads" function.')
+        set_num_threads(1)
+        return 1
+
+    env_configured = False
+    n_cores = detect_number_of_cores()
+    if ('NUMEXPR_MAX_THREADS' in os.environ and os.environ['NUMEXPR_MAX_THREADS'] != '' or
+            'OMP_NUM_THREADS' in os.environ and os.environ['OMP_NUM_THREADS'] != ''):
+        # The user has configured NumExpr in the expected way, so suppress logs.
+        env_configured = True
+        n_cores = MAX_THREADS
+    else:
+        # The user has not set 'NUMEXPR_MAX_THREADS', so likely they have not
+        # configured NumExpr as desired, so we emit info logs.
+        if n_cores > MAX_THREADS:
+            log.info('Note: detected %d virtual cores but NumExpr set to maximum of %d, check "NUMEXPR_MAX_THREADS" environment variable.' % (n_cores, MAX_THREADS))
+        if n_cores > 8:
+            # The historical 'safety' limit.
+            log.info('Note: NumExpr detected %d cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.' % n_cores)
+            n_cores = 8
+
+    # Now we check for 'NUMEXPR_NUM_THREADS' or 'OMP_NUM_THREADS' to set the
+    # actual number of threads used.
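+    # Editor's sketch of the user-facing knobs this logic consumes (the
+    # values are arbitrary examples; set them before `import numexpr`):
+    #
+    #   import os
+    #   os.environ['NUMEXPR_MAX_THREADS'] = '16'  # cap on the pool size
+    #   os.environ['NUMEXPR_NUM_THREADS'] = '8'   # threads used at start-up
+    #   import numexpr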
+ if 'NUMEXPR_NUM_THREADS' in os.environ and os.environ['NUMEXPR_NUM_THREADS'] != '': + requested_threads = int(os.environ['NUMEXPR_NUM_THREADS']) + elif 'OMP_NUM_THREADS' in os.environ and os.environ['OMP_NUM_THREADS'] != '': + # Empty string is commonly used to unset the variable + requested_threads = int(os.environ['OMP_NUM_THREADS']) + else: + requested_threads = n_cores + if not env_configured: + log.info('NumExpr defaulting to %d threads.'%n_cores) + + # The C-extension function performs its own checks against `MAX_THREADS` + set_num_threads(requested_threads) + return requested_threads + + +def detect_number_of_cores(): + """ + Detects the number of cores on a system. Cribbed from pp. + """ + # Linux, Unix and MacOS: + if hasattr(os, "sysconf"): + if "SC_NPROCESSORS_ONLN" in os.sysconf_names: + # Linux & Unix: + ncpus = os.sysconf("SC_NPROCESSORS_ONLN") + if isinstance(ncpus, int) and ncpus > 0: + return ncpus + else: # OSX: + return int(subprocess.check_output(["sysctl", "-n", "hw.ncpu"])) + # Windows: + try: + ncpus = int(os.environ.get("NUMBER_OF_PROCESSORS", "")) + if ncpus > 0: + return ncpus + except ValueError: + pass + return 1 # Default + + +def detect_number_of_threads(): + """ + DEPRECATED: use `_init_num_threads` instead. + If this is modified, please update the note in: https://github.com/pydata/numexpr/wiki/Numexpr-Users-Guide + """ + log.warning('Deprecated, use `init_num_threads` instead.') + try: + nthreads = int(os.environ.get('NUMEXPR_NUM_THREADS', '')) + except ValueError: + try: + nthreads = int(os.environ.get('OMP_NUM_THREADS', '')) + except ValueError: + nthreads = detect_number_of_cores() + + # Check that we don't surpass the MAX_THREADS in interpreter.cpp + if nthreads > MAX_THREADS: + nthreads = MAX_THREADS + return nthreads + + +class CacheDict(dict): + """ + A dictionary that prevents itself from growing too much. + """ + + def __init__(self, maxentries): + self.maxentries = maxentries + super(CacheDict, self).__init__(self) + + def __setitem__(self, key, value): + # Protection against growing the cache too much + if len(self) > self.maxentries: + # Remove a 10% of (arbitrary) elements from the cache + entries_to_remove = self.maxentries // 10 + for k in list(self.keys())[:entries_to_remove]: + super(CacheDict, self).__delitem__(k) + super(CacheDict, self).__setitem__(key, value) + diff --git a/env-llmeval/lib/python3.10/site-packages/numexpr/version.py b/env-llmeval/lib/python3.10/site-packages/numexpr/version.py new file mode 100644 index 0000000000000000000000000000000000000000..69b5bb37d147033182917b5b9a82e20935490832 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/numexpr/version.py @@ -0,0 +1,4 @@ +# THIS FILE IS GENERATED BY `SETUP.PY` +version = '2.10.0' +numpy_build_version = '2.0.0rc1' +platform_machine = 'x86_64' diff --git a/env-llmeval/lib/python3.10/site-packages/requests/__init__.py b/env-llmeval/lib/python3.10/site-packages/requests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..300a16c5741d9ccb751185407694fe49e8da6bc5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/requests/__init__.py @@ -0,0 +1,180 @@ +# __ +# /__) _ _ _ _ _/ _ +# / ( (- (/ (/ (- _) / _) +# / + +""" +Requests HTTP Library +~~~~~~~~~~~~~~~~~~~~~ + +Requests is an HTTP library, written in Python, for human beings. +Basic GET usage: + + >>> import requests + >>> r = requests.get('https://www.python.org') + >>> r.status_code + 200 + >>> b'Python is a programming language' in r.content + True + +... 
or POST: + + >>> payload = dict(key1='value1', key2='value2') + >>> r = requests.post('https://httpbin.org/post', data=payload) + >>> print(r.text) + { + ... + "form": { + "key1": "value1", + "key2": "value2" + }, + ... + } + +The other HTTP methods are supported - see `requests.api`. Full documentation +is at . + +:copyright: (c) 2017 by Kenneth Reitz. +:license: Apache 2.0, see LICENSE for more details. +""" + +import warnings + +import urllib3 + +from .exceptions import RequestsDependencyWarning + +try: + from charset_normalizer import __version__ as charset_normalizer_version +except ImportError: + charset_normalizer_version = None + +try: + from chardet import __version__ as chardet_version +except ImportError: + chardet_version = None + + +def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): + urllib3_version = urllib3_version.split(".") + assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. + + # Sometimes, urllib3 only reports its version as 16.1. + if len(urllib3_version) == 2: + urllib3_version.append("0") + + # Check urllib3 for compatibility. + major, minor, patch = urllib3_version # noqa: F811 + major, minor, patch = int(major), int(minor), int(patch) + # urllib3 >= 1.21.1 + assert major >= 1 + if major == 1: + assert minor >= 21 + + # Check charset_normalizer for compatibility. + if chardet_version: + major, minor, patch = chardet_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # chardet_version >= 3.0.2, < 6.0.0 + assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0) + elif charset_normalizer_version: + major, minor, patch = charset_normalizer_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # charset_normalizer >= 2.0.0 < 4.0.0 + assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0) + else: + raise Exception("You need either charset_normalizer or chardet installed") + + +def _check_cryptography(cryptography_version): + # cryptography < 1.3.4 + try: + cryptography_version = list(map(int, cryptography_version.split("."))) + except ValueError: + return + + if cryptography_version < [1, 3, 4]: + warning = "Old version of cryptography ({}) may cause slowdown.".format( + cryptography_version + ) + warnings.warn(warning, RequestsDependencyWarning) + + +# Check imported dependencies for compatibility. +try: + check_compatibility( + urllib3.__version__, chardet_version, charset_normalizer_version + ) +except (AssertionError, ValueError): + warnings.warn( + "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " + "version!".format( + urllib3.__version__, chardet_version, charset_normalizer_version + ), + RequestsDependencyWarning, + ) + +# Attempt to enable urllib3's fallback for SNI support +# if the standard library doesn't support SNI or the +# 'ssl' library isn't available. +try: + try: + import ssl + except ImportError: + ssl = None + + if not getattr(ssl, "HAS_SNI", False): + from urllib3.contrib import pyopenssl + + pyopenssl.inject_into_urllib3() + + # Check cryptography version + from cryptography import __version__ as cryptography_version + + _check_cryptography(cryptography_version) +except ImportError: + pass + +# urllib3's DependencyWarnings should be silenced. +from urllib3.exceptions import DependencyWarning + +warnings.simplefilter("ignore", DependencyWarning) + +# Set default logging handler to avoid "No handler found" warnings. +import logging +from logging import NullHandler + +from . 
import packages, utils +from .__version__ import ( + __author__, + __author_email__, + __build__, + __cake__, + __copyright__, + __description__, + __license__, + __title__, + __url__, + __version__, +) +from .api import delete, get, head, options, patch, post, put, request +from .exceptions import ( + ConnectionError, + ConnectTimeout, + FileModeWarning, + HTTPError, + JSONDecodeError, + ReadTimeout, + RequestException, + Timeout, + TooManyRedirects, + URLRequired, +) +from .models import PreparedRequest, Request, Response +from .sessions import Session, session +from .status_codes import codes + +logging.getLogger(__name__).addHandler(NullHandler()) + +# FileModeWarnings go off per the default. +warnings.simplefilter("default", FileModeWarning, append=True) diff --git a/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/__version__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/__version__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0638bd46682342bcc48a9fa6d2d7a9f1d7eeefd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/__version__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c7d85f35abe36d909e3950a143cef514ee5f8aa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/certs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/certs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..def5e5a4a0b4b2ce5e61a5df181ba20b51e5f938 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/certs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46d8c17ee5f2c1cfd358ef459e1ca2700ce39f1d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/compat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/cookies.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/cookies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec396851bf39bb0711a4d930815914d4938c19b2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/cookies.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/exceptions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ad00f8bb016b5468e528a33af1af4b84ec3e8ac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/exceptions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/help.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/help.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..844182dea91749c317fbdd1a5c9caa23173f810f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/help.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/packages.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/packages.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57a092f7118143aed89e455f55e6d18c59edb82a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/requests/__pycache__/packages.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/requests/adapters.py b/env-llmeval/lib/python3.10/site-packages/requests/adapters.py new file mode 100644 index 0000000000000000000000000000000000000000..78e3bb6ecf920b429c4ecab62434b1e630c01707 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/requests/adapters.py @@ -0,0 +1,538 @@ +""" +requests.adapters +~~~~~~~~~~~~~~~~~ + +This module contains the transport adapters that Requests uses to define +and maintain connections. +""" + +import os.path +import socket # noqa: F401 + +from urllib3.exceptions import ClosedPoolError, ConnectTimeoutError +from urllib3.exceptions import HTTPError as _HTTPError +from urllib3.exceptions import InvalidHeader as _InvalidHeader +from urllib3.exceptions import ( + LocationValueError, + MaxRetryError, + NewConnectionError, + ProtocolError, +) +from urllib3.exceptions import ProxyError as _ProxyError +from urllib3.exceptions import ReadTimeoutError, ResponseError +from urllib3.exceptions import SSLError as _SSLError +from urllib3.poolmanager import PoolManager, proxy_from_url +from urllib3.util import Timeout as TimeoutSauce +from urllib3.util import parse_url +from urllib3.util.retry import Retry + +from .auth import _basic_auth_str +from .compat import basestring, urlparse +from .cookies import extract_cookies_to_jar +from .exceptions import ( + ConnectionError, + ConnectTimeout, + InvalidHeader, + InvalidProxyURL, + InvalidSchema, + InvalidURL, + ProxyError, + ReadTimeout, + RetryError, + SSLError, +) +from .models import Response +from .structures import CaseInsensitiveDict +from .utils import ( + DEFAULT_CA_BUNDLE_PATH, + extract_zipped_paths, + get_auth_from_url, + get_encoding_from_headers, + prepend_scheme_if_needed, + select_proxy, + urldefragauth, +) + +try: + from urllib3.contrib.socks import SOCKSProxyManager +except ImportError: + + def SOCKSProxyManager(*args, **kwargs): + raise InvalidSchema("Missing dependencies for SOCKS support.") + + +DEFAULT_POOLBLOCK = False +DEFAULT_POOLSIZE = 10 +DEFAULT_RETRIES = 0 +DEFAULT_POOL_TIMEOUT = None + + +class BaseAdapter: + """The Base Transport Adapter""" + + def __init__(self): + super().__init__() + + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest ` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. 
+ :type timeout: float or tuple + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + """ + raise NotImplementedError + + def close(self): + """Cleans up adapter specific items.""" + raise NotImplementedError + + +class HTTPAdapter(BaseAdapter): + """The built-in HTTP Adapter for urllib3. + + Provides a general-case interface for Requests sessions to contact HTTP and + HTTPS urls by implementing the Transport Adapter interface. This class will + usually be created by the :class:`Session ` class under the + covers. + + :param pool_connections: The number of urllib3 connection pools to cache. + :param pool_maxsize: The maximum number of connections to save in the pool. + :param max_retries: The maximum number of retries each connection + should attempt. Note, this applies only to failed DNS lookups, socket + connections and connection timeouts, never to requests where data has + made it to the server. By default, Requests does not retry failed + connections. If you need granular control over the conditions under + which we retry a request, import urllib3's ``Retry`` class and pass + that instead. + :param pool_block: Whether the connection pool should block for connections. + + Usage:: + + >>> import requests + >>> s = requests.Session() + >>> a = requests.adapters.HTTPAdapter(max_retries=3) + >>> s.mount('http://', a) + """ + + __attrs__ = [ + "max_retries", + "config", + "_pool_connections", + "_pool_maxsize", + "_pool_block", + ] + + def __init__( + self, + pool_connections=DEFAULT_POOLSIZE, + pool_maxsize=DEFAULT_POOLSIZE, + max_retries=DEFAULT_RETRIES, + pool_block=DEFAULT_POOLBLOCK, + ): + if max_retries == DEFAULT_RETRIES: + self.max_retries = Retry(0, read=False) + else: + self.max_retries = Retry.from_int(max_retries) + self.config = {} + self.proxy_manager = {} + + super().__init__() + + self._pool_connections = pool_connections + self._pool_maxsize = pool_maxsize + self._pool_block = pool_block + + self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) + + def __getstate__(self): + return {attr: getattr(self, attr, None) for attr in self.__attrs__} + + def __setstate__(self, state): + # Can't handle by adding 'proxy_manager' to self.__attrs__ because + # self.poolmanager uses a lambda function, which isn't pickleable. + self.proxy_manager = {} + self.config = {} + + for attr, value in state.items(): + setattr(self, attr, value) + + self.init_poolmanager( + self._pool_connections, self._pool_maxsize, block=self._pool_block + ) + + def init_poolmanager( + self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs + ): + """Initializes a urllib3 PoolManager. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param connections: The number of urllib3 connection pools to cache. + :param maxsize: The maximum number of connections to save in the pool. + :param block: Block when no free connections are available. + :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. 
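+
+        A minimal subclassing sketch (hypothetical adapter name; the extra
+        keyword arguments are forwarded verbatim to urllib3)::
+
+            >>> class RetryingAdapter(HTTPAdapter):
+            ...     def init_poolmanager(self, *args, **kwargs):
+            ...         kwargs['retries'] = 2
+            ...         super().init_poolmanager(*args, **kwargs)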
+ """ + # save these values for pickling + self._pool_connections = connections + self._pool_maxsize = maxsize + self._pool_block = block + + self.poolmanager = PoolManager( + num_pools=connections, + maxsize=maxsize, + block=block, + **pool_kwargs, + ) + + def proxy_manager_for(self, proxy, **proxy_kwargs): + """Return urllib3 ProxyManager for the given proxy. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param proxy: The proxy to return a urllib3 ProxyManager for. + :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. + :returns: ProxyManager + :rtype: urllib3.ProxyManager + """ + if proxy in self.proxy_manager: + manager = self.proxy_manager[proxy] + elif proxy.lower().startswith("socks"): + username, password = get_auth_from_url(proxy) + manager = self.proxy_manager[proxy] = SOCKSProxyManager( + proxy, + username=username, + password=password, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs, + ) + else: + proxy_headers = self.proxy_headers(proxy) + manager = self.proxy_manager[proxy] = proxy_from_url( + proxy, + proxy_headers=proxy_headers, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs, + ) + + return manager + + def cert_verify(self, conn, url, verify, cert): + """Verify a SSL certificate. This method should not be called from user + code, and is only exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param conn: The urllib3 connection object associated with the cert. + :param url: The requested URL. + :param verify: Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: The SSL certificate to verify. + """ + if url.lower().startswith("https") and verify: + + cert_loc = None + + # Allow self-specified cert location. + if verify is not True: + cert_loc = verify + + if not cert_loc: + cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) + + if not cert_loc or not os.path.exists(cert_loc): + raise OSError( + f"Could not find a suitable TLS CA certificate bundle, " + f"invalid path: {cert_loc}" + ) + + conn.cert_reqs = "CERT_REQUIRED" + + if not os.path.isdir(cert_loc): + conn.ca_certs = cert_loc + else: + conn.ca_cert_dir = cert_loc + else: + conn.cert_reqs = "CERT_NONE" + conn.ca_certs = None + conn.ca_cert_dir = None + + if cert: + if not isinstance(cert, basestring): + conn.cert_file = cert[0] + conn.key_file = cert[1] + else: + conn.cert_file = cert + conn.key_file = None + if conn.cert_file and not os.path.exists(conn.cert_file): + raise OSError( + f"Could not find the TLS certificate file, " + f"invalid path: {conn.cert_file}" + ) + if conn.key_file and not os.path.exists(conn.key_file): + raise OSError( + f"Could not find the TLS key file, invalid path: {conn.key_file}" + ) + + def build_response(self, req, resp): + """Builds a :class:`Response ` object from a urllib3 + response. This should not be called from user code, and is only exposed + for use when subclassing the + :class:`HTTPAdapter ` + + :param req: The :class:`PreparedRequest ` used to generate the response. + :param resp: The urllib3 response object. + :rtype: requests.Response + """ + response = Response() + + # Fallback to None if there's no status_code, for whatever reason. 
+ response.status_code = getattr(resp, "status", None) + + # Make headers case-insensitive. + response.headers = CaseInsensitiveDict(getattr(resp, "headers", {})) + + # Set encoding. + response.encoding = get_encoding_from_headers(response.headers) + response.raw = resp + response.reason = response.raw.reason + + if isinstance(req.url, bytes): + response.url = req.url.decode("utf-8") + else: + response.url = req.url + + # Add new cookies from the server. + extract_cookies_to_jar(response.cookies, req, resp) + + # Give the Response some context. + response.request = req + response.connection = self + + return response + + def get_connection(self, url, proxies=None): + """Returns a urllib3 connection for the given URL. This should not be + called from user code, and is only exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param url: The URL to connect to. + :param proxies: (optional) A Requests-style dictionary of proxies used on this request. + :rtype: urllib3.ConnectionPool + """ + proxy = select_proxy(url, proxies) + + if proxy: + proxy = prepend_scheme_if_needed(proxy, "http") + proxy_url = parse_url(proxy) + if not proxy_url.host: + raise InvalidProxyURL( + "Please check proxy URL. It is malformed " + "and could be missing the host." + ) + proxy_manager = self.proxy_manager_for(proxy) + conn = proxy_manager.connection_from_url(url) + else: + # Only scheme should be lower case + parsed = urlparse(url) + url = parsed.geturl() + conn = self.poolmanager.connection_from_url(url) + + return conn + + def close(self): + """Disposes of any internal state. + + Currently, this closes the PoolManager and any active ProxyManager, + which closes any pooled connections. + """ + self.poolmanager.clear() + for proxy in self.proxy_manager.values(): + proxy.clear() + + def request_url(self, request, proxies): + """Obtain the url to use when making the final request. + + If the message is being sent through a HTTP proxy, the full URL has to + be used. Otherwise, we should only use the path portion of the URL. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param request: The :class:`PreparedRequest ` being sent. + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. + :rtype: str + """ + proxy = select_proxy(request.url, proxies) + scheme = urlparse(request.url).scheme + + is_proxied_http_request = proxy and scheme != "https" + using_socks_proxy = False + if proxy: + proxy_scheme = urlparse(proxy).scheme.lower() + using_socks_proxy = proxy_scheme.startswith("socks") + + url = request.path_url + if is_proxied_http_request and not using_socks_proxy: + url = urldefragauth(request.url) + + return url + + def add_headers(self, request, **kwargs): + """Add any headers needed by the connection. As of v2.0 this does + nothing by default, but is left for overriding by users that subclass + the :class:`HTTPAdapter `. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param request: The :class:`PreparedRequest ` to add headers to. + :param kwargs: The keyword arguments from the call to send(). + """ + pass + + def proxy_headers(self, proxy): + """Returns a dictionary of the headers to add to any request sent + through a proxy. This works with urllib3 magic to ensure that they are + correctly sent to the proxy, rather than in a tunnelled request if + CONNECT is being used. 
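+
+        A minimal sketch of the result (hypothetical proxy URL; note the
+        credentials are merely base64-encoded, not encrypted)::
+
+            >>> HTTPAdapter().proxy_headers('http://user:pass@proxy.example:3128')
+            {'Proxy-Authorization': 'Basic dXNlcjpwYXNz'}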
+ + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param proxy: The url of the proxy being used for this request. + :rtype: dict + """ + headers = {} + username, password = get_auth_from_url(proxy) + + if username: + headers["Proxy-Authorization"] = _basic_auth_str(username, password) + + return headers + + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest ` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple or urllib3 Timeout object + :param verify: (optional) Either a boolean, in which case it controls whether + we verify the server's TLS certificate, or a string, in which case it + must be a path to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + :rtype: requests.Response + """ + + try: + conn = self.get_connection(request.url, proxies) + except LocationValueError as e: + raise InvalidURL(e, request=request) + + self.cert_verify(conn, request.url, verify, cert) + url = self.request_url(request, proxies) + self.add_headers( + request, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + ) + + chunked = not (request.body is None or "Content-Length" in request.headers) + + if isinstance(timeout, tuple): + try: + connect, read = timeout + timeout = TimeoutSauce(connect=connect, read=read) + except ValueError: + raise ValueError( + f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " + f"or a single float to set both timeouts to the same value." + ) + elif isinstance(timeout, TimeoutSauce): + pass + else: + timeout = TimeoutSauce(connect=timeout, read=timeout) + + try: + resp = conn.urlopen( + method=request.method, + url=url, + body=request.body, + headers=request.headers, + redirect=False, + assert_same_host=False, + preload_content=False, + decode_content=False, + retries=self.max_retries, + timeout=timeout, + chunked=chunked, + ) + + except (ProtocolError, OSError) as err: + raise ConnectionError(err, request=request) + + except MaxRetryError as e: + if isinstance(e.reason, ConnectTimeoutError): + # TODO: Remove this in 3.0.0: see #2811 + if not isinstance(e.reason, NewConnectionError): + raise ConnectTimeout(e, request=request) + + if isinstance(e.reason, ResponseError): + raise RetryError(e, request=request) + + if isinstance(e.reason, _ProxyError): + raise ProxyError(e, request=request) + + if isinstance(e.reason, _SSLError): + # This branch is for urllib3 v1.22 and later. 
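+                # (MaxRetryError carries its underlying cause in `e.reason`;
+                # each isinstance check above translates one urllib3 failure
+                # mode into the matching requests exception.)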
+ raise SSLError(e, request=request) + + raise ConnectionError(e, request=request) + + except ClosedPoolError as e: + raise ConnectionError(e, request=request) + + except _ProxyError as e: + raise ProxyError(e) + + except (_SSLError, _HTTPError) as e: + if isinstance(e, _SSLError): + # This branch is for urllib3 versions earlier than v1.22 + raise SSLError(e, request=request) + elif isinstance(e, ReadTimeoutError): + raise ReadTimeout(e, request=request) + elif isinstance(e, _InvalidHeader): + raise InvalidHeader(e, request=request) + else: + raise + + return self.build_response(request, resp) diff --git a/env-llmeval/lib/python3.10/site-packages/requests/cookies.py b/env-llmeval/lib/python3.10/site-packages/requests/cookies.py new file mode 100644 index 0000000000000000000000000000000000000000..bf54ab237e410603061b8cec8fd195912d3cfb08 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/requests/cookies.py @@ -0,0 +1,561 @@ +""" +requests.cookies +~~~~~~~~~~~~~~~~ + +Compatibility code to be able to use `cookielib.CookieJar` with requests. + +requests.utils imports from here, so be careful with imports. +""" + +import calendar +import copy +import time + +from ._internal_utils import to_native_string +from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse + +try: + import threading +except ImportError: + import dummy_threading as threading + + +class MockRequest: + """Wraps a `requests.Request` to mimic a `urllib2.Request`. + + The code in `cookielib.CookieJar` expects this interface in order to correctly + manage cookie policies, i.e., determine whether a cookie can be set, given the + domains of the request and the cookie. + + The original request object is read-only. The client is responsible for collecting + the new headers via `get_new_headers()` and interpreting them appropriately. You + probably want `get_cookie_header`, defined below. 
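+
+    A minimal sketch (hypothetical URL; assumes `requests` is importable
+    for the illustration):
+
+        >>> req = requests.Request('GET', 'https://example.com/a').prepare()
+        >>> MockRequest(req).get_host()
+        'example.com'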
+ """ + + def __init__(self, request): + self._r = request + self._new_headers = {} + self.type = urlparse(self._r.url).scheme + + def get_type(self): + return self.type + + def get_host(self): + return urlparse(self._r.url).netloc + + def get_origin_req_host(self): + return self.get_host() + + def get_full_url(self): + # Only return the response's URL if the user hadn't set the Host + # header + if not self._r.headers.get("Host"): + return self._r.url + # If they did set it, retrieve it and reconstruct the expected domain + host = to_native_string(self._r.headers["Host"], encoding="utf-8") + parsed = urlparse(self._r.url) + # Reconstruct the URL as we expect it + return urlunparse( + [ + parsed.scheme, + host, + parsed.path, + parsed.params, + parsed.query, + parsed.fragment, + ] + ) + + def is_unverifiable(self): + return True + + def has_header(self, name): + return name in self._r.headers or name in self._new_headers + + def get_header(self, name, default=None): + return self._r.headers.get(name, self._new_headers.get(name, default)) + + def add_header(self, key, val): + """cookielib has no legitimate use for this method; add it back if you find one.""" + raise NotImplementedError( + "Cookie headers should be added with add_unredirected_header()" + ) + + def add_unredirected_header(self, name, value): + self._new_headers[name] = value + + def get_new_headers(self): + return self._new_headers + + @property + def unverifiable(self): + return self.is_unverifiable() + + @property + def origin_req_host(self): + return self.get_origin_req_host() + + @property + def host(self): + return self.get_host() + + +class MockResponse: + """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. + + ...what? Basically, expose the parsed HTTP headers from the server response + the way `cookielib` expects to see them. + """ + + def __init__(self, headers): + """Make a MockResponse for `cookielib` to read. + + :param headers: a httplib.HTTPMessage or analogous carrying the headers + """ + self._headers = headers + + def info(self): + return self._headers + + def getheaders(self, name): + self._headers.getheaders(name) + + +def extract_cookies_to_jar(jar, request, response): + """Extract the cookies from the response into a CookieJar. + + :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar) + :param request: our own requests.Request object + :param response: urllib3.HTTPResponse object + """ + if not (hasattr(response, "_original_response") and response._original_response): + return + # the _original_response field is the wrapped httplib.HTTPResponse object, + req = MockRequest(request) + # pull out the HTTPMessage with the headers and put it in the mock: + res = MockResponse(response._original_response.msg) + jar.extract_cookies(res, req) + + +def get_cookie_header(jar, request): + """ + Produce an appropriate Cookie header string to be sent with `request`, or None. + + :rtype: str + """ + r = MockRequest(request) + jar.add_cookie_header(r) + return r.get_new_headers().get("Cookie") + + +def remove_cookie_by_name(cookiejar, name, domain=None, path=None): + """Unsets a cookie by name, by default over all domains and paths. + + Wraps CookieJar.clear(), is O(n). 
+ """ + clearables = [] + for cookie in cookiejar: + if cookie.name != name: + continue + if domain is not None and domain != cookie.domain: + continue + if path is not None and path != cookie.path: + continue + clearables.append((cookie.domain, cookie.path, cookie.name)) + + for domain, path, name in clearables: + cookiejar.clear(domain, path, name) + + +class CookieConflictError(RuntimeError): + """There are two cookies that meet the criteria specified in the cookie jar. + Use .get and .set and include domain and path args in order to be more specific. + """ + + +class RequestsCookieJar(cookielib.CookieJar, MutableMapping): + """Compatibility class; is a cookielib.CookieJar, but exposes a dict + interface. + + This is the CookieJar we create by default for requests and sessions that + don't specify one, since some clients may expect response.cookies and + session.cookies to support dict operations. + + Requests does not use the dict interface internally; it's just for + compatibility with external client code. All requests code should work + out of the box with externally provided instances of ``CookieJar``, e.g. + ``LWPCookieJar`` and ``FileCookieJar``. + + Unlike a regular CookieJar, this class is pickleable. + + .. warning:: dictionary operations that are normally O(1) may be O(n). + """ + + def get(self, name, default=None, domain=None, path=None): + """Dict-like get() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + + .. warning:: operation is O(n), not O(1). + """ + try: + return self._find_no_duplicates(name, domain, path) + except KeyError: + return default + + def set(self, name, value, **kwargs): + """Dict-like set() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + """ + # support client code that unsets cookies by assignment of a None value: + if value is None: + remove_cookie_by_name( + self, name, domain=kwargs.get("domain"), path=kwargs.get("path") + ) + return + + if isinstance(value, Morsel): + c = morsel_to_cookie(value) + else: + c = create_cookie(name, value, **kwargs) + self.set_cookie(c) + return c + + def iterkeys(self): + """Dict-like iterkeys() that returns an iterator of names of cookies + from the jar. + + .. seealso:: itervalues() and iteritems(). + """ + for cookie in iter(self): + yield cookie.name + + def keys(self): + """Dict-like keys() that returns a list of names of cookies from the + jar. + + .. seealso:: values() and items(). + """ + return list(self.iterkeys()) + + def itervalues(self): + """Dict-like itervalues() that returns an iterator of values of cookies + from the jar. + + .. seealso:: iterkeys() and iteritems(). + """ + for cookie in iter(self): + yield cookie.value + + def values(self): + """Dict-like values() that returns a list of values of cookies from the + jar. + + .. seealso:: keys() and items(). + """ + return list(self.itervalues()) + + def iteritems(self): + """Dict-like iteritems() that returns an iterator of name-value tuples + from the jar. + + .. seealso:: iterkeys() and itervalues(). + """ + for cookie in iter(self): + yield cookie.name, cookie.value + + def items(self): + """Dict-like items() that returns a list of name-value tuples from the + jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a + vanilla python dict of key value pairs. + + .. seealso:: keys() and values(). 
+ """ + return list(self.iteritems()) + + def list_domains(self): + """Utility method to list all the domains in the jar.""" + domains = [] + for cookie in iter(self): + if cookie.domain not in domains: + domains.append(cookie.domain) + return domains + + def list_paths(self): + """Utility method to list all the paths in the jar.""" + paths = [] + for cookie in iter(self): + if cookie.path not in paths: + paths.append(cookie.path) + return paths + + def multiple_domains(self): + """Returns True if there are multiple domains in the jar. + Returns False otherwise. + + :rtype: bool + """ + domains = [] + for cookie in iter(self): + if cookie.domain is not None and cookie.domain in domains: + return True + domains.append(cookie.domain) + return False # there is only one domain in jar + + def get_dict(self, domain=None, path=None): + """Takes as an argument an optional domain and path and returns a plain + old Python dict of name-value pairs of cookies that meet the + requirements. + + :rtype: dict + """ + dictionary = {} + for cookie in iter(self): + if (domain is None or cookie.domain == domain) and ( + path is None or cookie.path == path + ): + dictionary[cookie.name] = cookie.value + return dictionary + + def __contains__(self, name): + try: + return super().__contains__(name) + except CookieConflictError: + return True + + def __getitem__(self, name): + """Dict-like __getitem__() for compatibility with client code. Throws + exception if there are more than one cookie with name. In that case, + use the more explicit get() method instead. + + .. warning:: operation is O(n), not O(1). + """ + return self._find_no_duplicates(name) + + def __setitem__(self, name, value): + """Dict-like __setitem__ for compatibility with client code. Throws + exception if there is already a cookie of that name in the jar. In that + case, use the more explicit set() method instead. + """ + self.set(name, value) + + def __delitem__(self, name): + """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s + ``remove_cookie_by_name()``. + """ + remove_cookie_by_name(self, name) + + def set_cookie(self, cookie, *args, **kwargs): + if ( + hasattr(cookie.value, "startswith") + and cookie.value.startswith('"') + and cookie.value.endswith('"') + ): + cookie.value = cookie.value.replace('\\"', "") + return super().set_cookie(cookie, *args, **kwargs) + + def update(self, other): + """Updates this jar with cookies from another CookieJar or dict-like""" + if isinstance(other, cookielib.CookieJar): + for cookie in other: + self.set_cookie(copy.copy(cookie)) + else: + super().update(other) + + def _find(self, name, domain=None, path=None): + """Requests uses this method internally to get cookie values. + + If there are conflicting cookies, _find arbitrarily chooses one. + See _find_no_duplicates if you want an exception thrown if there are + conflicting cookies. + + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :return: cookie.value + """ + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + return cookie.value + + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + + def _find_no_duplicates(self, name, domain=None, path=None): + """Both ``__get_item__`` and ``get`` call this function: it's never + used elsewhere in Requests. 
+ + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :raises KeyError: if cookie is not found + :raises CookieConflictError: if there are multiple cookies + that match name and optionally domain and path + :return: cookie.value + """ + toReturn = None + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + if toReturn is not None: + # if there are multiple cookies that meet passed in criteria + raise CookieConflictError( + f"There are multiple cookies with name, {name!r}" + ) + # we will eventually return this as long as no cookie conflict + toReturn = cookie.value + + if toReturn: + return toReturn + raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}") + + def __getstate__(self): + """Unlike a normal CookieJar, this class is pickleable.""" + state = self.__dict__.copy() + # remove the unpickleable RLock object + state.pop("_cookies_lock") + return state + + def __setstate__(self, state): + """Unlike a normal CookieJar, this class is pickleable.""" + self.__dict__.update(state) + if "_cookies_lock" not in self.__dict__: + self._cookies_lock = threading.RLock() + + def copy(self): + """Return a copy of this RequestsCookieJar.""" + new_cj = RequestsCookieJar() + new_cj.set_policy(self.get_policy()) + new_cj.update(self) + return new_cj + + def get_policy(self): + """Return the CookiePolicy instance used.""" + return self._policy + + +def _copy_cookie_jar(jar): + if jar is None: + return None + + if hasattr(jar, "copy"): + # We're dealing with an instance of RequestsCookieJar + return jar.copy() + # We're dealing with a generic CookieJar instance + new_jar = copy.copy(jar) + new_jar.clear() + for cookie in jar: + new_jar.set_cookie(copy.copy(cookie)) + return new_jar + + +def create_cookie(name, value, **kwargs): + """Make a cookie from underspecified parameters. + + By default, the pair of `name` and `value` will be set for the domain '' + and sent on every request (this is sometimes called a "supercookie"). 
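+
+    A minimal sketch (hypothetical values):
+
+        >>> c = create_cookie('k', 'v', domain='example.com')
+        >>> (c.name, c.value, c.domain)
+        ('k', 'v', 'example.com')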
+ """ + result = { + "version": 0, + "name": name, + "value": value, + "port": None, + "domain": "", + "path": "/", + "secure": False, + "expires": None, + "discard": True, + "comment": None, + "comment_url": None, + "rest": {"HttpOnly": None}, + "rfc2109": False, + } + + badargs = set(kwargs) - set(result) + if badargs: + raise TypeError( + f"create_cookie() got unexpected keyword arguments: {list(badargs)}" + ) + + result.update(kwargs) + result["port_specified"] = bool(result["port"]) + result["domain_specified"] = bool(result["domain"]) + result["domain_initial_dot"] = result["domain"].startswith(".") + result["path_specified"] = bool(result["path"]) + + return cookielib.Cookie(**result) + + +def morsel_to_cookie(morsel): + """Convert a Morsel object into a Cookie containing the one k/v pair.""" + + expires = None + if morsel["max-age"]: + try: + expires = int(time.time() + int(morsel["max-age"])) + except ValueError: + raise TypeError(f"max-age: {morsel['max-age']} must be integer") + elif morsel["expires"]: + time_template = "%a, %d-%b-%Y %H:%M:%S GMT" + expires = calendar.timegm(time.strptime(morsel["expires"], time_template)) + return create_cookie( + comment=morsel["comment"], + comment_url=bool(morsel["comment"]), + discard=False, + domain=morsel["domain"], + expires=expires, + name=morsel.key, + path=morsel["path"], + port=None, + rest={"HttpOnly": morsel["httponly"]}, + rfc2109=False, + secure=bool(morsel["secure"]), + value=morsel.value, + version=morsel["version"] or 0, + ) + + +def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): + """Returns a CookieJar from a key/value dictionary. + + :param cookie_dict: Dict of key/values to insert into CookieJar. + :param cookiejar: (optional) A cookiejar to add the cookies to. + :param overwrite: (optional) If False, will not replace cookies + already in the jar with new ones. + :rtype: CookieJar + """ + if cookiejar is None: + cookiejar = RequestsCookieJar() + + if cookie_dict is not None: + names_from_jar = [cookie.name for cookie in cookiejar] + for name in cookie_dict: + if overwrite or (name not in names_from_jar): + cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) + + return cookiejar + + +def merge_cookies(cookiejar, cookies): + """Add cookies to cookiejar and returns a merged CookieJar. + + :param cookiejar: CookieJar object to add the cookies to. + :param cookies: Dictionary or CookieJar object to be added. + :rtype: CookieJar + """ + if not isinstance(cookiejar, cookielib.CookieJar): + raise ValueError("You can only merge into CookieJar") + + if isinstance(cookies, dict): + cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False) + elif isinstance(cookies, cookielib.CookieJar): + try: + cookiejar.update(cookies) + except AttributeError: + for cookie_in_jar in cookies: + cookiejar.set_cookie(cookie_in_jar) + + return cookiejar diff --git a/env-llmeval/lib/python3.10/site-packages/requests/models.py b/env-llmeval/lib/python3.10/site-packages/requests/models.py new file mode 100644 index 0000000000000000000000000000000000000000..617a4134e556774953a56d10a2f8f211b15a605f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/requests/models.py @@ -0,0 +1,1034 @@ +""" +requests.models +~~~~~~~~~~~~~~~ + +This module contains the primary objects that power Requests. +""" + +import datetime + +# Import encoding now, to avoid implicit import later. +# Implicit import within threads may cause LookupError when standard library is in a ZIP, +# such as in Embedded Python. 
See https://github.com/psf/requests/issues/3578. +import encodings.idna # noqa: F401 +from io import UnsupportedOperation + +from urllib3.exceptions import ( + DecodeError, + LocationParseError, + ProtocolError, + ReadTimeoutError, + SSLError, +) +from urllib3.fields import RequestField +from urllib3.filepost import encode_multipart_formdata +from urllib3.util import parse_url + +from ._internal_utils import to_native_string, unicode_is_ascii +from .auth import HTTPBasicAuth +from .compat import ( + Callable, + JSONDecodeError, + Mapping, + basestring, + builtin_str, + chardet, + cookielib, +) +from .compat import json as complexjson +from .compat import urlencode, urlsplit, urlunparse +from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header +from .exceptions import ( + ChunkedEncodingError, + ConnectionError, + ContentDecodingError, + HTTPError, + InvalidJSONError, + InvalidURL, +) +from .exceptions import JSONDecodeError as RequestsJSONDecodeError +from .exceptions import MissingSchema +from .exceptions import SSLError as RequestsSSLError +from .exceptions import StreamConsumedError +from .hooks import default_hooks +from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( + check_header_validity, + get_auth_from_url, + guess_filename, + guess_json_utf, + iter_slices, + parse_header_links, + requote_uri, + stream_decode_response_unicode, + super_len, + to_key_val_list, +) + +#: The set of HTTP status codes that indicate an automatically +#: processable redirect. +REDIRECT_STATI = ( + codes.moved, # 301 + codes.found, # 302 + codes.other, # 303 + codes.temporary_redirect, # 307 + codes.permanent_redirect, # 308 +) + +DEFAULT_REDIRECT_LIMIT = 30 +CONTENT_CHUNK_SIZE = 10 * 1024 +ITER_CHUNK_SIZE = 512 + + +class RequestEncodingMixin: + @property + def path_url(self): + """Build the path URL to use.""" + + url = [] + + p = urlsplit(self.url) + + path = p.path + if not path: + path = "/" + + url.append(path) + + query = p.query + if query: + url.append("?") + url.append(query) + + return "".join(url) + + @staticmethod + def _encode_params(data): + """Encode parameters in a piece of data. + + Will successfully encode parameters when passed as a dict or a list of + 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary + if parameters are supplied as a dict. + """ + + if isinstance(data, (str, bytes)): + return data + elif hasattr(data, "read"): + return data + elif hasattr(data, "__iter__"): + result = [] + for k, vs in to_key_val_list(data): + if isinstance(vs, basestring) or not hasattr(vs, "__iter__"): + vs = [vs] + for v in vs: + if v is not None: + result.append( + ( + k.encode("utf-8") if isinstance(k, str) else k, + v.encode("utf-8") if isinstance(v, str) else v, + ) + ) + return urlencode(result, doseq=True) + else: + return data + + @staticmethod + def _encode_files(files, data): + """Build the body for a multipart/form-data request. + + Will successfully encode files when passed as a dict or a list of + tuples. Order is retained if data is a list of tuples but arbitrary + if parameters are supplied as a dict. + The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) + or 4-tuples (filename, fileobj, contentype, custom_headers). 
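+
+        A minimal sketch (hypothetical in-memory file)::
+
+            >>> body, content_type = RequestEncodingMixin._encode_files(
+            ...     {'f': ('a.txt', b'data', 'text/plain')}, None)
+            >>> content_type.startswith('multipart/form-data')
+            True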
+ """ + if not files: + raise ValueError("Files must be provided.") + elif isinstance(data, basestring): + raise ValueError("Data must not be a string.") + + new_fields = [] + fields = to_key_val_list(data or {}) + files = to_key_val_list(files or {}) + + for field, val in fields: + if isinstance(val, basestring) or not hasattr(val, "__iter__"): + val = [val] + for v in val: + if v is not None: + # Don't call str() on bytestrings: in Py3 it all goes wrong. + if not isinstance(v, bytes): + v = str(v) + + new_fields.append( + ( + field.decode("utf-8") + if isinstance(field, bytes) + else field, + v.encode("utf-8") if isinstance(v, str) else v, + ) + ) + + for (k, v) in files: + # support for explicit filename + ft = None + fh = None + if isinstance(v, (tuple, list)): + if len(v) == 2: + fn, fp = v + elif len(v) == 3: + fn, fp, ft = v + else: + fn, fp, ft, fh = v + else: + fn = guess_filename(v) or k + fp = v + + if isinstance(fp, (str, bytes, bytearray)): + fdata = fp + elif hasattr(fp, "read"): + fdata = fp.read() + elif fp is None: + continue + else: + fdata = fp + + rf = RequestField(name=k, data=fdata, filename=fn, headers=fh) + rf.make_multipart(content_type=ft) + new_fields.append(rf) + + body, content_type = encode_multipart_formdata(new_fields) + + return body, content_type + + +class RequestHooksMixin: + def register_hook(self, event, hook): + """Properly register a hook.""" + + if event not in self.hooks: + raise ValueError(f'Unsupported event specified, with event name "{event}"') + + if isinstance(hook, Callable): + self.hooks[event].append(hook) + elif hasattr(hook, "__iter__"): + self.hooks[event].extend(h for h in hook if isinstance(h, Callable)) + + def deregister_hook(self, event, hook): + """Deregister a previously registered hook. + Returns True if the hook existed, False if not. + """ + + try: + self.hooks[event].remove(hook) + return True + except ValueError: + return False + + +class Request(RequestHooksMixin): + """A user-created :class:`Request ` object. + + Used to prepare a :class:`PreparedRequest `, which is sent to the server. + + :param method: HTTP method to use. + :param url: URL to send. + :param headers: dictionary of headers to send. + :param files: dictionary of {filename: fileobject} files to multipart upload. + :param data: the body to attach to the request. If a dictionary or + list of tuples ``[(key, value)]`` is provided, form-encoding will + take place. + :param json: json for the body to attach to the request (if files or data is not specified). + :param params: URL parameters to append to the URL. If a dictionary or + list of tuples ``[(key, value)]`` is provided, form-encoding will + take place. + :param auth: Auth handler or (user, pass) tuple. + :param cookies: dictionary or CookieJar of cookies to attach to this request. + :param hooks: dictionary of callback hooks, for internal usage. + + Usage:: + + >>> import requests + >>> req = requests.Request('GET', 'https://httpbin.org/get') + >>> req.prepare() + + """ + + def __init__( + self, + method=None, + url=None, + headers=None, + files=None, + data=None, + params=None, + auth=None, + cookies=None, + hooks=None, + json=None, + ): + + # Default empty dicts for dict params. 
+ data = [] if data is None else data + files = [] if files is None else files + headers = {} if headers is None else headers + params = {} if params is None else params + hooks = {} if hooks is None else hooks + + self.hooks = default_hooks() + for (k, v) in list(hooks.items()): + self.register_hook(event=k, hook=v) + + self.method = method + self.url = url + self.headers = headers + self.files = files + self.data = data + self.json = json + self.params = params + self.auth = auth + self.cookies = cookies + + def __repr__(self): + return f"" + + def prepare(self): + """Constructs a :class:`PreparedRequest ` for transmission and returns it.""" + p = PreparedRequest() + p.prepare( + method=self.method, + url=self.url, + headers=self.headers, + files=self.files, + data=self.data, + json=self.json, + params=self.params, + auth=self.auth, + cookies=self.cookies, + hooks=self.hooks, + ) + return p + + +class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): + """The fully mutable :class:`PreparedRequest ` object, + containing the exact bytes that will be sent to the server. + + Instances are generated from a :class:`Request ` object, and + should not be instantiated manually; doing so may produce undesirable + effects. + + Usage:: + + >>> import requests + >>> req = requests.Request('GET', 'https://httpbin.org/get') + >>> r = req.prepare() + >>> r + + + >>> s = requests.Session() + >>> s.send(r) + + """ + + def __init__(self): + #: HTTP verb to send to the server. + self.method = None + #: HTTP URL to send the request to. + self.url = None + #: dictionary of HTTP headers. + self.headers = None + # The `CookieJar` used to create the Cookie header will be stored here + # after prepare_cookies is called + self._cookies = None + #: request body to send to the server. + self.body = None + #: dictionary of callback hooks, for internal usage. + self.hooks = default_hooks() + #: integer denoting starting position of a readable file-like body. + self._body_position = None + + def prepare( + self, + method=None, + url=None, + headers=None, + files=None, + data=None, + params=None, + auth=None, + cookies=None, + hooks=None, + json=None, + ): + """Prepares the entire request with the given parameters.""" + + self.prepare_method(method) + self.prepare_url(url, params) + self.prepare_headers(headers) + self.prepare_cookies(cookies) + self.prepare_body(data, files, json) + self.prepare_auth(auth, url) + + # Note that prepare_auth must be last to enable authentication schemes + # such as OAuth to work on a fully prepared request. + + # This MUST go after prepare_auth. Authenticators could add a hook + self.prepare_hooks(hooks) + + def __repr__(self): + return f"" + + def copy(self): + p = PreparedRequest() + p.method = self.method + p.url = self.url + p.headers = self.headers.copy() if self.headers is not None else None + p._cookies = _copy_cookie_jar(self._cookies) + p.body = self.body + p.hooks = self.hooks + p._body_position = self._body_position + return p + + def prepare_method(self, method): + """Prepares the given HTTP method.""" + self.method = method + if self.method is not None: + self.method = to_native_string(self.method.upper()) + + @staticmethod + def _get_idna_encoded_host(host): + import idna + + try: + host = idna.encode(host, uts46=True).decode("utf-8") + except idna.IDNAError: + raise UnicodeError + return host + + def prepare_url(self, url, params): + """Prepares the given HTTP URL.""" + #: Accept objects that have string representations. 
+ #: We're unable to blindly call unicode/str functions + #: as this will include the bytestring indicator (b'') + #: on python 3.x. + #: https://github.com/psf/requests/pull/2238 + if isinstance(url, bytes): + url = url.decode("utf8") + else: + url = str(url) + + # Remove leading whitespaces from url + url = url.lstrip() + + # Don't do any URL preparation for non-HTTP schemes like `mailto`, + # `data` etc to work around exceptions from `url_parse`, which + # handles RFC 3986 only. + if ":" in url and not url.lower().startswith("http"): + self.url = url + return + + # Support for unicode domain names and paths. + try: + scheme, auth, host, port, path, query, fragment = parse_url(url) + except LocationParseError as e: + raise InvalidURL(*e.args) + + if not scheme: + raise MissingSchema( + f"Invalid URL {url!r}: No scheme supplied. " + f"Perhaps you meant https://{url}?" + ) + + if not host: + raise InvalidURL(f"Invalid URL {url!r}: No host supplied") + + # In general, we want to try IDNA encoding the hostname if the string contains + # non-ASCII characters. This allows users to automatically get the correct IDNA + # behaviour. For strings containing only ASCII characters, we need to also verify + # it doesn't start with a wildcard (*), before allowing the unencoded hostname. + if not unicode_is_ascii(host): + try: + host = self._get_idna_encoded_host(host) + except UnicodeError: + raise InvalidURL("URL has an invalid label.") + elif host.startswith(("*", ".")): + raise InvalidURL("URL has an invalid label.") + + # Carefully reconstruct the network location + netloc = auth or "" + if netloc: + netloc += "@" + netloc += host + if port: + netloc += f":{port}" + + # Bare domains aren't valid URLs. + if not path: + path = "/" + + if isinstance(params, (str, bytes)): + params = to_native_string(params) + + enc_params = self._encode_params(params) + if enc_params: + if query: + query = f"{query}&{enc_params}" + else: + query = enc_params + + url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) + self.url = url + + def prepare_headers(self, headers): + """Prepares the given HTTP headers.""" + + self.headers = CaseInsensitiveDict() + if headers: + for header in headers.items(): + # Raise exception on invalid header value. + check_header_validity(header) + name, value = header + self.headers[to_native_string(name)] = value + + def prepare_body(self, data, files, json=None): + """Prepares the given HTTP body data.""" + + # Check if file, fo, generator, iterator. + # If not, run through normal process. + + # Nottin' on you. + body = None + content_type = None + + if not data and json is not None: + # urllib3 requires a bytes-like body. Python 2's json.dumps + # provides this natively, but Python 3 gives a Unicode string. + content_type = "application/json" + + try: + body = complexjson.dumps(json, allow_nan=False) + except ValueError as ve: + raise InvalidJSONError(ve, request=self) + + if not isinstance(body, bytes): + body = body.encode("utf-8") + + is_stream = all( + [ + hasattr(data, "__iter__"), + not isinstance(data, (basestring, list, tuple, Mapping)), + ] + ) + + if is_stream: + try: + length = super_len(data) + except (TypeError, AttributeError, UnsupportedOperation): + length = None + + body = data + + if getattr(body, "tell", None) is not None: + # Record the current file position before reading. + # This will allow us to rewind a file in the event + # of a redirect. 
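+            # (e.g. a 307/308 redirect requires resending the same body, so
+            # the redirect logic can later seek back to this recorded offset.)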
+ try: + self._body_position = body.tell() + except OSError: + # This differentiates from None, allowing us to catch + # a failed `tell()` later when trying to rewind the body + self._body_position = object() + + if files: + raise NotImplementedError( + "Streamed bodies and files are mutually exclusive." + ) + + if length: + self.headers["Content-Length"] = builtin_str(length) + else: + self.headers["Transfer-Encoding"] = "chunked" + else: + # Multi-part file uploads. + if files: + (body, content_type) = self._encode_files(files, data) + else: + if data: + body = self._encode_params(data) + if isinstance(data, basestring) or hasattr(data, "read"): + content_type = None + else: + content_type = "application/x-www-form-urlencoded" + + self.prepare_content_length(body) + + # Add content-type if it wasn't explicitly provided. + if content_type and ("content-type" not in self.headers): + self.headers["Content-Type"] = content_type + + self.body = body + + def prepare_content_length(self, body): + """Prepare Content-Length header based on request method and body""" + if body is not None: + length = super_len(body) + if length: + # If length exists, set it. Otherwise, we fallback + # to Transfer-Encoding: chunked. + self.headers["Content-Length"] = builtin_str(length) + elif ( + self.method not in ("GET", "HEAD") + and self.headers.get("Content-Length") is None + ): + # Set Content-Length to 0 for methods that can have a body + # but don't provide one. (i.e. not GET or HEAD) + self.headers["Content-Length"] = "0" + + def prepare_auth(self, auth, url=""): + """Prepares the given HTTP auth data.""" + + # If no Auth is explicitly provided, extract it from the URL first. + if auth is None: + url_auth = get_auth_from_url(self.url) + auth = url_auth if any(url_auth) else None + + if auth: + if isinstance(auth, tuple) and len(auth) == 2: + # special-case basic HTTP auth + auth = HTTPBasicAuth(*auth) + + # Allow auth to make its changes. + r = auth(self) + + # Update self to reflect the auth changes. + self.__dict__.update(r.__dict__) + + # Recompute Content-Length + self.prepare_content_length(self.body) + + def prepare_cookies(self, cookies): + """Prepares the given HTTP cookie data. + + This function eventually generates a ``Cookie`` header from the + given cookies using cookielib. Due to cookielib's design, the header + will not be regenerated if it already exists, meaning this function + can only be called once for the life of the + :class:`PreparedRequest ` object. Any subsequent calls + to ``prepare_cookies`` will have no actual effect, unless the "Cookie" + header is removed beforehand. + """ + if isinstance(cookies, cookielib.CookieJar): + self._cookies = cookies + else: + self._cookies = cookiejar_from_dict(cookies) + + cookie_header = get_cookie_header(self._cookies, self) + if cookie_header is not None: + self.headers["Cookie"] = cookie_header + + def prepare_hooks(self, hooks): + """Prepares the given hooks.""" + # hooks can be passed as None to the prepare method and to this + # method. To prevent iterating over None, simply use an empty list + # if hooks is False-y + hooks = hooks or [] + for event in hooks: + self.register_hook(event, hooks[event]) + + +class Response: + """The :class:`Response ` object, which contains a + server's response to an HTTP request. 
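+
+    Usage (a live-network sketch, mirroring the module docstring)::
+
+        >>> r = requests.get('https://www.python.org')
+        >>> r.status_code
+        200
+        >>> type(r.elapsed)
+        <class 'datetime.timedelta'>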
+ """ + + __attrs__ = [ + "_content", + "status_code", + "headers", + "url", + "history", + "encoding", + "reason", + "cookies", + "elapsed", + "request", + ] + + def __init__(self): + self._content = False + self._content_consumed = False + self._next = None + + #: Integer Code of responded HTTP Status, e.g. 404 or 200. + self.status_code = None + + #: Case-insensitive Dictionary of Response Headers. + #: For example, ``headers['content-encoding']`` will return the + #: value of a ``'Content-Encoding'`` response header. + self.headers = CaseInsensitiveDict() + + #: File-like object representation of response (for advanced usage). + #: Use of ``raw`` requires that ``stream=True`` be set on the request. + #: This requirement does not apply for use internally to Requests. + self.raw = None + + #: Final URL location of Response. + self.url = None + + #: Encoding to decode with when accessing r.text. + self.encoding = None + + #: A list of :class:`Response ` objects from + #: the history of the Request. Any redirect responses will end + #: up here. The list is sorted from the oldest to the most recent request. + self.history = [] + + #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". + self.reason = None + + #: A CookieJar of Cookies the server sent back. + self.cookies = cookiejar_from_dict({}) + + #: The amount of time elapsed between sending the request + #: and the arrival of the response (as a timedelta). + #: This property specifically measures the time taken between sending + #: the first byte of the request and finishing parsing the headers. It + #: is therefore unaffected by consuming the response content or the + #: value of the ``stream`` keyword argument. + self.elapsed = datetime.timedelta(0) + + #: The :class:`PreparedRequest ` object to which this + #: is a response. + self.request = None + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def __getstate__(self): + # Consume everything; accessing the content attribute makes + # sure the content has been fully read. + if not self._content_consumed: + self.content + + return {attr: getattr(self, attr, None) for attr in self.__attrs__} + + def __setstate__(self, state): + for name, value in state.items(): + setattr(self, name, value) + + # pickled objects do not have .raw + setattr(self, "_content_consumed", True) + setattr(self, "raw", None) + + def __repr__(self): + return f"" + + def __bool__(self): + """Returns True if :attr:`status_code` is less than 400. + + This attribute checks if the status code of the response is between + 400 and 600 to see if there was a client error or a server error. If + the status code, is between 200 and 400, this will return True. This + is **not** a check to see if the response code is ``200 OK``. + """ + return self.ok + + def __nonzero__(self): + """Returns True if :attr:`status_code` is less than 400. + + This attribute checks if the status code of the response is between + 400 and 600 to see if there was a client error or a server error. If + the status code, is between 200 and 400, this will return True. This + is **not** a check to see if the response code is ``200 OK``. + """ + return self.ok + + def __iter__(self): + """Allows you to use a response as an iterator.""" + return self.iter_content(128) + + @property + def ok(self): + """Returns True if :attr:`status_code` is less than 400, False if not. 
+
+    @property
+    def is_redirect(self):
+        """True if this Response is a well-formed HTTP redirect that could have
+        been processed automatically (by :meth:`Session.resolve_redirects`).
+        """
+        return "location" in self.headers and self.status_code in REDIRECT_STATI
+
+    @property
+    def is_permanent_redirect(self):
+        """True if this Response is one of the permanent versions of redirect."""
+        return "location" in self.headers and self.status_code in (
+            codes.moved_permanently,
+            codes.permanent_redirect,
+        )
+
+    @property
+    def next(self):
+        """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
+        return self._next
+
+    @property
+    def apparent_encoding(self):
+        """The apparent encoding, provided by the charset_normalizer or chardet libraries."""
+        return chardet.detect(self.content)["encoding"]
+
+    def iter_content(self, chunk_size=1, decode_unicode=False):
+        """Iterates over the response data. When stream=True is set on the
+        request, this avoids reading the content at once into memory for
+        large responses. The chunk size is the number of bytes it should
+        read into memory. This is not necessarily the length of each item
+        returned as decoding can take place.
+
+        chunk_size must be of type int or None. A value of None will
+        function differently depending on the value of `stream`.
+        stream=True will read data as it arrives in whatever size the
+        chunks are received. If stream=False, data is returned as
+        a single chunk.
+
+        If decode_unicode is True, content will be decoded using the best
+        available encoding based on the response.
+        """
+
+        def generate():
+            # Special case for urllib3.
+            if hasattr(self.raw, "stream"):
+                try:
+                    yield from self.raw.stream(chunk_size, decode_content=True)
+                except ProtocolError as e:
+                    raise ChunkedEncodingError(e)
+                except DecodeError as e:
+                    raise ContentDecodingError(e)
+                except ReadTimeoutError as e:
+                    raise ConnectionError(e)
+                except SSLError as e:
+                    raise RequestsSSLError(e)
+            else:
+                # Standard file-like object.
+                while True:
+                    chunk = self.raw.read(chunk_size)
+                    if not chunk:
+                        break
+                    yield chunk
+
+            self._content_consumed = True
+
+        if self._content_consumed and isinstance(self._content, bool):
+            raise StreamConsumedError()
+        elif chunk_size is not None and not isinstance(chunk_size, int):
+            raise TypeError(
+                f"chunk_size must be an int, it is instead a {type(chunk_size)}."
+            )
+        # simulate reading small chunks of the content
+        reused_chunks = iter_slices(self._content, chunk_size)
+
+        stream_chunks = generate()
+
+        chunks = reused_chunks if self._content_consumed else stream_chunks
+
+        if decode_unicode:
+            chunks = stream_decode_response_unicode(chunks, self)
+
+        return chunks
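To see the non-urllib3 branch of `iter_content` in isolation, an `io.BytesIO` object can stand in for `raw`, since it is a plain file-like object without a `stream` method (a contrived setup for illustration only; in normal use `raw` is supplied by the transport adapter):

    import io
    from requests.models import Response

    r = Response()
    r.raw = io.BytesIO(b"spam and eggs")

    print(list(r.iter_content(chunk_size=5)))
    # [b'spam ', b'and e', b'ggs']

    # The raw stream is now exhausted and _content was never cached, so a
    # second iteration attempt would raise StreamConsumedError.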
+ """ + + pending = None + + for chunk in self.iter_content( + chunk_size=chunk_size, decode_unicode=decode_unicode + ): + + if pending is not None: + chunk = pending + chunk + + if delimiter: + lines = chunk.split(delimiter) + else: + lines = chunk.splitlines() + + if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: + pending = lines.pop() + else: + pending = None + + yield from lines + + if pending is not None: + yield pending + + @property + def content(self): + """Content of the response, in bytes.""" + + if self._content is False: + # Read the contents. + if self._content_consumed: + raise RuntimeError("The content for this response was already consumed") + + if self.status_code == 0 or self.raw is None: + self._content = None + else: + self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b"" + + self._content_consumed = True + # don't need to release the connection; that's been handled by urllib3 + # since we exhausted the data. + return self._content + + @property + def text(self): + """Content of the response, in unicode. + + If Response.encoding is None, encoding will be guessed using + ``charset_normalizer`` or ``chardet``. + + The encoding of the response content is determined based solely on HTTP + headers, following RFC 2616 to the letter. If you can take advantage of + non-HTTP knowledge to make a better guess at the encoding, you should + set ``r.encoding`` appropriately before accessing this property. + """ + + # Try charset from content-type + content = None + encoding = self.encoding + + if not self.content: + return "" + + # Fallback to auto-detected encoding. + if self.encoding is None: + encoding = self.apparent_encoding + + # Decode unicode from given encoding. + try: + content = str(self.content, encoding, errors="replace") + except (LookupError, TypeError): + # A LookupError is raised if the encoding was not found which could + # indicate a misspelling or similar mistake. + # + # A TypeError can be raised if encoding is None + # + # So we try blindly encoding. + content = str(self.content, errors="replace") + + return content + + def json(self, **kwargs): + r"""Returns the json-encoded content of a response, if any. + + :param \*\*kwargs: Optional arguments that ``json.loads`` takes. + :raises requests.exceptions.JSONDecodeError: If the response body does not + contain valid json. + """ + + if not self.encoding and self.content and len(self.content) > 3: + # No encoding set. JSON RFC 4627 section 3 states we should expect + # UTF-8, -16 or -32. Detect which one to use; If the detection or + # decoding fails, fall back to `self.text` (using charset_normalizer to make + # a best guess). + encoding = guess_json_utf(self.content) + if encoding is not None: + try: + return complexjson.loads(self.content.decode(encoding), **kwargs) + except UnicodeDecodeError: + # Wrong UTF codec detected; usually because it's not UTF-8 + # but some other 8-bit codec. This is an RFC violation, + # and the server didn't bother to tell us what codec *was* + # used. 
+
+    def json(self, **kwargs):
+        r"""Returns the json-encoded content of a response, if any.
+
+        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
+        :raises requests.exceptions.JSONDecodeError: If the response body does not
+            contain valid json.
+        """
+
+        if not self.encoding and self.content and len(self.content) > 3:
+            # No encoding set. JSON RFC 4627 section 3 states we should expect
+            # UTF-8, -16 or -32. Detect which one to use; If the detection or
+            # decoding fails, fall back to `self.text` (using charset_normalizer to make
+            # a best guess).
+            encoding = guess_json_utf(self.content)
+            if encoding is not None:
+                try:
+                    return complexjson.loads(self.content.decode(encoding), **kwargs)
+                except UnicodeDecodeError:
+                    # Wrong UTF codec detected; usually because it's not UTF-8
+                    # but some other 8-bit codec. This is an RFC violation,
+                    # and the server didn't bother to tell us what codec *was*
+                    # used.
+                    pass
+                except JSONDecodeError as e:
+                    raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
+
+        try:
+            return complexjson.loads(self.text, **kwargs)
+        except JSONDecodeError as e:
+            # Catch JSON-related errors and raise as requests.JSONDecodeError
+            # This aliases json.JSONDecodeError and simplejson.JSONDecodeError
+            raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
+
+    @property
+    def links(self):
+        """Returns the parsed header links of the response, if any."""
+
+        header = self.headers.get("link")
+
+        resolved_links = {}
+
+        if header:
+            links = parse_header_links(header)
+
+            for link in links:
+                key = link.get("rel") or link.get("url")
+                resolved_links[key] = link
+
+        return resolved_links
+
+    def raise_for_status(self):
+        """Raises :class:`HTTPError`, if one occurred."""
+
+        http_error_msg = ""
+        if isinstance(self.reason, bytes):
+            # We attempt to decode utf-8 first because some servers
+            # choose to localize their reason strings. If the string
+            # isn't utf-8, we fall back to iso-8859-1 for all other
+            # encodings. (See PR #3538)
+            try:
+                reason = self.reason.decode("utf-8")
+            except UnicodeDecodeError:
+                reason = self.reason.decode("iso-8859-1")
+        else:
+            reason = self.reason
+
+        if 400 <= self.status_code < 500:
+            http_error_msg = (
+                f"{self.status_code} Client Error: {reason} for url: {self.url}"
+            )
+
+        elif 500 <= self.status_code < 600:
+            http_error_msg = (
+                f"{self.status_code} Server Error: {reason} for url: {self.url}"
+            )
+
+        if http_error_msg:
+            raise HTTPError(http_error_msg, response=self)
+
+    def close(self):
+        """Releases the connection back to the pool. Once this method has been
+        called the underlying ``raw`` object must not be accessed again.
+
+        *Note: Should not normally need to be called explicitly.*
+        """
+        if not self._content_consumed:
+            self.raw.close()
+
+        release_conn = getattr(self.raw, "release_conn", None)
+        if release_conn is not None:
+            release_conn()
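A sketch of the `raise_for_status()` contract described above: 4xx codes raise a "Client Error" `HTTPError`, 5xx codes a "Server Error" one, and anything below 400 passes silently (the status, reason, and URL here are hypothetical values set by hand):

    from requests.models import Response
    from requests.exceptions import HTTPError

    r = Response()
    r.status_code = 502
    r.reason = "Bad Gateway"
    r.url = "https://example.com/api"  # placeholder URL

    try:
        r.raise_for_status()
    except HTTPError as exc:
        print(exc)
    # 502 Server Error: Bad Gateway for url: https://example.com/api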
+ """ + + def __init__(self, data=None, **kwargs): + self._store = OrderedDict() + if data is None: + data = {} + self.update(data, **kwargs) + + def __setitem__(self, key, value): + # Use the lowercased key for lookups, but store the actual + # key alongside the value. + self._store[key.lower()] = (key, value) + + def __getitem__(self, key): + return self._store[key.lower()][1] + + def __delitem__(self, key): + del self._store[key.lower()] + + def __iter__(self): + return (casedkey for casedkey, mappedvalue in self._store.values()) + + def __len__(self): + return len(self._store) + + def lower_items(self): + """Like iteritems(), but with all lowercase keys.""" + return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items()) + + def __eq__(self, other): + if isinstance(other, Mapping): + other = CaseInsensitiveDict(other) + else: + return NotImplemented + # Compare insensitively + return dict(self.lower_items()) == dict(other.lower_items()) + + # Copy is required + def copy(self): + return CaseInsensitiveDict(self._store.values()) + + def __repr__(self): + return str(dict(self.items())) + + +class LookupDict(dict): + """Dictionary lookup object.""" + + def __init__(self, name=None): + self.name = name + super().__init__() + + def __repr__(self): + return f"" + + def __getitem__(self, key): + # We allow fall-through here, so values default to None + + return self.__dict__.get(key, None) + + def get(self, key, default=None): + return self.__dict__.get(key, default)