diff --git a/llmeval-env/lib/python3.10/site-packages/__pycache__/__editable___lm_eval_0_4_2_finder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/__pycache__/__editable___lm_eval_0_4_2_finder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e18ac8e4efcb1731d8adee2227917b45a0bbd32c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/__pycache__/__editable___lm_eval_0_4_2_finder.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/__pycache__/isympy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/__pycache__/isympy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3def44b1bb72697b23b9fbbeb7c6fa06f587c2ec
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/__pycache__/isympy.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/__pycache__/six.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/__pycache__/six.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0cf95f2b1ef81803d7ef1fed5cef050f4e29982
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/__pycache__/six.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/__pycache__/sqlitedict.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/__pycache__/sqlitedict.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ea8e4ffc915e70704b39398e7d36791f79d3b62
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/__pycache__/sqlitedict.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/__pycache__/threadpoolctl.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/__pycache__/threadpoolctl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b094d7495e92d4aac73d4e103a20bf307ffa0259
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/__pycache__/threadpoolctl.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/__pycache__/typing_extensions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/__pycache__/typing_extensions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66372d0e9829ea7c7a40eb239c29db6d40c61c3b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/__pycache__/typing_extensions.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/AUTHORS b/llmeval-env/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/AUTHORS
new file mode 100644
index 0000000000000000000000000000000000000000..23b11ada16bb8e69695cf52e5994784d98054e0d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/AUTHORS
@@ -0,0 +1,7 @@
+# This is the list of Abseil authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder. To see the full list
+# of contributors, see the revision history in source control.
+
+Google Inc.
diff --git a/llmeval-env/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..53e63acaec1b4983f4b540908cfe5741da93d274
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/METADATA
@@ -0,0 +1,84 @@
+Metadata-Version: 2.1
+Name: absl-py
+Version: 2.1.0
+Summary: Abseil Python Common Libraries, see https://github.com/abseil/abseil-py.
+Home-page: https://github.com/abseil/abseil-py
+Author: The Abseil Authors
+License: Apache 2.0
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
+License-File: LICENSE
+License-File: AUTHORS
+
+# Abseil Python Common Libraries
+
+This repository is a collection of Python library code for building Python
+applications. The code is collected from Google's own Python code base, and has
+been extensively tested and used in production.
+
+## Features
+
+* Simple application startup
+* Distributed commandline flags system
+* Custom logging module with additional features
+* Testing utilities
+
+## Getting Started
+
+### Installation
+
+To install the package, simply run:
+
+```bash
+pip install absl-py
+```
+
+Or install from source:
+
+```bash
+python setup.py install
+```
+
+### Running Tests
+
+To run Abseil tests, you can clone the git repo and run
+[bazel](https://bazel.build/):
+
+```bash
+git clone https://github.com/abseil/abseil-py.git
+cd abseil-py
+bazel test absl/...
+```
+
+### Example Code
+
+Please refer to
+[smoke_tests/sample_app.py](https://github.com/abseil/abseil-py/blob/main/smoke_tests/sample_app.py)
+as an example to get started.
+
+## Documentation
+
+See the [Abseil Python Developer Guide](https://abseil.io/docs/python/).
+
+## Future Releases
+
+The current repository includes an initial set of libraries for early adoption.
+More components and interoperability with Abseil C++ Common Libraries
+will come in future releases.
+
+## License
+
+The Abseil Python library is licensed under the terms of the Apache
+license. See [LICENSE](LICENSE) for more information.
diff --git a/llmeval-env/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..c972cbe72edb5146f8494bb476bbb9c71a9f80b6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/absl_py-2.1.0.dist-info/RECORD
@@ -0,0 +1,53 @@
+absl/__init__.py,sha256=7cM57swk2T1Hc5wxmt-JpcaR6xfdPJyL_lyRqgODvuM,584
+absl/__pycache__/__init__.cpython-310.pyc,,
+absl/__pycache__/app.cpython-310.pyc,,
+absl/__pycache__/command_name.cpython-310.pyc,,
+absl/app.py,sha256=DQROJ_Ovex6w2_nr_s7AHgXQle951XmcVtlNrMjfSFA,15374
+absl/app.pyi,sha256=DqRvFRos3oFk00lZJSKaHZuL_3-LnZl-ylg_VAXtPcc,1737
+absl/command_name.py,sha256=C7CuwMMedDLUOX88Et92QZb2se__nU7txgpO-01amxg,2301
+absl/flags/__init__.py,sha256=FgR_NxQG1xLA2ZxLU51HTrLWV5kbN9eSCI-47Z7D3WA,7728
+absl/flags/__pycache__/__init__.cpython-310.pyc,,
+absl/flags/__pycache__/_argument_parser.cpython-310.pyc,,
+absl/flags/__pycache__/_defines.cpython-310.pyc,,
+absl/flags/__pycache__/_exceptions.cpython-310.pyc,,
+absl/flags/__pycache__/_flag.cpython-310.pyc,,
+absl/flags/__pycache__/_flagvalues.cpython-310.pyc,,
+absl/flags/__pycache__/_helpers.cpython-310.pyc,,
+absl/flags/__pycache__/_validators.cpython-310.pyc,,
+absl/flags/__pycache__/_validators_classes.cpython-310.pyc,,
+absl/flags/__pycache__/argparse_flags.cpython-310.pyc,,
+absl/flags/_argument_parser.py,sha256=TQFhT0OcQuRO_1GTJoUvYC1KU6wV9f4Lc7jQmajBGi0,20934
+absl/flags/_defines.py,sha256=s_YA_tAHFU4wxrJqKLH5uMldTl1DtlUfSvgBbflXkQ8,52783
+absl/flags/_exceptions.py,sha256=Lws7ZZrlLJG83VHuOB4Z4CNfcSoKX5pJnsNRCtp-dMw,3657
+absl/flags/_flag.py,sha256=Sv_d7kDSZh-VNr4JGrBy4g7VxnbRspOOd5hO6wA94qk,19895
+absl/flags/_flagvalues.py,sha256=Gferpr9yg8Ntc6ij9tPiChliYz5jYWfVJoKzAREwNFw,54127
+absl/flags/_helpers.py,sha256=uWWeqbhc19kTXonfM7mNZT68ZakmJgu-v5IHeS9A9Xc,14081
+absl/flags/_validators.py,sha256=_hpVwThXQhL6PFOA9-L2ZRI-7zLu2UxU_hRJJWXYoHw,14144
+absl/flags/_validators_classes.py,sha256=KLBJhJAt8C18gy2Uq-q7bUFNS_AhPBlxlwGiNm5gWXU,6157
+absl/flags/argparse_flags.py,sha256=57E1HFa40tvnQ3DQzY3x1qdBUIxtfTTYAYONT_k8HOI,14485
+absl/logging/__init__.py,sha256=mzF3rusWjzLbuVdZI8SfPiIoqfWO9kBUhxVOvGZQTv4,42082
+absl/logging/__init__.pyi,sha256=NPAna_9rrYTVNIHLXUbdvsAZcNlv4IJs9yNnL59mxr8,5794
+absl/logging/__pycache__/__init__.cpython-310.pyc,,
+absl/logging/__pycache__/converter.cpython-310.pyc,,
+absl/logging/converter.py,sha256=eTucx1Ojix7YWMQUyWKzPRTrxGLuCkNsTmJa1GW6k94,6353
+absl/testing/__init__.py,sha256=7cM57swk2T1Hc5wxmt-JpcaR6xfdPJyL_lyRqgODvuM,584
+absl/testing/__pycache__/__init__.cpython-310.pyc,,
+absl/testing/__pycache__/_bazelize_command.cpython-310.pyc,,
+absl/testing/__pycache__/_pretty_print_reporter.cpython-310.pyc,,
+absl/testing/__pycache__/absltest.cpython-310.pyc,,
+absl/testing/__pycache__/flagsaver.cpython-310.pyc,,
+absl/testing/__pycache__/parameterized.cpython-310.pyc,,
+absl/testing/__pycache__/xml_reporter.cpython-310.pyc,,
+absl/testing/_bazelize_command.py,sha256=R4rV4j5AOSp3PNkVQKP1I-SKYzQbXyeuiOT3d23cTLA,2302
+absl/testing/_pretty_print_reporter.py,sha256=nL5qSsYWF6O_C6L9PexwFSPxs68Wc85RhdhRBN2AgTw,3140
+absl/testing/absltest.py,sha256=sgb0TPgNP0_nLKcxrHBlifvUsgufnYURVR8Vau3f278,101119
+absl/testing/flagsaver.py,sha256=514JmVdCn-P0jsTntskCtUfxrHyp3urLdn2bzDd991s,13392
+absl/testing/parameterized.py,sha256=PT1P3X__WkFC_NyGWifUdJeqn-BM4JI3yy-1zsGaFEI,27807
+absl/testing/xml_reporter.py,sha256=k_9cWhw01RGCQImGDciTa_RrBEEuPZ3IPD5IASoRwwM,21720
+absl_py-2.1.0.dist-info/AUTHORS,sha256=YoLudsylaQg7W5mLn4FroQMuEnuNx8RpQrhkd_xvv6U,296
+absl_py-2.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+absl_py-2.1.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+absl_py-2.1.0.dist-info/METADATA,sha256=CTp5OILgEjYv4Y7dpCHzW5QmM57hl-2i-AizwFlnRYA,2311
+absl_py-2.1.0.dist-info/RECORD,,
+absl_py-2.1.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+absl_py-2.1.0.dist-info/top_level.txt,sha256=0M_1z27Hi5Bsj1EhTfE_ajdJdFxeP_aw0xXnR4BXXhI,5
diff --git a/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..033c86b7a40a331f281bd406e991ef1db597c208
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2016-2020 aio-libs collaboration.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..d343f6fbfb7feddb711b102ef4b683a1c1fa539e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/RECORD
@@ -0,0 +1,10 @@
+async_timeout-4.0.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+async_timeout-4.0.3.dist-info/LICENSE,sha256=4Y17uPUT4sRrtYXJS1hb0wcg3TzLId2weG9y0WZY-Sw,568
+async_timeout-4.0.3.dist-info/METADATA,sha256=WQVcnDIXQ2ntebcm-vYjhNLg_VMeTWw13_ReT-U36J4,4209
+async_timeout-4.0.3.dist-info/RECORD,,
+async_timeout-4.0.3.dist-info/WHEEL,sha256=5sUXSg9e4bi7lTLOHcm6QEYwO5TIF1TNbTSVFVjcJcc,92
+async_timeout-4.0.3.dist-info/top_level.txt,sha256=9oM4e7Twq8iD_7_Q3Mz0E6GPIB6vJvRFo-UBwUQtBDU,14
+async_timeout-4.0.3.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+async_timeout/__init__.py,sha256=A0VOqDGQ3cCPFp0NZJKIbx_VRP1Y2xPtQOZebVIUB88,7242
+async_timeout/__pycache__/__init__.cpython-310.pyc,,
+async_timeout/py.typed,sha256=tyozzRT1fziXETDxokmuyt6jhOmtjUbnVNJdZcG7ik0,12
diff --git a/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..2c08da084599354e5b2dbccb3ab716165e63d1a0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ad29955ef909f5f38e96b6d8a6c9ba54d9bccd53
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+async_timeout
diff --git a/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/zip-safe b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/zip-safe
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47e85069d1389e9283bc52f5b7c9449a3bfafe4d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_login.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_login.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae7da060526c07408c11240711e94c0017f0f4e8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_login.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_payload.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_payload.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..257cf4b73a1d5cb1f4d235034131fe65b7b30a50
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_payload.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d8114af213ed59f00daf29b30224732a5b4e73c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/constants.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07f7ccef4596c623c66461daf164659b83123f39
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/constants.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/errors.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/errors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72425e3b2d9441e196981c165babe14911fb3d10
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/errors.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/file_download.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/file_download.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24f83faa85d260ce6be35efe16907d53aac4ae20
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/file_download.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..daa327ddd8c318f1ca0b20c223b7477a85cbe78e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/repository.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/repository.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d4e1439191d62fa81104033f73fe112e48290c0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/__pycache__/repository.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dba34793c36bc5a1a09478df0c7edfc531a00db5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..39dead49f20266e1918ab04faf091d46719865cf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e95651ba68d0dc6caa616418d08363c773d16ca8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/download.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/download.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b287f68f5a8ec64e559545bf8ec1575f5b97f98
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/download.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/env.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/env.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e403e04a2ec3a9fa8f7028965e37b8cdc2b5b417
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/env.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb86681faa8000ad3e71804ee03449efd24e028b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b9b2b00c29e23929e9124ce96aa44b11902473a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..03cffe514246a56f769a8f2ef646b24b7bafac79
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/tag.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/tag.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1169d4dd83f8a067b23e06902e4efd0751a8ace
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/tag.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d375c9921ea449b12a164f366b5010f69122f97e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/user.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/user.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25490564c8ddf8b0d57df2a4e3ca9b6fd2ad6dc6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/__pycache__/user.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/user.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/user.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cde3ac04c8a0773cad0a767a8d14b36dce4ee2e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/commands/user.py
@@ -0,0 +1,188 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import subprocess
+from argparse import _SubParsersAction
+
+from requests.exceptions import HTTPError
+
+from huggingface_hub.commands import BaseHuggingfaceCLICommand
+from huggingface_hub.constants import (
+ ENDPOINT,
+ REPO_TYPES,
+ REPO_TYPES_URL_PREFIXES,
+ SPACES_SDK_TYPES,
+)
+from huggingface_hub.hf_api import HfApi
+
+from .._login import ( # noqa: F401 # for backward compatibility
+ NOTEBOOK_LOGIN_PASSWORD_HTML,
+ NOTEBOOK_LOGIN_TOKEN_HTML_END,
+ NOTEBOOK_LOGIN_TOKEN_HTML_START,
+ login,
+ logout,
+ notebook_login,
+)
+from ..utils import get_token
+from ._cli_utils import ANSI
+
+
+class UserCommands(BaseHuggingfaceCLICommand):
+ @staticmethod
+ def register_subcommand(parser: _SubParsersAction):
+ login_parser = parser.add_parser("login", help="Log in using a token from huggingface.co/settings/tokens")
+ login_parser.add_argument(
+ "--token",
+ type=str,
+ help="Token generated from https://huggingface.co/settings/tokens",
+ )
+ login_parser.add_argument(
+ "--add-to-git-credential",
+ action="store_true",
+ help="Optional: Save token to git credential helper.",
+ )
+ login_parser.set_defaults(func=lambda args: LoginCommand(args))
+ whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
+ whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
+ logout_parser = parser.add_parser("logout", help="Log out")
+ logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
+
+ # new system: git-based repo system
+ repo_parser = parser.add_parser("repo", help="{create} Commands to interact with your huggingface.co repos.")
+ repo_subparsers = repo_parser.add_subparsers(help="huggingface.co repos related commands")
+ repo_create_parser = repo_subparsers.add_parser("create", help="Create a new repo on huggingface.co")
+ repo_create_parser.add_argument(
+ "name",
+ type=str,
+ help="Name for your repo. Will be namespaced under your username to build the repo id.",
+ )
+ repo_create_parser.add_argument(
+ "--type",
+ type=str,
+ help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.',
+ )
+ repo_create_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
+ repo_create_parser.add_argument(
+ "--space_sdk",
+ type=str,
+ help='Optional: Hugging Face Spaces SDK type. Required when --type is set to "space".',
+ choices=SPACES_SDK_TYPES,
+ )
+ repo_create_parser.add_argument(
+ "-y",
+ "--yes",
+ action="store_true",
+ help="Optional: answer Yes to the prompt",
+ )
+ repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))
+
+
+class BaseUserCommand:
+ def __init__(self, args):
+ self.args = args
+ self._api = HfApi()
+
+
+class LoginCommand(BaseUserCommand):
+ def run(self):
+ login(token=self.args.token, add_to_git_credential=self.args.add_to_git_credential)
+
+
+class LogoutCommand(BaseUserCommand):
+ def run(self):
+ logout()
+
+
+class WhoamiCommand(BaseUserCommand):
+ def run(self):
+ token = get_token()
+ if token is None:
+ print("Not logged in")
+ exit()
+ try:
+ info = self._api.whoami(token)
+ print(info["name"])
+ orgs = [org["name"] for org in info["orgs"]]
+ if orgs:
+ print(ANSI.bold("orgs: "), ",".join(orgs))
+
+ if ENDPOINT != "https://huggingface.co":
+ print(f"Authenticated through private endpoint: {ENDPOINT}")
+ except HTTPError as e:
+ print(e)
+ print(ANSI.red(e.response.text))
+ exit(1)
+
+
+class RepoCreateCommand(BaseUserCommand):
+ def run(self):
+ token = get_token()
+ if token is None:
+ print("Not logged in")
+ exit(1)
+ try:
+ stdout = subprocess.check_output(["git", "--version"]).decode("utf-8")
+ print(ANSI.gray(stdout.strip()))
+ except FileNotFoundError:
+ print("Looks like you do not have git installed, please install.")
+
+ try:
+ stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8")
+ print(ANSI.gray(stdout.strip()))
+ except FileNotFoundError:
+ print(
+ ANSI.red(
+ "Looks like you do not have git-lfs installed, please install."
+ " You can install from https://git-lfs.github.com/."
+ " Then run `git lfs install` (you only have to do this once)."
+ )
+ )
+ print("")
+
+ user = self._api.whoami(token)["name"]
+ namespace = self.args.organization if self.args.organization is not None else user
+
+ repo_id = f"{namespace}/{self.args.name}"
+
+ if self.args.type not in REPO_TYPES:
+ print("Invalid repo --type")
+ exit(1)
+
+ if self.args.type in REPO_TYPES_URL_PREFIXES:
+ prefixed_repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id
+ else:
+ prefixed_repo_id = repo_id
+
+ print(f"You are about to create {ANSI.bold(prefixed_repo_id)}")
+
+ if not self.args.yes:
+ choice = input("Proceed? [Y/n] ").lower()
+ if not (choice == "" or choice == "y" or choice == "yes"):
+ print("Abort")
+ exit()
+ try:
+ url = self._api.create_repo(
+ repo_id=repo_id,
+ token=token,
+ repo_type=self.args.type,
+ space_sdk=self.args.space_sdk,
+ )
+ except HTTPError as e:
+ print(e)
+ print(ANSI.red(e.response.text))
+ exit(1)
+ print("\nYour repo now lives at:")
+ print(f" {ANSI.bold(url)}")
+ print("\nYou can clone it locally with the command below, and commit/push as usual.")
+ print(f"\n git clone {url}")
+ print("")
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b640cfd15c77c02b50a55d9e82c6a78d6388c21e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fbc52fbcb865934623f43d407af829e3c4a9713c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11644ab97aa55a79b45564555816ddfd57961e0d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..acd689d2eadad125d62d89bff2aeaa7113963a0d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1e5d5944899631583bf8cd9b7b95ee47f6b1920
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..63438d754072cb9a874519ccd89174eeafdbf05e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86457cf7f9a824e7a8553044d153c96e20d87e8e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28714fd03a83e2ddc84ffe1e07d54b4152f34a1a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/visual_question_answering.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/visual_question_answering.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07c6de0bedd33f70c9fcaa2af15cc1819e3d58db
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/visual_question_answering.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8eec65557a0f0b1448b78f69871b083d7656be66
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__init__.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bb6c2d0a1556d8119d3b400a9adcff73b34e6e5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ruff: noqa: F401
+"""Contains helpers to serialize tensors."""
+
+from ._base import StateDictSplit, split_state_dict_into_shards_factory
+from ._numpy import split_numpy_state_dict_into_shards
+from ._tensorflow import split_tf_state_dict_into_shards
+from ._torch import split_torch_state_dict_into_shards
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81662af1a4b09c3b6b214d29b3434f5eb513a562
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48cb6be69e09be435b0c20b17bff27a10b7b81b6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_base.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_numpy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_numpy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa9b47cafa4eafb200cd7a142361c5713c317cb4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_numpy.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_tensorflow.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_tensorflow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c972c23bd69938f72af43019d4f3d5ca64aba7d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_tensorflow.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_torch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_torch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68b73bfdf20408b629edd5668fcc33937bd8bda9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_torch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_base.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7f7bba89263d31d5facb1ebed66c5f701dba973
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_base.py
@@ -0,0 +1,169 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains helpers to split tensors into shards."""
+
+from dataclasses import dataclass, field
+from typing import Any, Callable, Dict, List, Optional, TypeVar
+
+from .. import logging
+
+
+TensorT = TypeVar("TensorT")
+TensorSizeFn_T = Callable[[TensorT], int]
+StorageIDFn_T = Callable[[TensorT], Optional[Any]]
+
+MAX_SHARD_SIZE = 5_000_000_000 # 5GB
+FILENAME_PATTERN = "model{suffix}.safetensors"
+
+logger = logging.get_logger(__file__)
+
+
+@dataclass
+class StateDictSplit:
+ is_sharded: bool = field(init=False)
+ metadata: Dict[str, Any]
+ filename_to_tensors: Dict[str, List[str]]
+ tensor_to_filename: Dict[str, str]
+
+ def __post_init__(self):
+ self.is_sharded = len(self.filename_to_tensors) > 1
+
+
+def split_state_dict_into_shards_factory(
+ state_dict: Dict[str, TensorT],
+ *,
+ get_tensor_size: TensorSizeFn_T,
+ get_storage_id: StorageIDFn_T = lambda tensor: None,
+ filename_pattern: str = FILENAME_PATTERN,
+ max_shard_size: int = MAX_SHARD_SIZE,
+) -> StateDictSplit:
+ """
+ Split a model state dictionary into shards so that each shard is smaller than a given size.
+
+ The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization
+ made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we
+ have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
+ [6+2+2GB], [6+2GB], [6GB].
+
+
+ If one of the model's tensors is bigger than `max_shard_size`, it will end up in its own shard, which will then
+ have a size greater than `max_shard_size`.
+
+ Args:
+ state_dict (`Dict[str, Tensor]`):
+ The state dictionary to save.
+ get_tensor_size (`Callable[[Tensor], int]`):
+ A function that returns the size of a tensor in bytes.
+ get_storage_id (`Callable[[Tensor], Optional[Any]]`, *optional*):
+ A function that returns a unique identifier for a tensor's storage. Multiple different tensors can share the
+ same underlying storage. This identifier is guaranteed to be unique and constant for this tensor's storage
+ during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id.
+ filename_pattern (`str`, *optional*):
+ The pattern used to generate the filenames in which the model will be saved. The pattern must be a string
+ that can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`.
+ Defaults to `"model{suffix}.safetensors"`.
+ max_shard_size (`int`, *optional*):
+ The maximum size of each shard, in bytes. Defaults to 5GB.
+
+ Returns:
+ [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.
+ """
+ storage_id_to_tensors: Dict[Any, List[str]] = {}
+
+ shard_list: List[Dict[str, TensorT]] = []
+ current_shard: Dict[str, TensorT] = {}
+ current_shard_size = 0
+ total_size = 0
+
+ for key, tensor in state_dict.items():
+ # when bnb serialization is used the weights in the state dict can be strings
+ # check: https://github.com/huggingface/transformers/pull/24416 for more details
+ if isinstance(tensor, str):
+ logger.info("Skipping tensor %s as it is a string (bnb serialization)", key)
+ continue
+
+ # If a `tensor` shares the same underlying storage as another tensor, we put `tensor` in the same `block`
+ storage_id = get_storage_id(tensor)
+ if storage_id is not None:
+ if storage_id in storage_id_to_tensors:
+ # We skip this tensor for now and will reassign to correct shard later
+ storage_id_to_tensors[storage_id].append(key)
+ continue
+ else:
+ # This is the first tensor with this storage_id, we create a new entry
+ # in the storage_id_to_tensors dict => we will assign the shard id later
+ storage_id_to_tensors[storage_id] = [key]
+
+ # Compute tensor size
+ tensor_size = get_tensor_size(tensor)
+
+ # If this tensor is bigger than the maximal size, we put it in its own shard
+ if tensor_size > max_shard_size:
+ total_size += tensor_size
+ shard_list.append({key: tensor})
+ continue
+
+ # If adding this tensor would tip the current shard over the maximal size, we split.
+ # Current shard already has some tensors, we add it to the list of shards and create a new one.
+ if current_shard_size + tensor_size > max_shard_size:
+ shard_list.append(current_shard)
+ current_shard = {}
+ current_shard_size = 0
+
+ # Add the tensor to the current shard
+ current_shard[key] = tensor
+ current_shard_size += tensor_size
+ total_size += tensor_size
+
+ # Add the last shard
+ if len(current_shard) > 0:
+ shard_list.append(current_shard)
+ nb_shards = len(shard_list)
+
+ # Loop over the tensors that share the same storage and assign them together
+ for storage_id, keys in storage_id_to_tensors.items():
+ # Let's try to find the shard where the first tensor of this storage is and put all tensors in the same shard
+ for shard in shard_list:
+ if keys[0] in shard:
+ for key in keys:
+ shard[key] = state_dict[key]
+ break
+
+ # If we only have one shard, we return it => no need to build the index
+ if nb_shards == 1:
+ filename = filename_pattern.format(suffix="")
+ return StateDictSplit(
+ metadata={"total_size": total_size},
+ filename_to_tensors={filename: list(state_dict.keys())},
+ tensor_to_filename={key: filename for key in state_dict.keys()},
+ )
+
+ # Now that each tensor is assigned to a shard, let's assign a filename to each shard
+ tensor_name_to_filename = {}
+ filename_to_tensors = {}
+ for idx, shard in enumerate(shard_list):
+ filename = filename_pattern.format(suffix=f"-{idx+1:05d}-of-{nb_shards:05d}")
+ for key in shard:
+ tensor_name_to_filename[key] = filename
+ filename_to_tensors[filename] = list(shard.keys())
+
+ # Build the index and return
+ return StateDictSplit(
+ metadata={"total_size": total_size},
+ filename_to_tensors=filename_to_tensors,
+ tensor_to_filename=tensor_name_to_filename,
+ )
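
Note: a quick way to see the greedy packing in `split_state_dict_into_shards_factory` is to run it on plain integers, using each value as its own byte size. A toy sketch, assuming this version of `huggingface_hub` is importable:

```python
from huggingface_hub.serialization import split_state_dict_into_shards_factory

# Integers stand in for tensors; each value doubles as its "size" in bytes.
split = split_state_dict_into_shards_factory(
    {"a": 3, "b": 2, "c": 4},
    get_tensor_size=lambda t: t,
    max_shard_size=5,
)
print(split.is_sharded)  # True: "a" and "b" fill one 5-byte shard, "c" spills over
print(split.filename_to_tensors)
# {'model-00001-of-00002.safetensors': ['a', 'b'],
#  'model-00002-of-00002.safetensors': ['c']}
```
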
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_numpy.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_numpy.py
new file mode 100644
index 0000000000000000000000000000000000000000..214c77d9acde2a14069f403ed337e6c8c57047ad
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_numpy.py
@@ -0,0 +1,68 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains numpy-specific helpers."""
+
+from typing import TYPE_CHECKING, Dict
+
+from ._base import FILENAME_PATTERN, MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory
+
+
+if TYPE_CHECKING:
+ import numpy as np
+
+
+def split_numpy_state_dict_into_shards(
+ state_dict: Dict[str, "np.ndarray"],
+ *,
+ filename_pattern: str = FILENAME_PATTERN,
+ max_shard_size: int = MAX_SHARD_SIZE,
+) -> StateDictSplit:
+ """
+ Split a model state dictionary into shards so that each shard is smaller than a given size.
+
+ The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization
+ made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we
+ have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
+ [6+2+2GB], [6+2GB], [6GB].
+
+ If one of the model's tensors is bigger than `max_shard_size`, it will end up in its own shard, which will then
+ have a size greater than `max_shard_size`.
+
+ Args:
+ state_dict (`Dict[str, np.ndarray]`):
+ The state dictionary to save.
+ filename_pattern (`str`, *optional*):
+ The pattern used to generate the filenames in which the model will be saved. The pattern must be a string
+ that can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`.
+ Defaults to `"model{suffix}.safetensors"`.
+ max_shard_size (`int`, *optional*):
+ The maximum size of each shard, in bytes. Defaults to 5GB.
+
+ Returns:
+ [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.
+ """
+ return split_state_dict_into_shards_factory(
+ state_dict,
+ max_shard_size=max_shard_size,
+ filename_pattern=filename_pattern,
+ get_tensor_size=get_tensor_size,
+ )
+
+
+def get_tensor_size(tensor: "np.ndarray") -> int:
+ return tensor.nbytes
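
Note: for the numpy wrapper, `get_tensor_size` is just `ndarray.nbytes`, so shard boundaries follow raw array sizes. A small sketch, assuming numpy and this version of `huggingface_hub` are installed:

```python
import numpy as np

from huggingface_hub.serialization import split_numpy_state_dict_into_shards

state_dict = {
    "embed": np.zeros((256, 4), dtype=np.float32),  # 4096 bytes
    "head": np.zeros((512, 4), dtype=np.float32),   # 8192 bytes
}
# 4096 + 8192 > 10_000, so "head" is pushed into a second shard.
split = split_numpy_state_dict_into_shards(state_dict, max_shard_size=10_000)
print(split.metadata["total_size"])  # 12288
print(split.tensor_to_filename)      # maps each key to its shard filename
```
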
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_tensorflow.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_tensorflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8d752c083063d3e9772b69982e8f979fbda53ea
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_tensorflow.py
@@ -0,0 +1,94 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains tensorflow-specific helpers."""
+
+import math
+import re
+from typing import TYPE_CHECKING, Dict
+
+from ._base import MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory
+
+
+if TYPE_CHECKING:
+ import tensorflow as tf
+
+
+def split_tf_state_dict_into_shards(
+ state_dict: Dict[str, "tf.Tensor"],
+ *,
+ filename_pattern: str = "tf_model{suffix}.h5",
+ max_shard_size: int = MAX_SHARD_SIZE,
+) -> StateDictSplit:
+ """
+ Split a model state dictionary into shards so that each shard is smaller than a given size.
+
+ The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization
+ made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we
+ have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
+ [6+2+2GB], [6+2GB], [6GB].
+
+ If one of the model's tensors is bigger than `max_shard_size`, it will end up in its own shard, which will then
+ have a size greater than `max_shard_size`.
+
+ Args:
+ state_dict (`Dict[str, Tensor]`):
+ The state dictionary to save.
+ filename_pattern (`str`, *optional*):
+ The pattern used to generate the filenames in which the model will be saved. The pattern must be a string
+ that can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`.
+ Defaults to `"tf_model{suffix}.h5"`.
+ max_shard_size (`int`, *optional*):
+ The maximum size of each shard, in bytes. Defaults to 5GB.
+
+ Returns:
+ [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.
+ """
+ return split_state_dict_into_shards_factory(
+ state_dict,
+ max_shard_size=max_shard_size,
+ filename_pattern=filename_pattern,
+ get_tensor_size=get_tensor_size,
+ )
+
+
+def get_tensor_size(tensor: "tf.Tensor") -> int:
+ # Return `math.ceil` since dtype byte size can be a float (e.g., 0.125 for tf.bool).
+ # Better to overestimate than underestimate.
+ return math.ceil(tensor.numpy().size * _dtype_byte_size_tf(tensor.dtype))
+
+
+def _dtype_byte_size_tf(dtype) -> float:
+ """
+ Returns the size (in bytes) occupied by one parameter of type `dtype`.
+ Taken from https://github.com/huggingface/transformers/blob/74d9d0cebb0263a3f8ab9c280569170cc74651d0/src/transformers/modeling_tf_utils.py#L608.
+ NOTE: why not `tensor.numpy().nbytes`?
+ Example:
+ ```py
+ >>> _dtype_byte_size_tf(tf.float32)
+ 4
+ ```
+ """
+ import tensorflow as tf
+
+ if dtype == tf.bool:
+ return 1 / 8
+ bit_search = re.search(r"[^\d](\d+)$", dtype.name)
+ if bit_search is None:
+ raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
+ bit_size = int(bit_search.groups()[0])
+ return bit_size // 8
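
Note: the byte size here is recovered from the trailing digits of the dtype name ("float32" -> 32 bits -> 4 bytes), with `tf.bool` special-cased. A standalone illustration of that parsing, written against plain dtype-name strings so it runs without tensorflow installed:

```python
import re


def byte_size_from_name(name: str) -> float:
    """Mirror _dtype_byte_size_tf, but on dtype names instead of tf dtypes."""
    if name == "bool":
        return 1 / 8  # booleans pack to less than one byte per element
    match = re.search(r"[^\d](\d+)$", name)  # trailing digit run, e.g. "32"
    if match is None:
        raise ValueError(f"not a sized dtype name: {name}")
    return int(match.groups()[0]) // 8


print(byte_size_from_name("float32"))   # 4
print(byte_size_from_name("bfloat16"))  # 2
print(byte_size_from_name("bool"))      # 0.125
```
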
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_torch.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_torch.py
new file mode 100644
index 0000000000000000000000000000000000000000..00ab7e2c80d7a8fde928588c284213fca2100cf3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/serialization/_torch.py
@@ -0,0 +1,200 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains pytorch-specific helpers."""
+
+import importlib
+from functools import lru_cache
+from typing import TYPE_CHECKING, Dict, Tuple
+
+from ._base import FILENAME_PATTERN, MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory
+
+
+if TYPE_CHECKING:
+ import torch
+
+
+def split_torch_state_dict_into_shards(
+ state_dict: Dict[str, "torch.Tensor"],
+ *,
+ filename_pattern: str = FILENAME_PATTERN,
+ max_shard_size: int = MAX_SHARD_SIZE,
+) -> StateDictSplit:
+ """
+ Split a model state dictionary into shards so that each shard is smaller than a given size.
+
+ The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization
+ made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we
+ have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
+ [6+2+2GB], [6+2GB], [6GB].
+
+ If one of the model's tensors is bigger than `max_shard_size`, it will end up in its own shard, which will then
+ have a size greater than `max_shard_size`.
+
+ Args:
+ state_dict (`Dict[str, torch.Tensor]`):
+ The state dictionary to save.
+ filename_pattern (`str`, *optional*):
+ The pattern used to generate the filenames in which the model will be saved. The pattern must be a string
+ that can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`.
+ Defaults to `"model{suffix}.safetensors"`.
+ max_shard_size (`int`, *optional*):
+ The maximum size of each shard, in bytes. Defaults to 5GB.
+
+ Returns:
+ [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.
+
+ Example:
+ ```py
+ >>> import json
+ >>> import os
+ >>> from typing import Dict
+ >>> import torch
+ >>> from safetensors.torch import save_file as safe_save_file
+ >>> from huggingface_hub import split_torch_state_dict_into_shards
+
+ >>> def save_state_dict(state_dict: Dict[str, torch.Tensor], save_directory: str):
+ ... state_dict_split = split_torch_state_dict_into_shards(state_dict)
+ ... for filename, tensors in state_dict_split.filename_to_tensors.items():
+ ... shard = {tensor: state_dict[tensor] for tensor in tensors}
+ ... safe_save_file(
+ ... shard,
+ ... os.path.join(save_directory, filename),
+ ... metadata={"format": "pt"},
+ ... )
+ ... if state_dict_split.is_sharded:
+ ... index = {
+ ... "metadata": state_dict_split.metadata,
+ ... "weight_map": state_dict_split.tensor_to_filename,
+ ... }
+ ... with open(os.path.join(save_directory, "model.safetensors.index.json"), "w") as f:
+ ... f.write(json.dumps(index, indent=2))
+ ```
+ """
+ return split_state_dict_into_shards_factory(
+ state_dict,
+ max_shard_size=max_shard_size,
+ filename_pattern=filename_pattern,
+ get_tensor_size=get_tensor_size,
+ get_storage_id=get_storage_id,
+ )
+
+
+def get_storage_id(tensor: "torch.Tensor") -> Tuple["torch.device", int, int]:
+ """
+ Return a unique identifier for a tensor's storage.
+
+ Multiple different tensors can share the same underlying storage. For
+ example, "meta" tensors all share the same storage, and thus their identifiers will all be equal. This identifier is
+ guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
+ non-overlapping lifetimes may have the same id.
+
+ Taken from https://github.com/huggingface/transformers/blob/1ecf5f7c982d761b4daaa96719d162c324187c64/src/transformers/pytorch_utils.py#L278.
+ """
+ if tensor.device.type == "xla" and is_torch_tpu_available():
+        # NOTE: xla tensors don't have storage, so we use another unique id to
+        # distinguish them. This is an XLA tensor; it must have been created with
+        # torch_xla's device, so the following import is safe:
+ import torch_xla
+
+ unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor)
+ else:
+ unique_id = storage_ptr(tensor)
+
+ return tensor.device, unique_id, get_storage_size(tensor)
+
+
+def get_tensor_size(tensor: "torch.Tensor") -> int:
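+    """Return the size of a tensor in bytes (number of elements times the per-element size)."""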
+ return tensor.numel() * tensor.element_size()
+
+
+@lru_cache()
+def is_torch_tpu_available(check_device=True):
+ """
+    Checks if `torch_xla` is installed and, when `check_device=True`, whether a TPU device can be found in the environment.
+
+ Taken from https://github.com/huggingface/transformers/blob/1ecf5f7c982d761b4daaa96719d162c324187c64/src/transformers/utils/import_utils.py#L463.
+ """
+ if importlib.util.find_spec("torch_xla") is not None:
+ if check_device:
+            # Check that an XLA device can be found: `xla_device()` raises a RuntimeError if not
+ try:
+ import torch_xla.core.xla_model as xm
+
+ _ = xm.xla_device()
+ return True
+ except RuntimeError:
+ return False
+ return True
+ return False
+
+
+def storage_ptr(tensor: "torch.Tensor") -> int:
+ """
+ Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L11C1-L20C21.
+ """
+ try:
+ return tensor.untyped_storage().data_ptr()
+ except Exception:
+ # Fallback for torch==1.10
+ try:
+ return tensor.storage().data_ptr()
+ except NotImplementedError:
+ # Fallback for meta storage
+ return 0
+
+
+def get_storage_size(tensor: "torch.Tensor") -> int:
+ """
+ Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L31C1-L41C59
+ """
+ try:
+ return tensor.untyped_storage().nbytes()
+ except AttributeError:
+ # Fallback for torch==1.10
+ try:
+ return tensor.storage().size() * _get_dtype_size(tensor.dtype)
+ except NotImplementedError:
+ # Fallback for meta storage
+ # On torch >=2.0 this is the tensor size
+ return tensor.nelement() * _get_dtype_size(tensor.dtype)
+
+
+@lru_cache()
+def _get_dtype_size(dtype: "torch.dtype") -> int:
+ """
+ Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L344
+ """
+ import torch
+
+ # torch.float8 formats require 2.1; we do not support these dtypes on earlier versions
+ _float8_e4m3fn = getattr(torch, "float8_e4m3fn", None)
+ _float8_e5m2 = getattr(torch, "float8_e5m2", None)
+ _SIZE = {
+ torch.int64: 8,
+ torch.float32: 4,
+ torch.int32: 4,
+ torch.bfloat16: 2,
+ torch.float16: 2,
+ torch.int16: 2,
+ torch.uint8: 1,
+ torch.int8: 1,
+ torch.bool: 1,
+ torch.float64: 8,
+ _float8_e4m3fn: 1,
+ _float8_e5m2: 1,
+ }
+ return _SIZE[dtype]
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_cache_manager.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_cache_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..78ce8fdadc58ac3112324be25c02cbb56637f86e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_cache_manager.py
@@ -0,0 +1,813 @@
+# coding=utf-8
+# Copyright 2022-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains utilities to manage the HF cache directory."""
+
+import os
+import shutil
+import time
+from collections import defaultdict
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Dict, FrozenSet, List, Literal, Optional, Set, Union
+
+from ..constants import HF_HUB_CACHE
+from . import logging
+
+
+logger = logging.get_logger(__name__)
+
+REPO_TYPE_T = Literal["model", "dataset", "space"]
+
+# List of OS-created helper files that need to be ignored
+FILES_TO_IGNORE = [".DS_Store"]
+
+
+class CacheNotFound(Exception):
+ """Exception thrown when the Huggingface cache is not found."""
+
+ cache_dir: Union[str, Path]
+
+ def __init__(self, msg: str, cache_dir: Union[str, Path], *args, **kwargs):
+ super().__init__(msg, *args, **kwargs)
+ self.cache_dir = cache_dir
+
+
+class CorruptedCacheException(Exception):
+ """Exception for any unexpected structure in the Huggingface cache-system."""
+
+
+@dataclass(frozen=True)
+class CachedFileInfo:
+ """Frozen data structure holding information about a single cached file.
+
+ Args:
+ file_name (`str`):
+ Name of the file. Example: `config.json`.
+ file_path (`Path`):
+ Path of the file in the `snapshots` directory. The file path is a symlink
+ referring to a blob in the `blobs` folder.
+ blob_path (`Path`):
+ Path of the blob file. This is equivalent to `file_path.resolve()`.
+ size_on_disk (`int`):
+ Size of the blob file in bytes.
+ blob_last_accessed (`float`):
+ Timestamp of the last time the blob file has been accessed (from any
+ revision).
+ blob_last_modified (`float`):
+ Timestamp of the last time the blob file has been modified/created.
+
+    The reliability of `blob_last_accessed` and `blob_last_modified` depends on the OS you
+    are using. See the [python documentation](https://docs.python.org/3/library/os.html#os.stat_result)
+    for more details.
+
+ """
+
+ file_name: str
+ file_path: Path
+ blob_path: Path
+ size_on_disk: int
+
+ blob_last_accessed: float
+ blob_last_modified: float
+
+ @property
+ def blob_last_accessed_str(self) -> str:
+ """
+ (property) Timestamp of the last time the blob file has been accessed (from any
+ revision), returned as a human-readable string.
+
+ Example: "2 weeks ago".
+ """
+ return _format_timesince(self.blob_last_accessed)
+
+ @property
+ def blob_last_modified_str(self) -> str:
+ """
+ (property) Timestamp of the last time the blob file has been modified, returned
+ as a human-readable string.
+
+ Example: "2 weeks ago".
+ """
+ return _format_timesince(self.blob_last_modified)
+
+ @property
+ def size_on_disk_str(self) -> str:
+ """
+ (property) Size of the blob file as a human-readable string.
+
+ Example: "42.2K".
+ """
+ return _format_size(self.size_on_disk)
+
+
+@dataclass(frozen=True)
+class CachedRevisionInfo:
+ """Frozen data structure holding information about a revision.
+
+    A revision corresponds to a folder in the `snapshots` folder and is populated with
+    the exact tree structure of the repo on the Hub but contains only symlinks. A
+    revision can either be referenced by one or more `refs` or be "detached" (no refs).
+
+ Args:
+ commit_hash (`str`):
+ Hash of the revision (unique).
+ Example: `"9338f7b671827df886678df2bdd7cc7b4f36dffd"`.
+ snapshot_path (`Path`):
+ Path to the revision directory in the `snapshots` folder. It contains the
+            exact tree structure of the repo on the Hub.
+        files (`FrozenSet[CachedFileInfo]`):
+ Set of [`~CachedFileInfo`] describing all files contained in the snapshot.
+ refs (`FrozenSet[str]`):
+ Set of `refs` pointing to this revision. If the revision has no `refs`, it
+ is considered detached.
+ Example: `{"main", "2.4.0"}` or `{"refs/pr/1"}`.
+ size_on_disk (`int`):
+            Sum of the blob file sizes that are symlinked by the revision.
+ last_modified (`float`):
+ Timestamp of the last time the revision has been created/modified.
+
+    `last_accessed` cannot be determined correctly on a single revision as blob files
+    are shared across revisions.
+
+    `size_on_disk` is not necessarily the sum of all file sizes because of possible
+    duplicated files. Besides, only blobs are taken into account, not the (negligible)
+    size of folders and symlinks.
+
+ """
+
+ commit_hash: str
+ snapshot_path: Path
+ size_on_disk: int
+ files: FrozenSet[CachedFileInfo]
+ refs: FrozenSet[str]
+
+ last_modified: float
+
+ @property
+ def last_modified_str(self) -> str:
+ """
+ (property) Timestamp of the last time the revision has been modified, returned
+ as a human-readable string.
+
+ Example: "2 weeks ago".
+ """
+ return _format_timesince(self.last_modified)
+
+ @property
+ def size_on_disk_str(self) -> str:
+ """
+ (property) Sum of the blob file sizes as a human-readable string.
+
+ Example: "42.2K".
+ """
+ return _format_size(self.size_on_disk)
+
+ @property
+ def nb_files(self) -> int:
+ """
+ (property) Total number of files in the revision.
+ """
+ return len(self.files)
+
+
+@dataclass(frozen=True)
+class CachedRepoInfo:
+ """Frozen data structure holding information about a cached repository.
+
+ Args:
+ repo_id (`str`):
+ Repo id of the repo on the Hub. Example: `"google/fleurs"`.
+ repo_type (`Literal["dataset", "model", "space"]`):
+ Type of the cached repo.
+ repo_path (`Path`):
+ Local path to the cached repo.
+ size_on_disk (`int`):
+ Sum of the blob file sizes in the cached repo.
+ nb_files (`int`):
+ Total number of blob files in the cached repo.
+ revisions (`FrozenSet[CachedRevisionInfo]`):
+ Set of [`~CachedRevisionInfo`] describing all revisions cached in the repo.
+ last_accessed (`float`):
+ Timestamp of the last time a blob file of the repo has been accessed.
+ last_modified (`float`):
+ Timestamp of the last time a blob file of the repo has been modified/created.
+
+    `size_on_disk` is not necessarily the sum of all revision sizes because of
+    duplicated files. Besides, only blobs are taken into account, not the (negligible)
+    size of folders and symlinks.
+
+    The reliability of `last_accessed` and `last_modified` depends on the OS you are using.
+    See the [python documentation](https://docs.python.org/3/library/os.html#os.stat_result)
+    for more details.
+
+ """
+
+ repo_id: str
+ repo_type: REPO_TYPE_T
+ repo_path: Path
+ size_on_disk: int
+ nb_files: int
+ revisions: FrozenSet[CachedRevisionInfo]
+
+ last_accessed: float
+ last_modified: float
+
+ @property
+ def last_accessed_str(self) -> str:
+ """
+ (property) Last time a blob file of the repo has been accessed, returned as a
+ human-readable string.
+
+ Example: "2 weeks ago".
+ """
+ return _format_timesince(self.last_accessed)
+
+ @property
+ def last_modified_str(self) -> str:
+ """
+ (property) Last time a blob file of the repo has been modified, returned as a
+ human-readable string.
+
+ Example: "2 weeks ago".
+ """
+ return _format_timesince(self.last_modified)
+
+ @property
+ def size_on_disk_str(self) -> str:
+ """
+ (property) Sum of the blob file sizes as a human-readable string.
+
+ Example: "42.2K".
+ """
+ return _format_size(self.size_on_disk)
+
+ @property
+ def refs(self) -> Dict[str, CachedRevisionInfo]:
+ """
+ (property) Mapping between `refs` and revision data structures.
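+
+        Example (illustrative): `{"main": CachedRevisionInfo(...), "refs/pr/1": CachedRevisionInfo(...)}`.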
+ """
+ return {ref: revision for revision in self.revisions for ref in revision.refs}
+
+
+@dataclass(frozen=True)
+class DeleteCacheStrategy:
+ """Frozen data structure holding the strategy to delete cached revisions.
+
+ This object is not meant to be instantiated programmatically but to be returned by
+ [`~utils.HFCacheInfo.delete_revisions`]. See documentation for usage example.
+
+ Args:
+        expected_freed_size (`int`):
+            Expected freed size (in bytes) once the strategy is executed.
+ blobs (`FrozenSet[Path]`):
+ Set of blob file paths to be deleted.
+ refs (`FrozenSet[Path]`):
+ Set of reference file paths to be deleted.
+ repos (`FrozenSet[Path]`):
+ Set of entire repo paths to be deleted.
+ snapshots (`FrozenSet[Path]`):
+ Set of snapshots to be deleted (directory of symlinks).
+ """
+
+ expected_freed_size: int
+ blobs: FrozenSet[Path]
+ refs: FrozenSet[Path]
+ repos: FrozenSet[Path]
+ snapshots: FrozenSet[Path]
+
+ @property
+ def expected_freed_size_str(self) -> str:
+ """
+ (property) Expected size that will be freed as a human-readable string.
+
+ Example: "42.2K".
+ """
+ return _format_size(self.expected_freed_size)
+
+ def execute(self) -> None:
+ """Execute the defined strategy.
+
+        If this method is interrupted, the cache might get corrupted. Deletion order is
+        implemented so that references and symlinks are deleted before the actual blob
+        files.
+
+        This method is irreversible. If executed, cached files are erased and must be
+        downloaded again.
+
+ """
+        # Deletion order matters. Blobs are deleted last so that the user can't end
+        # up in a state where a `ref` refers to a missing snapshot or a snapshot
+ # symlink refers to a deleted blob.
+
+ # Delete entire repos
+ for path in self.repos:
+ _try_delete_path(path, path_type="repo")
+
+ # Delete snapshot directories
+ for path in self.snapshots:
+ _try_delete_path(path, path_type="snapshot")
+
+ # Delete refs files
+ for path in self.refs:
+ _try_delete_path(path, path_type="ref")
+
+ # Delete blob files
+ for path in self.blobs:
+ _try_delete_path(path, path_type="blob")
+
+ logger.info(f"Cache deletion done. Saved {self.expected_freed_size_str}.")
+
+
+@dataclass(frozen=True)
+class HFCacheInfo:
+ """Frozen data structure holding information about the entire cache-system.
+
+ This data structure is returned by [`scan_cache_dir`] and is immutable.
+
+ Args:
+ size_on_disk (`int`):
+ Sum of all valid repo sizes in the cache-system.
+ repos (`FrozenSet[CachedRepoInfo]`):
+ Set of [`~CachedRepoInfo`] describing all valid cached repos found on the
+ cache-system while scanning.
+ warnings (`List[CorruptedCacheException]`):
+ List of [`~CorruptedCacheException`] that occurred while scanning the cache.
+ Those exceptions are captured so that the scan can continue. Corrupted repos
+ are skipped from the scan.
+
+    Here `size_on_disk` is equal to the sum of all repo sizes (only blobs). However, if
+    some cached repos are corrupted, their sizes are not taken into account.
+
+ """
+
+ size_on_disk: int
+ repos: FrozenSet[CachedRepoInfo]
+ warnings: List[CorruptedCacheException]
+
+ @property
+ def size_on_disk_str(self) -> str:
+ """
+ (property) Sum of all valid repo sizes in the cache-system as a human-readable
+ string.
+
+ Example: "42.2K".
+ """
+ return _format_size(self.size_on_disk)
+
+ def delete_revisions(self, *revisions: str) -> DeleteCacheStrategy:
+ """Prepare the strategy to delete one or more revisions cached locally.
+
+ Input revisions can be any revision hash. If a revision hash is not found in the
+ local cache, a warning is thrown but no error is raised. Revisions can be from
+        different cached repos since hashes are unique across repos.
+
+ Examples:
+ ```py
+ >>> from huggingface_hub import scan_cache_dir
+ >>> cache_info = scan_cache_dir()
+ >>> delete_strategy = cache_info.delete_revisions(
+ ... "81fd1d6e7847c99f5862c9fb81387956d99ec7aa"
+ ... )
+ >>> print(f"Will free {delete_strategy.expected_freed_size_str}.")
+ Will free 7.9K.
+ >>> delete_strategy.execute()
+ Cache deletion done. Saved 7.9K.
+ ```
+
+ ```py
+ >>> from huggingface_hub import scan_cache_dir
+ >>> scan_cache_dir().delete_revisions(
+ ... "81fd1d6e7847c99f5862c9fb81387956d99ec7aa",
+ ... "e2983b237dccf3ab4937c97fa717319a9ca1a96d",
+ ... "6c0e6080953db56375760c0471a8c5f2929baf11",
+ ... ).execute()
+ Cache deletion done. Saved 8.6G.
+ ```
+
+        `delete_revisions` returns a [`~utils.DeleteCacheStrategy`] object that needs to
+        be executed. The [`~utils.DeleteCacheStrategy`] is not meant to be modified but
+        allows having a dry run before actually executing the deletion.
+
+ """
+ hashes_to_delete: Set[str] = set(revisions)
+
+ repos_with_revisions: Dict[CachedRepoInfo, Set[CachedRevisionInfo]] = defaultdict(set)
+
+ for repo in self.repos:
+ for revision in repo.revisions:
+ if revision.commit_hash in hashes_to_delete:
+ repos_with_revisions[repo].add(revision)
+ hashes_to_delete.remove(revision.commit_hash)
+
+ if len(hashes_to_delete) > 0:
+ logger.warning(f"Revision(s) not found - cannot delete them: {', '.join(hashes_to_delete)}")
+
+ delete_strategy_blobs: Set[Path] = set()
+ delete_strategy_refs: Set[Path] = set()
+ delete_strategy_repos: Set[Path] = set()
+ delete_strategy_snapshots: Set[Path] = set()
+ delete_strategy_expected_freed_size = 0
+
+ for affected_repo, revisions_to_delete in repos_with_revisions.items():
+ other_revisions = affected_repo.revisions - revisions_to_delete
+
+ # If no other revisions, it means all revisions are deleted
+ # -> delete the entire cached repo
+ if len(other_revisions) == 0:
+ delete_strategy_repos.add(affected_repo.repo_path)
+ delete_strategy_expected_freed_size += affected_repo.size_on_disk
+ continue
+
+ # Some revisions of the repo will be deleted but not all. We need to filter
+ # which blob files will not be linked anymore.
+ for revision_to_delete in revisions_to_delete:
+ # Snapshot dir
+ delete_strategy_snapshots.add(revision_to_delete.snapshot_path)
+
+ # Refs dir
+ for ref in revision_to_delete.refs:
+ delete_strategy_refs.add(affected_repo.repo_path / "refs" / ref)
+
+ # Blobs dir
+ for file in revision_to_delete.files:
+ if file.blob_path not in delete_strategy_blobs:
+ is_file_alone = True
+ for revision in other_revisions:
+ for rev_file in revision.files:
+ if file.blob_path == rev_file.blob_path:
+ is_file_alone = False
+ break
+ if not is_file_alone:
+ break
+
+ # Blob file not referenced by remaining revisions -> delete
+ if is_file_alone:
+ delete_strategy_blobs.add(file.blob_path)
+ delete_strategy_expected_freed_size += file.size_on_disk
+
+ # Return the strategy instead of executing it.
+ return DeleteCacheStrategy(
+ blobs=frozenset(delete_strategy_blobs),
+ refs=frozenset(delete_strategy_refs),
+ repos=frozenset(delete_strategy_repos),
+ snapshots=frozenset(delete_strategy_snapshots),
+ expected_freed_size=delete_strategy_expected_freed_size,
+ )
+
+
+def scan_cache_dir(cache_dir: Optional[Union[str, Path]] = None) -> HFCacheInfo:
+ """Scan the entire HF cache-system and return a [`~HFCacheInfo`] structure.
+
+ Use `scan_cache_dir` in order to programmatically scan your cache-system. The cache
+ will be scanned repo by repo. If a repo is corrupted, a [`~CorruptedCacheException`]
+ will be thrown internally but captured and returned in the [`~HFCacheInfo`]
+ structure. Only valid repos get a proper report.
+
+ ```py
+ >>> from huggingface_hub import scan_cache_dir
+
+ >>> hf_cache_info = scan_cache_dir()
+ HFCacheInfo(
+ size_on_disk=3398085269,
+ repos=frozenset({
+ CachedRepoInfo(
+ repo_id='t5-small',
+ repo_type='model',
+ repo_path=PosixPath(...),
+ size_on_disk=970726914,
+ nb_files=11,
+ revisions=frozenset({
+ CachedRevisionInfo(
+ commit_hash='d78aea13fa7ecd06c29e3e46195d6341255065d5',
+ size_on_disk=970726339,
+ snapshot_path=PosixPath(...),
+ files=frozenset({
+ CachedFileInfo(
+ file_name='config.json',
+                        size_on_disk=1197,
+ file_path=PosixPath(...),
+ blob_path=PosixPath(...),
+ ),
+ CachedFileInfo(...),
+ ...
+ }),
+ ),
+ CachedRevisionInfo(...),
+ ...
+ }),
+ ),
+ CachedRepoInfo(...),
+ ...
+ }),
+ warnings=[
+ CorruptedCacheException("Snapshots dir doesn't exist in cached repo: ..."),
+ CorruptedCacheException(...),
+ ...
+ ],
+ )
+ ```
+
+ You can also print a detailed report directly from the `huggingface-cli` using:
+ ```text
+ > huggingface-cli scan-cache
+ REPO ID REPO TYPE SIZE ON DISK NB FILES REFS LOCAL PATH
+ --------------------------- --------- ------------ -------- ------------------- -------------------------------------------------------------------------
+ glue dataset 116.3K 15 1.17.0, main, 2.4.0 /Users/lucain/.cache/huggingface/hub/datasets--glue
+ google/fleurs dataset 64.9M 6 main, refs/pr/1 /Users/lucain/.cache/huggingface/hub/datasets--google--fleurs
+ Jean-Baptiste/camembert-ner model 441.0M 7 main /Users/lucain/.cache/huggingface/hub/models--Jean-Baptiste--camembert-ner
+ bert-base-cased model 1.9G 13 main /Users/lucain/.cache/huggingface/hub/models--bert-base-cased
+ t5-base model 10.1K 3 main /Users/lucain/.cache/huggingface/hub/models--t5-base
+ t5-small model 970.7M 11 refs/pr/1, main /Users/lucain/.cache/huggingface/hub/models--t5-small
+
+ Done in 0.0s. Scanned 6 repo(s) for a total of 3.4G.
+ Got 1 warning(s) while scanning. Use -vvv to print details.
+ ```
+
+ Args:
+        cache_dir (`str` or `Path`, *optional*):
+            Cache directory to scan. Defaults to the default HF cache directory.
+
+    Raises:
+        `CacheNotFound`
+            If the cache directory does not exist.
+
+        [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+            If the cache directory is a file instead of a directory.
+
+ Returns: a [`~HFCacheInfo`] object.
+ """
+ if cache_dir is None:
+ cache_dir = HF_HUB_CACHE
+
+ cache_dir = Path(cache_dir).expanduser().resolve()
+ if not cache_dir.exists():
+ raise CacheNotFound(
+ f"Cache directory not found: {cache_dir}. Please use `cache_dir` argument or set `HF_HUB_CACHE` environment variable.",
+ cache_dir=cache_dir,
+ )
+
+ if cache_dir.is_file():
+ raise ValueError(
+ f"Scan cache expects a directory but found a file: {cache_dir}. Please use `cache_dir` argument or set `HF_HUB_CACHE` environment variable."
+ )
+
+ repos: Set[CachedRepoInfo] = set()
+ warnings: List[CorruptedCacheException] = []
+ for repo_path in cache_dir.iterdir():
+ if repo_path.name == ".locks": # skip './.locks/' folder
+ continue
+ try:
+ repos.add(_scan_cached_repo(repo_path))
+ except CorruptedCacheException as e:
+ warnings.append(e)
+
+ return HFCacheInfo(
+ repos=frozenset(repos),
+ size_on_disk=sum(repo.size_on_disk for repo in repos),
+ warnings=warnings,
+ )
+
+
+def _scan_cached_repo(repo_path: Path) -> CachedRepoInfo:
+ """Scan a single cache repo and return information about it.
+
+ Any unexpected behavior will raise a [`~CorruptedCacheException`].
+ """
+ if not repo_path.is_dir():
+ raise CorruptedCacheException(f"Repo path is not a directory: {repo_path}")
+
+ if "--" not in repo_path.name:
+ raise CorruptedCacheException(f"Repo path is not a valid HuggingFace cache directory: {repo_path}")
+
+ repo_type, repo_id = repo_path.name.split("--", maxsplit=1)
+ repo_type = repo_type[:-1] # "models" -> "model"
+    repo_id = repo_id.replace("--", "/")  # "google--fleurs" -> "google/fleurs"
+
+ if repo_type not in {"dataset", "model", "space"}:
+ raise CorruptedCacheException(
+ f"Repo type must be `dataset`, `model` or `space`, found `{repo_type}` ({repo_path})."
+ )
+
+ blob_stats: Dict[Path, os.stat_result] = {} # Key is blob_path, value is blob stats
+
+ snapshots_path = repo_path / "snapshots"
+ refs_path = repo_path / "refs"
+
+ if not snapshots_path.exists() or not snapshots_path.is_dir():
+ raise CorruptedCacheException(f"Snapshots dir doesn't exist in cached repo: {snapshots_path}")
+
+ # Scan over `refs` directory
+
+ # key is revision hash, value is set of refs
+ refs_by_hash: Dict[str, Set[str]] = defaultdict(set)
+ if refs_path.exists():
+ # Example of `refs` directory
+ # ── refs
+ # ├── main
+ # └── refs
+ # └── pr
+ # └── 1
+ if refs_path.is_file():
+ raise CorruptedCacheException(f"Refs directory cannot be a file: {refs_path}")
+
+ for ref_path in refs_path.glob("**/*"):
+ # glob("**/*") iterates over all files and directories -> skip directories
+ if ref_path.is_dir():
+ continue
+
+ ref_name = str(ref_path.relative_to(refs_path))
+ with ref_path.open() as f:
+ commit_hash = f.read()
+
+ refs_by_hash[commit_hash].add(ref_name)
+
+ # Scan snapshots directory
+ cached_revisions: Set[CachedRevisionInfo] = set()
+ for revision_path in snapshots_path.iterdir():
+ # Ignore OS-created helper files
+ if revision_path.name in FILES_TO_IGNORE:
+ continue
+ if revision_path.is_file():
+ raise CorruptedCacheException(f"Snapshots folder corrupted. Found a file: {revision_path}")
+
+ cached_files = set()
+ for file_path in revision_path.glob("**/*"):
+ # glob("**/*") iterates over all files and directories -> skip directories
+ if file_path.is_dir():
+ continue
+
+ blob_path = Path(file_path).resolve()
+ if not blob_path.exists():
+ raise CorruptedCacheException(f"Blob missing (broken symlink): {blob_path}")
+
+ if blob_path not in blob_stats:
+ blob_stats[blob_path] = blob_path.stat()
+
+ cached_files.add(
+ CachedFileInfo(
+ file_name=file_path.name,
+ file_path=file_path,
+ size_on_disk=blob_stats[blob_path].st_size,
+ blob_path=blob_path,
+ blob_last_accessed=blob_stats[blob_path].st_atime,
+ blob_last_modified=blob_stats[blob_path].st_mtime,
+ )
+ )
+
+ # Last modified is either the last modified blob file or the revision folder
+ # itself if it is empty
+ if len(cached_files) > 0:
+ revision_last_modified = max(blob_stats[file.blob_path].st_mtime for file in cached_files)
+ else:
+ revision_last_modified = revision_path.stat().st_mtime
+
+ cached_revisions.add(
+ CachedRevisionInfo(
+ commit_hash=revision_path.name,
+ files=frozenset(cached_files),
+ refs=frozenset(refs_by_hash.pop(revision_path.name, set())),
+ size_on_disk=sum(
+ blob_stats[blob_path].st_size for blob_path in set(file.blob_path for file in cached_files)
+ ),
+ snapshot_path=revision_path,
+ last_modified=revision_last_modified,
+ )
+ )
+
+ # Check that all refs referred to an existing revision
+ if len(refs_by_hash) > 0:
+ raise CorruptedCacheException(
+ f"Reference(s) refer to missing commit hashes: {dict(refs_by_hash)} ({repo_path})."
+ )
+
+ # Last modified is either the last modified blob file or the repo folder itself if
+    # no blob files have been found. Same for last accessed.
+ if len(blob_stats) > 0:
+ repo_last_accessed = max(stat.st_atime for stat in blob_stats.values())
+ repo_last_modified = max(stat.st_mtime for stat in blob_stats.values())
+ else:
+ repo_stats = repo_path.stat()
+ repo_last_accessed = repo_stats.st_atime
+ repo_last_modified = repo_stats.st_mtime
+
+ # Build and return frozen structure
+ return CachedRepoInfo(
+ nb_files=len(blob_stats),
+ repo_id=repo_id,
+ repo_path=repo_path,
+ repo_type=repo_type, # type: ignore
+ revisions=frozenset(cached_revisions),
+ size_on_disk=sum(stat.st_size for stat in blob_stats.values()),
+ last_accessed=repo_last_accessed,
+ last_modified=repo_last_modified,
+ )
+
+
+def _format_size(num: int) -> str:
+ """Format size in bytes into a human-readable string.
+
+ Taken from https://stackoverflow.com/a/1094933
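+
+    Example (a quick sanity check; note this helper uses decimal units, dividing by 1000):
+    ```py
+    >>> _format_size(999)
+    '999.0'
+    >>> _format_size(1199)
+    '1.2K'
+    ```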
+ """
+ num_f = float(num)
+ for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
+ if abs(num_f) < 1000.0:
+ return f"{num_f:3.1f}{unit}"
+ num_f /= 1000.0
+ return f"{num_f:.1f}Y"
+
+
+_TIMESINCE_CHUNKS = (
+ # Label, divider, max value
+ ("second", 1, 60),
+ ("minute", 60, 60),
+ ("hour", 60 * 60, 24),
+ ("day", 60 * 60 * 24, 6),
+ ("week", 60 * 60 * 24 * 7, 6),
+ ("month", 60 * 60 * 24 * 30, 11),
+ ("year", 60 * 60 * 24 * 365, None),
+)
+
+
+def _format_timesince(ts: float) -> str:
+ """Format timestamp in seconds into a human-readable string, relative to now.
+
+ Vaguely inspired by Django's `timesince` formatter.
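+
+    Example (illustrative, relative to the current time):
+    ```py
+    >>> import time
+    >>> _format_timesince(time.time() - 2 * 60 * 60)
+    '2 hours ago'
+    ```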
+ """
+ delta = time.time() - ts
+ if delta < 20:
+ return "a few seconds ago"
+ for label, divider, max_value in _TIMESINCE_CHUNKS: # noqa: B007
+ value = round(delta / divider)
+ if max_value is not None and value <= max_value:
+ break
+ return f"{value} {label}{'s' if value > 1 else ''} ago"
+
+
+def _try_delete_path(path: Path, path_type: str) -> None:
+ """Try to delete a local file or folder.
+
+    If the path does not exist or permission is denied, the error is logged as a warning
+    and then ignored.
+
+    Args:
+        path (`Path`):
+            Path to delete. Can be a file or a folder.
+        path_type (`str`):
+            The kind of path being deleted (only used for logging). Example: "snapshot".
+ """
+ logger.info(f"Delete {path_type}: {path}")
+ try:
+ if path.is_file():
+ os.remove(path)
+ else:
+ shutil.rmtree(path)
+ except FileNotFoundError:
+ logger.warning(f"Couldn't delete {path_type}: file not found ({path})", exc_info=True)
+ except PermissionError:
+ logger.warning(f"Couldn't delete {path_type}: permission denied ({path})", exc_info=True)
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_chunk_utils.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_chunk_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0af032ae6a68f03676ad7fdb8e483248d9853f8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_chunk_utils.py
@@ -0,0 +1,65 @@
+# coding=utf-8
+# Copyright 2022-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains a utility to iterate by chunks over an iterator."""
+
+import itertools
+from typing import Iterable, TypeVar
+
+
+T = TypeVar("T")
+
+
+def chunk_iterable(iterable: Iterable[T], chunk_size: int) -> Iterable[Iterable[T]]:
+ """Iterates over an iterator chunk by chunk.
+
+ Taken from https://stackoverflow.com/a/8998040.
+ See also https://github.com/huggingface/huggingface_hub/pull/920#discussion_r938793088.
+
+ Args:
+ iterable (`Iterable`):
+ The iterable on which we want to iterate.
+ chunk_size (`int`):
+            Size of the chunks. Must be a strictly positive integer (i.e. > 0).
+
+ Example:
+
+ ```python
+ >>> from huggingface_hub.utils import chunk_iterable
+
+ >>> for items in chunk_iterable(range(17), chunk_size=8):
+ ... print(items)
+ # [0, 1, 2, 3, 4, 5, 6, 7]
+ # [8, 9, 10, 11, 12, 13, 14, 15]
+ # [16] # smaller last chunk
+ ```
+
+ Raises:
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+ If `chunk_size` <= 0.
+
+    Note: the last chunk can be smaller than `chunk_size`.
+
+ """
+ if not isinstance(chunk_size, int) or chunk_size <= 0:
+ raise ValueError("`chunk_size` must be a strictly positive integer (>0).")
+
+ iterator = iter(iterable)
+ while True:
+ try:
+ next_item = next(iterator)
+ except StopIteration:
+ return
+ yield itertools.chain((next_item,), itertools.islice(iterator, chunk_size - 1))
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_deprecation.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_deprecation.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cb8d6e418c76accd1ecd61158b4bdd265e12f71
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_deprecation.py
@@ -0,0 +1,136 @@
+import warnings
+from functools import wraps
+from inspect import Parameter, signature
+from typing import Iterable, Optional
+
+
+def _deprecate_positional_args(*, version: str):
+ """Decorator for methods that issues warnings for positional arguments.
+ Using the keyword-only argument syntax in pep 3102, arguments after the
+ * will issue a warning when passed as a positional argument.
+
+ Args:
+ version (`str`):
+            The version from which passing positional arguments will result in an error.
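+
+    Example (a hypothetical decorated function, for illustration):
+    ```py
+    >>> @_deprecate_positional_args(version="1.0")
+    ... def upload(repo_id, *, token=None):
+    ...     return repo_id, token
+
+    >>> upload("my-repo", "hf_xxx")  # emits a FutureWarning: pass token='hf_xxx' as keyword args
+    ('my-repo', 'hf_xxx')
+    ```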
+ """
+
+ def _inner_deprecate_positional_args(f):
+ sig = signature(f)
+ kwonly_args = []
+ all_args = []
+ for name, param in sig.parameters.items():
+ if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
+ all_args.append(name)
+ elif param.kind == Parameter.KEYWORD_ONLY:
+ kwonly_args.append(name)
+
+ @wraps(f)
+ def inner_f(*args, **kwargs):
+ extra_args = len(args) - len(all_args)
+ if extra_args <= 0:
+ return f(*args, **kwargs)
+ # extra_args > 0
+ args_msg = [
+ f"{name}='{arg}'" if isinstance(arg, str) else f"{name}={arg}"
+ for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
+ ]
+ args_msg = ", ".join(args_msg)
+ warnings.warn(
+ f"Deprecated positional argument(s) used in '{f.__name__}': pass"
+ f" {args_msg} as keyword args. From version {version} passing these"
+ " as positional arguments will result in an error,",
+ FutureWarning,
+ )
+ kwargs.update(zip(sig.parameters, args))
+ return f(**kwargs)
+
+ return inner_f
+
+ return _inner_deprecate_positional_args
+
+
+def _deprecate_arguments(
+ *,
+ version: str,
+ deprecated_args: Iterable[str],
+ custom_message: Optional[str] = None,
+):
+ """Decorator to issue warnings when using deprecated arguments.
+
+ TODO: could be useful to be able to set a custom error message.
+
+ Args:
+ version (`str`):
+            The version from which using the deprecated arguments will result in an error.
+ deprecated_args (`List[str]`):
+ List of the arguments to be deprecated.
+ custom_message (`str`, *optional*):
+ Warning message that is raised. If not passed, a default warning message
+ will be created.
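+
+    Example (hypothetical usage):
+    ```py
+    >>> @_deprecate_arguments(version="1.0", deprecated_args={"repo_url"})
+    ... def create_repo(repo_id, repo_url=None):
+    ...     return repo_id
+
+    >>> create_repo("my-repo", repo_url="some_url")  # emits a FutureWarning about `repo_url`
+    'my-repo'
+    ```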
+ """
+
+    def _inner_deprecate_arguments(f):
+ sig = signature(f)
+
+ @wraps(f)
+ def inner_f(*args, **kwargs):
+ # Check for used deprecated arguments
+ used_deprecated_args = []
+ for _, parameter in zip(args, sig.parameters.values()):
+ if parameter.name in deprecated_args:
+ used_deprecated_args.append(parameter.name)
+ for kwarg_name, kwarg_value in kwargs.items():
+ if (
+ # If argument is deprecated but still used
+ kwarg_name in deprecated_args
+                # And the passed value is not the default value
+ and kwarg_value != sig.parameters[kwarg_name].default
+ ):
+ used_deprecated_args.append(kwarg_name)
+
+ # Warn and proceed
+ if len(used_deprecated_args) > 0:
+ message = (
+ f"Deprecated argument(s) used in '{f.__name__}':"
+ f" {', '.join(used_deprecated_args)}. Will not be supported from"
+ f" version '{version}'."
+ )
+ if custom_message is not None:
+ message += "\n\n" + custom_message
+ warnings.warn(message, FutureWarning)
+ return f(*args, **kwargs)
+
+ return inner_f
+
+    return _inner_deprecate_arguments
+
+
+def _deprecate_method(*, version: str, message: Optional[str] = None):
+ """Decorator to issue warnings when using a deprecated method.
+
+ Args:
+ version (`str`):
+            The version from which the deprecated method will be removed.
+ message (`str`, *optional*):
+ Warning message that is raised. If not passed, a default warning message
+ will be created.
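+
+    Example (hypothetical usage):
+    ```py
+    >>> @_deprecate_method(version="1.0", message="Use `new_method` instead.")
+    ... def old_method():
+    ...     pass
+
+    >>> old_method()  # emits a FutureWarning mentioning version '1.0' and the custom message
+    ```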
+ """
+
+ def _inner_deprecate_method(f):
+ name = f.__name__
+ if name == "__init__":
+ name = f.__qualname__.split(".")[0] # class name instead of method name
+
+ @wraps(f)
+ def inner_f(*args, **kwargs):
+ warning_message = (
+ f"'{name}' (from '{f.__module__}') is deprecated and will be removed from version '{version}'."
+ )
+ if message is not None:
+ warning_message += " " + message
+ warnings.warn(warning_message, FutureWarning)
+ return f(*args, **kwargs)
+
+ return inner_f
+
+ return _inner_deprecate_method
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_headers.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_headers.py
new file mode 100644
index 0000000000000000000000000000000000000000..e76afb6ceab094a06ca06e41e406a9236c92e8a3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_headers.py
@@ -0,0 +1,239 @@
+# coding=utf-8
+# Copyright 2022-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains utilities to handle headers to send in calls to Huggingface Hub."""
+
+from typing import Dict, Optional, Union
+
+from huggingface_hub.errors import LocalTokenNotFoundError
+
+from .. import constants
+from ._runtime import (
+ get_fastai_version,
+ get_fastcore_version,
+ get_hf_hub_version,
+ get_python_version,
+ get_tf_version,
+ get_torch_version,
+ is_fastai_available,
+ is_fastcore_available,
+ is_tf_available,
+ is_torch_available,
+)
+from ._token import get_token
+from ._validators import validate_hf_hub_args
+
+
+@validate_hf_hub_args
+def build_hf_headers(
+ *,
+ token: Optional[Union[bool, str]] = None,
+ is_write_action: bool = False,
+ library_name: Optional[str] = None,
+ library_version: Optional[str] = None,
+ user_agent: Union[Dict, str, None] = None,
+ headers: Optional[Dict[str, str]] = None,
+) -> Dict[str, str]:
+ """
+ Build headers dictionary to send in a HF Hub call.
+
+ By default, authorization token is always provided either from argument (explicit
+ use) or retrieved from the cache (implicit use). To explicitly avoid sending the
+ token to the Hub, set `token=False` or set the `HF_HUB_DISABLE_IMPLICIT_TOKEN`
+ environment variable.
+
+ In case of an API call that requires write access, an error is thrown if token is
+ `None` or token is an organization token (starting with `"api_org***"`).
+
+ In addition to the auth header, a user-agent is added to provide information about
+ the installed packages (versions of python, huggingface_hub, torch, tensorflow,
+ fastai and fastcore).
+
+ Args:
+ token (`str`, `bool`, *optional*):
+ The token to be sent in authorization header for the Hub call:
+ - if a string, it is used as the Hugging Face token
+ - if `True`, the token is read from the machine (cache or env variable)
+ - if `False`, authorization header is not set
+ - if `None`, the token is read from the machine only except if
+ `HF_HUB_DISABLE_IMPLICIT_TOKEN` env variable is set.
+        is_write_action (`bool`, defaults to `False`):
+            Set to `True` if the API call requires write access. If `True`, the token
+            will be validated (it cannot be `None` and cannot start with `"api_org***"`).
+ library_name (`str`, *optional*):
+ The name of the library that is making the HTTP request. Will be added to
+ the user-agent header.
+ library_version (`str`, *optional*):
+ The version of the library that is making the HTTP request. Will be added
+ to the user-agent header.
+ user_agent (`str`, `dict`, *optional*):
+ The user agent info in the form of a dictionary or a single string. It will
+ be completed with information about the installed packages.
+ headers (`dict`, *optional*):
+ Additional headers to include in the request. Those headers take precedence
+ over the ones generated by this function.
+
+ Returns:
+ A `Dict` of headers to pass in your API call.
+
+ Example:
+ ```py
+ >>> build_hf_headers(token="hf_***") # explicit token
+ {"authorization": "Bearer hf_***", "user-agent": ""}
+
+ >>> build_hf_headers(token=True) # explicitly use cached token
+ {"authorization": "Bearer hf_***",...}
+
+ >>> build_hf_headers(token=False) # explicitly don't use cached token
+ {"user-agent": ...}
+
+ >>> build_hf_headers() # implicit use of the cached token
+ {"authorization": "Bearer hf_***",...}
+
+ # HF_HUB_DISABLE_IMPLICIT_TOKEN=True # to set as env variable
+ >>> build_hf_headers() # token is not sent
+ {"user-agent": ...}
+
+ >>> build_hf_headers(token="api_org_***", is_write_action=True)
+ ValueError: You must use your personal account token for write-access methods.
+
+ >>> build_hf_headers(library_name="transformers", library_version="1.2.3")
+ {"authorization": ..., "user-agent": "transformers/1.2.3; hf_hub/0.10.2; python/3.10.4; tensorflow/1.55"}
+ ```
+
+ Raises:
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+ If organization token is passed and "write" access is required.
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+ If "write" access is required but token is not passed and not saved locally.
+ [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
+ If `token=True` but token is not saved locally.
+ """
+ # Get auth token to send
+ token_to_send = get_token_to_send(token)
+ _validate_token_to_send(token_to_send, is_write_action=is_write_action)
+
+ # Combine headers
+ hf_headers = {
+ "user-agent": _http_user_agent(
+ library_name=library_name,
+ library_version=library_version,
+ user_agent=user_agent,
+ )
+ }
+ if token_to_send is not None:
+ hf_headers["authorization"] = f"Bearer {token_to_send}"
+ if headers is not None:
+ hf_headers.update(headers)
+ return hf_headers
+
+
+def get_token_to_send(token: Optional[Union[bool, str]]) -> Optional[str]:
+ """Select the token to send from either `token` or the cache."""
+ # Case token is explicitly provided
+ if isinstance(token, str):
+ return token
+
+ # Case token is explicitly forbidden
+ if token is False:
+ return None
+
+ # Token is not provided: we get it from local cache
+ cached_token = get_token()
+
+ # Case token is explicitly required
+ if token is True:
+ if cached_token is None:
+ raise LocalTokenNotFoundError(
+ "Token is required (`token=True`), but no token found. You"
+ " need to provide a token or be logged in to Hugging Face with"
+ " `huggingface-cli login` or `huggingface_hub.login`. See"
+ " https://huggingface.co/settings/tokens."
+ )
+ return cached_token
+
+ # Case implicit use of the token is forbidden by env variable
+ if constants.HF_HUB_DISABLE_IMPLICIT_TOKEN:
+ return None
+
+ # Otherwise: we use the cached token as the user has not explicitly forbidden it
+ return cached_token
+
+
+def _validate_token_to_send(token: Optional[str], is_write_action: bool) -> None:
+ if is_write_action:
+ if token is None:
+ raise ValueError(
+ "Token is required (write-access action) but no token found. You need"
+ " to provide a token or be logged in to Hugging Face with"
+ " `huggingface-cli login` or `huggingface_hub.login`. See"
+ " https://huggingface.co/settings/tokens."
+ )
+ if token.startswith("api_org"):
+ raise ValueError(
+ "You must use your personal account token for write-access methods. To"
+ " generate a write-access token, go to"
+ " https://huggingface.co/settings/tokens"
+ )
+
+
+def _http_user_agent(
+ *,
+ library_name: Optional[str] = None,
+ library_version: Optional[str] = None,
+ user_agent: Union[Dict, str, None] = None,
+) -> str:
+ """Format a user-agent string containing information about the installed packages.
+
+ Args:
+ library_name (`str`, *optional*):
+ The name of the library that is making the HTTP request.
+ library_version (`str`, *optional*):
+ The version of the library that is making the HTTP request.
+ user_agent (`str`, `dict`, *optional*):
+ The user agent info in the form of a dictionary or a single string.
+
+ Returns:
+ The formatted user-agent string.
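+
+        Example (illustrative): `"transformers/4.40.0; hf_hub/0.23.0; python/3.10.4; torch/2.2.0"`.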
+ """
+ if library_name is not None:
+ ua = f"{library_name}/{library_version}"
+ else:
+ ua = "unknown/None"
+ ua += f"; hf_hub/{get_hf_hub_version()}"
+ ua += f"; python/{get_python_version()}"
+
+ if not constants.HF_HUB_DISABLE_TELEMETRY:
+ if is_torch_available():
+ ua += f"; torch/{get_torch_version()}"
+ if is_tf_available():
+ ua += f"; tensorflow/{get_tf_version()}"
+ if is_fastai_available():
+ ua += f"; fastai/{get_fastai_version()}"
+ if is_fastcore_available():
+ ua += f"; fastcore/{get_fastcore_version()}"
+
+ if isinstance(user_agent, dict):
+ ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
+ elif isinstance(user_agent, str):
+ ua += "; " + user_agent
+
+ return _deduplicate_user_agent(ua)
+
+
+def _deduplicate_user_agent(user_agent: str) -> str:
+ """Deduplicate redundant information in the generated user-agent."""
+ # Split around ";" > Strip whitespaces > Store as dict keys (ensure unicity) > format back as string
+ # Order is implicitly preserved by dictionary structure (see https://stackoverflow.com/a/53657523).
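+    # For example (illustrative): "python/3.10; torch/2.0; python/3.10" -> "python/3.10; torch/2.0"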
+ return "; ".join({key.strip(): None for key in user_agent.split(";")}.keys())
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_token.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_token.py
new file mode 100644
index 0000000000000000000000000000000000000000..3218bb45c0737f67912c9c257734c463f5871255
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_token.py
@@ -0,0 +1,130 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains an helper to get the token from machine (env variable, secret or config file)."""
+
+import os
+import warnings
+from pathlib import Path
+from threading import Lock
+from typing import Optional
+
+from .. import constants
+from ._runtime import is_google_colab
+
+
+_IS_GOOGLE_COLAB_CHECKED = False
+_GOOGLE_COLAB_SECRET_LOCK = Lock()
+_GOOGLE_COLAB_SECRET: Optional[str] = None
+
+
+def get_token() -> Optional[str]:
+ """
+ Get token if user is logged in.
+
+    Note: in most cases, you should use [`huggingface_hub.utils.build_hf_headers`] instead. This method is only useful
+    if you want to retrieve the token for purposes other than sending an HTTP request.
+
+    The token is retrieved first from the `HF_TOKEN` environment variable. Otherwise, we read the token file located
+ in the Hugging Face home folder. Returns None if user is not logged in. To log in, use [`login`] or
+ `huggingface-cli login`.
+
+ Returns:
+ `str` or `None`: The token, `None` if it doesn't exist.
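+
+    Example:
+    ```py
+    >>> from huggingface_hub.utils import get_token
+    >>> token = get_token()  # e.g. "hf_..." when logged in, `None` otherwise
+    ```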
+ """
+ return _get_token_from_google_colab() or _get_token_from_environment() or _get_token_from_file()
+
+
+def _get_token_from_google_colab() -> Optional[str]:
+ """Get token from Google Colab secrets vault using `google.colab.userdata.get(...)`.
+
+ Token is read from the vault only once per session and then stored in a global variable to avoid re-requesting
+ access to the vault.
+ """
+ if not is_google_colab():
+ return None
+
+ # `google.colab.userdata` is not thread-safe
+ # This can lead to a deadlock if multiple threads try to access it at the same time
+ # (typically when using `snapshot_download`)
+ # => use a lock
+ # See https://github.com/huggingface/huggingface_hub/issues/1952 for more details.
+ with _GOOGLE_COLAB_SECRET_LOCK:
+ global _GOOGLE_COLAB_SECRET
+ global _IS_GOOGLE_COLAB_CHECKED
+
+ if _IS_GOOGLE_COLAB_CHECKED: # request access only once
+ return _GOOGLE_COLAB_SECRET
+
+ try:
+ from google.colab import userdata
+ from google.colab.errors import Error as ColabError
+ except ImportError:
+ return None
+
+ try:
+ token = userdata.get("HF_TOKEN")
+ _GOOGLE_COLAB_SECRET = _clean_token(token)
+ except userdata.NotebookAccessError:
+            # Means the user has a secret called `HF_TOKEN` and got a popup "please grant access to HF_TOKEN" and refused it
+ # => warn user but ignore error => do not re-request access to user
+ warnings.warn(
+ "\nAccess to the secret `HF_TOKEN` has not been granted on this notebook."
+ "\nYou will not be requested again."
+ "\nPlease restart the session if you want to be prompted again."
+ )
+ _GOOGLE_COLAB_SECRET = None
+ except userdata.SecretNotFoundError:
+ # Means the user did not define a `HF_TOKEN` secret => warn
+ warnings.warn(
+ "\nThe secret `HF_TOKEN` does not exist in your Colab secrets."
+ "\nTo authenticate with the Hugging Face Hub, create a token in your settings tab "
+ "(https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session."
+ "\nYou will be able to reuse this secret in all of your notebooks."
+ "\nPlease note that authentication is recommended but still optional to access public models or datasets."
+ )
+ _GOOGLE_COLAB_SECRET = None
+ except ColabError as e:
+            # Something happened but we don't know what => recommend opening a GitHub issue
+ warnings.warn(
+ f"\nError while fetching `HF_TOKEN` secret value from your vault: '{str(e)}'."
+ "\nYou are not authenticated with the Hugging Face Hub in this notebook."
+ "\nIf the error persists, please let us know by opening an issue on GitHub "
+ "(https://github.com/huggingface/huggingface_hub/issues/new)."
+ )
+ _GOOGLE_COLAB_SECRET = None
+
+ _IS_GOOGLE_COLAB_CHECKED = True
+ return _GOOGLE_COLAB_SECRET
+
+
+def _get_token_from_environment() -> Optional[str]:
+ # `HF_TOKEN` has priority (keep `HUGGING_FACE_HUB_TOKEN` for backward compatibility)
+ return _clean_token(os.environ.get("HF_TOKEN") or os.environ.get("HUGGING_FACE_HUB_TOKEN"))
+
+
+def _get_token_from_file() -> Optional[str]:
+ try:
+ return _clean_token(Path(constants.HF_TOKEN_PATH).read_text())
+ except FileNotFoundError:
+ return None
+
+
+def _clean_token(token: Optional[str]) -> Optional[str]:
+ """Clean token by removing trailing and leading spaces and newlines.
+
+ If token is an empty string, return None.
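+
+    Example:
+    ```py
+    >>> _clean_token("  hf_xxx  ")
+    'hf_xxx'
+    >>> _clean_token("") is None
+    True
+    ```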
+ """
+ if token is None:
+ return None
+ return token.replace("\r", "").replace("\n", "").strip() or None
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py
new file mode 100644
index 0000000000000000000000000000000000000000..27833f28e3e2030680fb72b95a547521bc08831b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py
@@ -0,0 +1,226 @@
+# coding=utf-8
+# Copyright 2022-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains utilities to validate argument values in `huggingface_hub`."""
+
+import inspect
+import re
+import warnings
+from functools import wraps
+from itertools import chain
+from typing import Any, Dict
+
+from huggingface_hub.errors import HFValidationError
+
+from ._typing import CallableT
+
+
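+# Illustrative examples (not exhaustive): "repo_name" and "namespace/repo_name" match this
+# pattern; extra rules ("--", "..", trailing ".git") are enforced separately in `validate_repo_id` below.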
+REPO_ID_REGEX = re.compile(
+ r"""
+ ^
+ (\b[\w\-.]+\b/)? # optional namespace (username or organization)
+ \b # starts with a word boundary
+ [\w\-.]{1,96} # repo_name: alphanumeric + . _ -
+ \b # ends with a word boundary
+ $
+ """,
+ flags=re.VERBOSE,
+)
+
+
+def validate_hf_hub_args(fn: CallableT) -> CallableT:
+ """Validate values received as argument for any public method of `huggingface_hub`.
+
+ The goal of this decorator is to harmonize validation of arguments reused
+ everywhere. By default, all defined validators are tested.
+
+ Validators:
+ - [`~utils.validate_repo_id`]: `repo_id` must be `"repo_name"`
+ or `"namespace/repo_name"`. Namespace is a username or an organization.
+ - [`~utils.smoothly_deprecate_use_auth_token`]: Use `token` instead of
+ `use_auth_token` (only if `use_auth_token` is not expected by the decorated
+ function - in practice, always the case in `huggingface_hub`).
+
+ Example:
+ ```py
+ >>> from huggingface_hub.utils import validate_hf_hub_args
+
+ >>> @validate_hf_hub_args
+ ... def my_cool_method(repo_id: str):
+ ... print(repo_id)
+
+ >>> my_cool_method(repo_id="valid_repo_id")
+ valid_repo_id
+
+ >>> my_cool_method("other..repo..id")
+ huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.
+
+ >>> my_cool_method(repo_id="other..repo..id")
+ huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.
+
+ >>> @validate_hf_hub_args
+ ... def my_cool_auth_method(token: str):
+ ... print(token)
+
+ >>> my_cool_auth_method(token="a token")
+ "a token"
+
+ >>> my_cool_auth_method(use_auth_token="a use_auth_token")
+ "a use_auth_token"
+
+ >>> my_cool_auth_method(token="a token", use_auth_token="a use_auth_token")
+ UserWarning: Both `token` and `use_auth_token` are passed (...)
+ "a token"
+ ```
+
+ Raises:
+ [`~utils.HFValidationError`]:
+ If an input is not valid.
+ """
+ # TODO: add an argument to opt-out validation for specific argument?
+ signature = inspect.signature(fn)
+
+ # Should the validator switch `use_auth_token` values to `token`? In practice, always
+ # True in `huggingface_hub`. Might not be the case in a downstream library.
+ check_use_auth_token = "use_auth_token" not in signature.parameters and "token" in signature.parameters
+
+ @wraps(fn)
+ def _inner_fn(*args, **kwargs):
+ has_token = False
+ for arg_name, arg_value in chain(
+ zip(signature.parameters, args), # Args values
+ kwargs.items(), # Kwargs values
+ ):
+ if arg_name in ["repo_id", "from_id", "to_id"]:
+ validate_repo_id(arg_value)
+
+ elif arg_name == "token" and arg_value is not None:
+ has_token = True
+
+ if check_use_auth_token:
+ kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
+
+ return fn(*args, **kwargs)
+
+ return _inner_fn # type: ignore
+
+
+def validate_repo_id(repo_id: str) -> None:
+ """Validate `repo_id` is valid.
+
+ This is not meant to replace the proper validation made on the Hub but rather to
+ avoid local inconsistencies whenever possible (example: passing `repo_type` in the
+ `repo_id` is forbidden).
+
+ Rules:
+ - Between 1 and 96 characters.
+ - Either "repo_name" or "namespace/repo_name"
+ - [a-zA-Z0-9] or "-", "_", "."
+ - "--" and ".." are forbidden
+
+ Valid: `"foo"`, `"foo/bar"`, `"123"`, `"Foo-BAR_foo.bar123"`
+
+ Not valid: `"datasets/foo/bar"`, `".repo_id"`, `"foo--bar"`, `"foo.git"`
+
+ Example:
+ ```py
+ >>> from huggingface_hub.utils import validate_repo_id
+ >>> validate_repo_id(repo_id="valid_repo_id")
+ >>> validate_repo_id(repo_id="other..repo..id")
+ huggingface_hub.utils._validators.HFValidationError: Cannot have -- or .. in repo_id: 'other..repo..id'.
+ ```
+
+ Discussed in https://github.com/huggingface/huggingface_hub/issues/1008.
+ In moon-landing (internal repository):
+ - https://github.com/huggingface/moon-landing/blob/main/server/lib/Names.ts#L27
+ - https://github.com/huggingface/moon-landing/blob/main/server/views/components/NewRepoForm/NewRepoForm.svelte#L138
+ """
+ if not isinstance(repo_id, str):
+ # Typically, a Path is not a repo_id
+ raise HFValidationError(f"Repo id must be a string, not {type(repo_id)}: '{repo_id}'.")
+
+ if repo_id.count("/") > 1:
+ raise HFValidationError(
+ "Repo id must be in the form 'repo_name' or 'namespace/repo_name':"
+ f" '{repo_id}'. Use `repo_type` argument if needed."
+ )
+
+ if not REPO_ID_REGEX.match(repo_id):
+ raise HFValidationError(
+ "Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are"
+ " forbidden, '-' and '.' cannot start or end the name, max length is 96:"
+ f" '{repo_id}'."
+ )
+
+ if "--" in repo_id or ".." in repo_id:
+ raise HFValidationError(f"Cannot have -- or .. in repo_id: '{repo_id}'.")
+
+ if repo_id.endswith(".git"):
+ raise HFValidationError(f"Repo_id cannot end by '.git': '{repo_id}'.")
+
+
+def smoothly_deprecate_use_auth_token(fn_name: str, has_token: bool, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+ """Smoothly deprecate `use_auth_token` in the `huggingface_hub` codebase.
+
+ The long-term goal is to remove any mention of `use_auth_token` in the codebase in
+    favor of a unique and less verbose `token` argument. This will be done in a few steps:
+
+ 0. Step 0: methods that require a read-access to the Hub use the `use_auth_token`
+ argument (`str`, `bool` or `None`). Methods requiring write-access have a `token`
+ argument (`str`, `None`). This implicit rule exists to be able to not send the
+ token when not necessary (`use_auth_token=False`) even if logged in.
+
+ 1. Step 1: we want to harmonize everything and use `token` everywhere (supporting
+ `token=False` for read-only methods). In order not to break existing code, if
+ `use_auth_token` is passed to a function, the `use_auth_token` value is passed
+ as `token` instead, without any warning.
+ a. Corner case: if both `use_auth_token` and `token` values are passed, a warning
+ is thrown and the `use_auth_token` value is ignored.
+
+    2. Step 2: Once it is released, we should push downstream libraries to switch from
+ `use_auth_token` to `token` as much as possible, but without throwing a warning
+ (e.g. manually create issues on the corresponding repos).
+
+    3. Step 3: After a transitional period (e.g. 6 months, until April 2023?), we update
+ `huggingface_hub` to throw a warning on `use_auth_token`. Hopefully, very few
+ users will be impacted as it would have already been fixed.
+ In addition, unit tests in `huggingface_hub` must be adapted to expect warnings
+ to be thrown (but still use `use_auth_token` as before).
+
+ 4. Step 4: After a normal deprecation cycle (3 releases?), remove this validator.
+ `use_auth_token` will definitely not be supported.
+ In addition, we update unit tests in `huggingface_hub` to use `token` everywhere.
+
+ This has been discussed in:
+ - https://github.com/huggingface/huggingface_hub/issues/1094.
+ - https://github.com/huggingface/huggingface_hub/pull/928
+ - (related) https://github.com/huggingface/huggingface_hub/pull/1064
+ """
+ new_kwargs = kwargs.copy() # do not mutate input!
+
+ use_auth_token = new_kwargs.pop("use_auth_token", None) # remove from kwargs
+ if use_auth_token is not None:
+ if has_token:
+ warnings.warn(
+ "Both `token` and `use_auth_token` are passed to"
+ f" `{fn_name}` with non-None values. `token` is now the"
+ " preferred argument to pass a User Access Token."
+ " `use_auth_token` value will be ignored."
+ )
+ else:
+ # `token` argument is not passed and a non-None value is passed in
+ # `use_auth_token` => use `use_auth_token` value as `token` kwarg.
+ new_kwargs["token"] = use_auth_token
+
+ return new_kwargs
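+
+
+# Illustration (not part of the original module): with `has_token=False`, a
+# non-None `use_auth_token` value is remapped to the `token` kwarg:
+#
+# >>> smoothly_deprecate_use_auth_token(
+# ...     fn_name="hf_hub_download", has_token=False, kwargs={"use_auth_token": "hf_xxx"}
+# ... )
+# {'token': 'hf_xxx'}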
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/endpoint_helpers.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/endpoint_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac52ce85a0f45932aeefc702eb44828ad2e17871
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/endpoint_helpers.py
@@ -0,0 +1,250 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Helpful utility functions and classes for exploring API endpoints,
+with the aim of providing a user-friendly interface.
+"""
+
+import math
+import re
+import warnings
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, List, Optional, Union
+
+from ..repocard_data import ModelCardData
+
+
+if TYPE_CHECKING:
+ from ..hf_api import ModelInfo
+
+
+def _is_emission_within_treshold(model_info: "ModelInfo", minimum_threshold: float, maximum_threshold: float) -> bool:
+ """Checks if a model's emission is within a given threshold.
+
+ Args:
+ model_info (`ModelInfo`):
+ A model info object containing the model's emission information.
+ minimum_threshold (`float`):
+ A minimum carbon threshold to filter by, such as 1.
+ maximum_threshold (`float`):
+ A maximum carbon threshold to filter by, such as 10.
+
+ Returns:
+ `bool`: Whether the model's emission is within the given threshold.
+ """
+ if minimum_threshold is None and maximum_threshold is None:
+ raise ValueError("`minimum_threshold` and `maximum_threshold` cannot both be `None`")
+ if minimum_threshold is None:
+ minimum_threshold = -1
+ if maximum_threshold is None:
+ maximum_threshold = math.inf
+
+ card_data = getattr(model_info, "card_data", None)
+ if card_data is None or not isinstance(card_data, (dict, ModelCardData)):
+ return False
+
+ # Get CO2 emission metadata
+ emission = card_data.get("co2_eq_emissions", None)
+ if isinstance(emission, dict):
+ emission = emission["emissions"]
+ if not emission:
+ return False
+
+ # Filter out if value is missing or out of range
+ matched = re.search(r"\d+\.\d+|\d+", str(emission))
+ if matched is None:
+ return False
+
+ emission_value = float(matched.group(0))
+ return minimum_threshold <= emission_value <= maximum_threshold
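+
+# Illustration (not part of the original module): a card-data value such as
+# "11.2 g of CO2" is parsed by the regex above as 11.2, so the model passes a
+# (10, 100) threshold filter but not a (0, 10) one.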
+
+
+@dataclass
+class DatasetFilter:
+ """
+ A class that converts human-readable dataset search parameters into ones
+ compatible with the REST API. For all parameters capitalization does not
+ matter.
+
+
+
+ The `DatasetFilter` class is deprecated and will be removed in huggingface_hub>=0.24. Please pass the filter parameters as keyword arguments directly to [`list_datasets`].
+
+
+
+ Args:
+ author (`str`, *optional*):
+ A string that can be used to identify datasets on
+ the Hub by the original uploader (author or organization), such as
+ `facebook` or `huggingface`.
+ benchmark (`str` or `List`, *optional*):
+ A string or list of strings that can be used to identify datasets on
+ the Hub by their official benchmark.
+ dataset_name (`str`, *optional*):
+ A string or list of strings that can be used to identify datasets on
+ the Hub by its name, such as `SQAC` or `wikineural`
+ language_creators (`str` or `List`, *optional*):
+ A string or list of strings that can be used to identify datasets on
+ the Hub with how the data was curated, such as `crowdsourced` or
+ `machine_generated`.
+ language (`str` or `List`, *optional*):
+ A string or list of strings representing a two-character language to
+ filter datasets by on the Hub.
+ multilinguality (`str` or `List`, *optional*):
+ A string or list of strings representing a filter for datasets that
+ contain multiple languages.
+ size_categories (`str` or `List`, *optional*):
+ A string or list of strings that can be used to identify datasets on
+ the Hub by the size of the dataset such as `100K<n<1M` or `1M<n<10M`.
+ task_categories (`str` or `List`, *optional*):
+ A string or list of strings that can be used to identify datasets on
+ the Hub by the designed task, such as `audio_classification` or
+ `named_entity_recognition`.
+ task_ids (`str` or `List`, *optional*):
+ A string or list of strings that can be used to identify datasets on
+ the Hub by the specific task such as `speech_emotion_recognition` or
+ `paraphrase`.
+
+ Examples:
+
+ ```py
+ >>> from huggingface_hub import DatasetFilter
+
+ >>> # Using author
+ >>> new_filter = DatasetFilter(author="facebook")
+
+ >>> # Using benchmark
+ >>> new_filter = DatasetFilter(benchmark="raft")
+
+ >>> # Using dataset_name
+ >>> new_filter = DatasetFilter(dataset_name="wikineural")
+
+ >>> # Using language_creator
+ >>> new_filter = DatasetFilter(language_creator="crowdsourced")
+
+ >>> # Using language
+ >>> new_filter = DatasetFilter(language="en")
+
+ >>> # Using multilinguality
+ >>> new_filter = DatasetFilter(multilinguality="multilingual")
+
+ >>> # Using size_categories
+ >>> new_filter = DatasetFilter(size_categories="100K<n<1M")
+
+ >>> # Using task_categories
+ >>> new_filter = DatasetFilter(task_categories="audio_classification")
+
+ >>> # Using task_ids
+ >>> new_filter = DatasetFilter(task_ids="paraphrase")
+ ```
+ """
+
+ author: Optional[str] = None
+ benchmark: Optional[Union[str, List[str]]] = None
+ dataset_name: Optional[str] = None
+ language_creators: Optional[Union[str, List[str]]] = None
+ language: Optional[Union[str, List[str]]] = None
+ multilinguality: Optional[Union[str, List[str]]] = None
+ size_categories: Optional[Union[str, List[str]]] = None
+ task_categories: Optional[Union[str, List[str]]] = None
+ task_ids: Optional[Union[str, List[str]]] = None
+
+ def __post_init__(self):
+ warnings.warn(
+ "'DatasetFilter' is deprecated and will be removed in huggingface_hub>=0.24. Please pass the filter parameters as keyword arguments directly to the `list_datasets` method.",
+ category=FutureWarning,
+ )
+
+
+@dataclass
+class ModelFilter:
+ """
+ A class that converts human-readable model search parameters into ones
+ compatible with the REST API. For all parameters capitalization does not
+ matter.
+
+
+
+ The `ModelFilter` class is deprecated and will be removed in huggingface_hub>=0.24. Please pass the filter parameters as keyword arguments directly to [`list_models`].
+
+
+
+ Args:
+ author (`str`, *optional*):
+ A string that can be used to identify models on the Hub by the
+ original uploader (author or organization), such as `facebook` or
+ `huggingface`.
+ library (`str` or `List`, *optional*):
+ A string or list of strings of foundational libraries models were
+ originally trained from, such as pytorch, tensorflow, or allennlp.
+ language (`str` or `List`, *optional*):
+ A string or list of strings of languages, both by name and country
+ code, such as "en" or "English"
+ model_name (`str`, *optional*):
+ A string that contain complete or partial names for models on the
+ Hub, such as "bert" or "bert-base-cased"
+ task (`str` or `List`, *optional*):
+ A string or list of strings of tasks models were designed for, such
+ as: "fill-mask" or "automatic-speech-recognition"
+ tags (`str` or `List`, *optional*):
+ A string tag or a list of tags to filter models on the Hub by, such
+ as `text-generation` or `spacy`.
+ trained_dataset (`str` or `List`, *optional*):
+ A string tag or a list of string tags of the trained dataset for a
+ model on the Hub.
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import ModelFilter
+
+ >>> # For the author
+ >>> new_filter = ModelFilter(author="facebook")
+
+ >>> # For the library
+ >>> new_filter = ModelFilter(library="pytorch")
+
+ >>> # For the language
+ >>> new_filter = ModelFilter(language="french")
+
+ >>> # For the model_name
+ >>> new_filter = ModelFilter(model_name="bert")
+
+ >>> # For the task
+ >>> new_filter = ModelFilter(task="text-classification")
+
+ >>> # Tags can be listed with `HfApi`
+ >>> from huggingface_hub import HfApi
+ >>> api = HfApi()
+ >>> api.get_model_tags() # to list model tags
+
+ >>> new_filter = ModelFilter(tags="benchmark:raft")
+
+ >>> # Related to the dataset
+ >>> new_filter = ModelFilter(trained_dataset="common_voice")
+ ```
+ """
+
+ author: Optional[str] = None
+ library: Optional[Union[str, List[str]]] = None
+ language: Optional[Union[str, List[str]]] = None
+ model_name: Optional[str] = None
+ task: Optional[Union[str, List[str]]] = None
+ trained_dataset: Optional[Union[str, List[str]]] = None
+ tags: Optional[Union[str, List[str]]] = None
+
+ def __post_init__(self):
+ warnings.warn(
+ "'ModelFilter' is deprecated and will be removed in huggingface_hub>=0.24. Please pass the filter parameters as keyword arguments directly to the `list_models` method.",
+ FutureWarning,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/insecure_hashlib.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/insecure_hashlib.py
new file mode 100644
index 0000000000000000000000000000000000000000..f232ee0adcfc52dcc18b5ea4d9c913b206521f71
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/insecure_hashlib.py
@@ -0,0 +1,34 @@
+# Taken from https://github.com/mlflow/mlflow/pull/10119
+#
+# DO NOT use this function for security purposes (e.g., password hashing).
+#
+# In Python >= 3.9, insecure hashing algorithms such as MD5 fail in FIPS-compliant
+# environments unless `usedforsecurity=False` is explicitly passed.
+#
+# References:
+# - https://github.com/mlflow/mlflow/issues/9905
+# - https://github.com/mlflow/mlflow/pull/10119
+# - https://docs.python.org/3/library/hashlib.html
+# - https://github.com/huggingface/transformers/pull/27038
+#
+# Usage:
+# ```python
+# # Use
+# from huggingface_hub.utils.insecure_hashlib import sha256
+# # instead of
+# from hashlib import sha256
+#
+# # Use
+# from huggingface_hub.utils import insecure_hashlib
+# # instead of
+# import hashlib
+# ```
+import functools
+import hashlib
+import sys
+
+
+_kwargs = {"usedforsecurity": False} if sys.version_info >= (3, 9) else {}
+md5 = functools.partial(hashlib.md5, **_kwargs)
+sha1 = functools.partial(hashlib.sha1, **_kwargs)
+sha256 = functools.partial(hashlib.sha256, **_kwargs)
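+
+# Illustration (not part of the original module): the partials accept the same
+# arguments as their `hashlib` counterparts, e.g.
+#
+# >>> md5(b"some cache key").hexdigest()  # like hashlib.md5, but FIPS-friendly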
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/tqdm.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/tqdm.py
new file mode 100644
index 0000000000000000000000000000000000000000..13bf7a6842d7d2ce101f1af27d4b2740992be745
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/tqdm.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+"""Utility helpers to handle progress bars in `huggingface_hub`.
+
+Example:
+ 1. Use `huggingface_hub.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.
+ 2. To disable progress bars, either use `disable_progress_bars()` helper or set the
+ environment variable `HF_HUB_DISABLE_PROGRESS_BARS` to 1.
+ 3. To re-enable progress bars, use `enable_progress_bars()`.
+ 4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`.
+
+NOTE: The environment variable `HF_HUB_DISABLE_PROGRESS_BARS` takes priority.
+
+Example:
+ ```py
+ >>> from huggingface_hub.utils import are_progress_bars_disabled, disable_progress_bars, enable_progress_bars, tqdm
+
+ # Disable progress bars globally
+ >>> disable_progress_bars()
+
+ # Use as normal `tqdm`
+ >>> for _ in tqdm(range(5)):
+ ... pass
+
+ # Still not showing progress bars, as `disable=False` is overwritten to `True`.
+ >>> for _ in tqdm(range(5), disable=False):
+ ... pass
+
+ >>> are_progress_bars_disabled()
+ True
+
+ # Re-enable progress bars globally
+ >>> enable_progress_bars()
+
+ # Progress bar will be shown!
+ >>> for _ in tqdm(range(5)):
+ ... pass
+ 100%|███████████████████████████████████████| 5/5 [00:00<00:00, 117817.53it/s]
+ ```
+
+Group-based control:
+ ```python
+ # Disable progress bars for a specific group
+ >>> disable_progress_bars("peft.foo")
+
+ # Check state of different groups
+ >>> assert not are_progress_bars_disabled("peft")
+ >>> assert not are_progress_bars_disabled("peft.something")
+ >>> assert are_progress_bars_disabled("peft.foo")
+ >>> assert are_progress_bars_disabled("peft.foo.bar")
+
+ # Enable progress bars for a subgroup
+ >>> enable_progress_bars("peft.foo.bar")
+
+ # Check if enabling a subgroup affects the parent group
+ >>> assert are_progress_bars_disabled("peft.foo")
+ >>> assert not are_progress_bars_disabled("peft.foo.bar")
+
+ # No progress bar for `name="peft.foo"`
+ >>> for _ in tqdm(range(5), name="peft.foo"):
+ ... pass
+
+ # Progress bar will be shown for `name="peft.foo.bar"`
+ >>> for _ in tqdm(range(5), name="peft.foo.bar"):
+ ... pass
+ 100%|███████████████████████████████████████| 5/5 [00:00<00:00, 117817.53it/s]
+
+ ```
+"""
+
+import io
+import warnings
+from contextlib import contextmanager
+from pathlib import Path
+from typing import Dict, Iterator, Optional, Union
+
+from tqdm.auto import tqdm as old_tqdm
+
+from ..constants import HF_HUB_DISABLE_PROGRESS_BARS
+
+
+# The `HF_HUB_DISABLE_PROGRESS_BARS` environment variable can be True, False, or not set (None),
+# allowing for control over progress bar visibility. When set, this variable takes precedence
+# over programmatic settings, dictating whether progress bars should be shown or hidden globally.
+# Essentially, the environment variable's setting overrides any code-based configurations.
+#
+# If `HF_HUB_DISABLE_PROGRESS_BARS` is not defined (None), it implies that users can manage
+# progress bar visibility through code. By default, progress bars are turned on.
+
+
+progress_bar_states: Dict[str, bool] = {}
+
+
+def disable_progress_bars(name: Optional[str] = None) -> None:
+ """
+ Disable progress bars either globally or for a specified group.
+
+ This function updates the state of progress bars based on a group name.
+ If no group name is provided, all progress bars are disabled. The operation
+ respects the `HF_HUB_DISABLE_PROGRESS_BARS` environment variable's setting.
+
+ Args:
+ name (`str`, *optional*):
+ The name of the group for which to disable the progress bars. If None,
+ progress bars are disabled globally.
+
+ Raises:
+ Warning: If the environment variable precludes changes.
+ """
+ if HF_HUB_DISABLE_PROGRESS_BARS is False:
+ warnings.warn(
+ "Cannot disable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=0` is set and has priority."
+ )
+ return
+
+ if name is None:
+ progress_bar_states.clear()
+ progress_bar_states["_global"] = False
+ else:
+ keys_to_remove = [key for key in progress_bar_states if key.startswith(f"{name}.")]
+ for key in keys_to_remove:
+ del progress_bar_states[key]
+ progress_bar_states[name] = False
+
+
+def enable_progress_bars(name: Optional[str] = None) -> None:
+ """
+ Enable progress bars either globally or for a specified group.
+
+ This function sets the progress bars to enabled for the specified group or globally
+ if no group is specified. The operation is subject to the `HF_HUB_DISABLE_PROGRESS_BARS`
+ environment setting.
+
+ Args:
+ name (`str`, *optional*):
+ The name of the group for which to enable the progress bars. If None,
+ progress bars are enabled globally.
+
+ Raises:
+ Warning: If the environment variable precludes changes.
+ """
+ if HF_HUB_DISABLE_PROGRESS_BARS is True:
+ warnings.warn(
+ "Cannot enable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=1` is set and has priority."
+ )
+ return
+
+ if name is None:
+ progress_bar_states.clear()
+ progress_bar_states["_global"] = True
+ else:
+ keys_to_remove = [key for key in progress_bar_states if key.startswith(f"{name}.")]
+ for key in keys_to_remove:
+ del progress_bar_states[key]
+ progress_bar_states[name] = True
+
+
+def are_progress_bars_disabled(name: Optional[str] = None) -> bool:
+ """
+ Check if progress bars are disabled globally or for a specific group.
+
+ This function returns whether progress bars are disabled for a given group or globally.
+ It checks the `HF_HUB_DISABLE_PROGRESS_BARS` environment variable first, then the programmatic
+ settings.
+
+ Args:
+ name (`str`, *optional*):
+ The group name to check; if None, checks the global setting.
+
+ Returns:
+ `bool`: True if progress bars are disabled, False otherwise.
+ """
+ if HF_HUB_DISABLE_PROGRESS_BARS is True:
+ return True
+
+ if name is None:
+ return not progress_bar_states.get("_global", True)
+
+ while name:
+ if name in progress_bar_states:
+ return not progress_bar_states[name]
+ name = ".".join(name.split(".")[:-1])
+
+ return not progress_bar_states.get("_global", True)
+
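+# Illustration (not part of the original module): group names resolve from the
+# most specific to the least specific. For `name="peft.foo.bar"` the states are
+# checked in the order "peft.foo.bar" -> "peft.foo" -> "peft" -> "_global", and
+# the first key present in `progress_bar_states` wins.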
+
+class tqdm(old_tqdm):
+ """
+ Class to override `disable` argument in case progress bars are globally disabled.
+
+ Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
+ """
+
+ def __init__(self, *args, **kwargs):
+ name = kwargs.pop("name", None) # do not pass `name` to `tqdm`
+ if are_progress_bars_disabled(name):
+ kwargs["disable"] = True
+ super().__init__(*args, **kwargs)
+
+ def __delattr__(self, attr: str) -> None:
+ """Fix for https://github.com/huggingface/huggingface_hub/issues/1603"""
+ try:
+ super().__delattr__(attr)
+ except AttributeError:
+ if attr != "_lock":
+ raise
+
+
+@contextmanager
+def tqdm_stream_file(path: Union[Path, str]) -> Iterator[io.BufferedReader]:
+ """
+ Open a file as binary and wrap the `read` method to display a progress bar when it's streamed.
+
+ First implemented in `transformers` in 2019 but removed when the library switched to git-lfs. Used in `huggingface_hub`
+ to show a progress bar when uploading an LFS file to the Hub. See github.com/huggingface/transformers/pull/2078#discussion_r354739608
+ for implementation details.
+
+ Note: the current implementation handles only files stored on disk, as that is the most common use case. It could be
+ extended to stream any `BinaryIO` object, but we might have to debug some corner cases.
+
+ Example:
+ ```py
+ >>> with tqdm_stream_file("config.json") as f:
+ ... requests.put(url, data=f)
+ config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s]
+ ```
+ """
+ if isinstance(path, str):
+ path = Path(path)
+
+ with path.open("rb") as f:
+ total_size = path.stat().st_size
+ pbar = tqdm(
+ unit="B",
+ unit_scale=True,
+ total=total_size,
+ initial=0,
+ desc=path.name,
+ )
+
+ f_read = f.read
+
+ def _inner_read(size: Optional[int] = -1) -> bytes:
+ data = f_read(size)
+ pbar.update(len(data))
+ return data
+
+ f.read = _inner_read # type: ignore
+
+ yield f
+
+ pbar.close()
diff --git a/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/AUTHORS.txt b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/AUTHORS.txt
new file mode 100644
index 0000000000000000000000000000000000000000..88b904760b5f8950189dd1c2c30da8fb3b22374f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/AUTHORS.txt
@@ -0,0 +1,31 @@
+Numexpr was initially written by David Cooke, and extended to more
+types by Tim Hochberg.
+
+Francesc Alted contributed support for booleans and simple-precision
+floating point types, efficient strided and unaligned array operations
+and multi-threading code.
+
+Ivan Vilata contributed support for strings.
+
+Gregor Thalhammer implemented the support for Intel VML (Vector Math
+Library).
+
+Mark Wiebe added support for the new iterator in NumPy, which allows
+for better performance in more scenarios (like broadcasting,
+fortran-ordered or non-native byte orderings).
+
+Gaëtan de Menten contributed important bug fixes and speed
+enhancements.
+
+Antonio Valentino contributed the port to Python 3.
+
+Google Inc. contributed bug fixes.
+
+David Cox improved readability of the Readme.
+
+Robert A. McLeod contributed bug fixes and ported the documentation to
+numexpr.readthedocs.io. He served as the maintainer of the package
+from 2016 to 2023.
+
+Teng Liu fixed many bugs, and in particular, contributed valuable fixes
+to the new regex sanitizer for expressions.
diff --git a/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..eed663cf8b60f10356bdeff476d6307d6d1bc8b2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/numexpr-2.10.0.dist-info/METADATA
@@ -0,0 +1,212 @@
+Metadata-Version: 2.1
+Name: numexpr
+Version: 2.10.0
+Summary: Fast numerical expression evaluator for NumPy
+Home-page: https://github.com/pydata/numexpr
+Author: David M. Cooke, Francesc Alted, and others
+Maintainer: Francesc Alted
+Maintainer-email: faltet@gmail.com
+License: MIT
+Classifier: Development Status :: 6 - Mature
+Classifier: Intended Audience :: Financial and Insurance Industry
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: MacOS
+Requires-Python: >=3.9
+Description-Content-Type: text/x-rst
+License-File: LICENSE.txt
+License-File: AUTHORS.txt
+Requires-Dist: numpy >=1.19.3
+
+======================================================
+NumExpr: Fast numerical expression evaluator for NumPy
+======================================================
+
+:Author: David M. Cooke, Francesc Alted, and others.
+:Maintainer: Francesc Alted
+:Contact: faltet@gmail.com
+:URL: https://github.com/pydata/numexpr
+:Documentation: http://numexpr.readthedocs.io/en/latest/
+:GitHub Actions: |actions|
+:PyPi: |version|
+:DOI: |doi|
+:readthedocs: |docs|
+
+.. |actions| image:: https://github.com/pydata/numexpr/workflows/Build/badge.svg
+ :target: https://github.com/pydata/numexpr/actions
+.. |travis| image:: https://travis-ci.org/pydata/numexpr.png?branch=master
+ :target: https://travis-ci.org/pydata/numexpr
+.. |docs| image:: https://readthedocs.org/projects/numexpr/badge/?version=latest
+ :target: http://numexpr.readthedocs.io/en/latest
+.. |doi| image:: https://zenodo.org/badge/doi/10.5281/zenodo.2483274.svg
+ :target: https://doi.org/10.5281/zenodo.2483274
+.. |version| image:: https://img.shields.io/pypi/v/numexpr
+ :target: https://pypi.python.org/pypi/numexpr
+
+
+What is NumExpr?
+----------------
+
+NumExpr is a fast numerical expression evaluator for NumPy. With it,
+expressions that operate on arrays (like :code:`'3*a+4*b'`) are accelerated
+and use less memory than doing the same calculation in Python.
+
+In addition, its multi-threaded capabilities can make use of all your
+cores -- which generally results in substantial performance scaling compared
+to NumPy.
+
+Last but not least, numexpr can make use of Intel's VML (Vector Math
+Library, normally integrated in its Math Kernel Library, or MKL).
+This allows further acceleration of transcendental expressions.
+
+
+How NumExpr achieves high performance
+-------------------------------------
+
+The main reason why NumExpr achieves better performance than NumPy is
+that it avoids allocating memory for intermediate results. This
+results in better cache utilization and reduces memory access in
+general. Due to this, NumExpr works best with large arrays.
+
+NumExpr parses expressions into its own op-codes that are then used by
+an integrated computing virtual machine. The array operands are split
+into small chunks that easily fit in the cache of the CPU and passed
+to the virtual machine. The virtual machine then applies the
+operations on each chunk. It's worth noting that all temporaries and
+constants in the expression are also chunked. Chunks are distributed among
+the available cores of the CPU, resulting in highly parallelized code
+execution.
+
+The result is that NumExpr can get the most out of your machine's computing
+capabilities for array-wise computations. Common speed-ups with regard
+to NumPy are usually between 0.95x (for very simple expressions like
+:code:`'a + 1'`) and 4x (for relatively complex ones like :code:`'a*b-4.1*a > 2.5*b'`),
+although much higher speed-ups can be achieved for some functions and complex
+math operations (up to 15x in some cases).
+
+NumExpr performs best on matrices that are too large to fit in L1 CPU cache.
+In order to get a better idea on the different speed-ups that can be achieved
+on your platform, run the provided benchmarks.
+
+Installation
+------------
+
+From wheels
+^^^^^^^^^^^
+
+NumExpr is available for install via `pip` for a wide range of platforms and
+Python versions (which may be browsed at: https://pypi.org/project/numexpr/#files).
+Installation can be performed as::
+
+ pip install numexpr
+
+If you are using the Anaconda or Miniconda distribution of Python, you may
+prefer to use the `conda` package manager::
+
+ conda install numexpr
+
+From Source
+^^^^^^^^^^^
+
+On most \*nix systems your compilers will already be present. However, if you
+are using a virtual environment with a substantially newer version of Python than
+your system Python, you may be prompted to install a new version of `gcc` or `clang`.
+
+For Windows, you will need to install the Microsoft Visual C++ Build Tools
+(which are free) first. The version depends on which version of Python you have
+installed:
+
+https://wiki.python.org/moin/WindowsCompilers
+
+For Python 3.6+ simply installing the latest version of MSVC build tools should
+be sufficient. Note that wheels found via pip do not include MKL support. Wheels
+available via `conda` will have MKL, if the MKL backend is used for NumPy.
+
+See `requirements.txt` for the required version of NumPy.
+
+NumExpr is built in the standard Python way::
+
+ python setup.py build install
+
+You can test `numexpr` with::
+
+ python -c "import numexpr; numexpr.test()"
+
+Do not test NumExpr in the source directory or you will generate import errors.
+
+Enable Intel® MKL support
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+NumExpr includes support for Intel's MKL library. This may provide better
+performance on Intel architectures, mainly when evaluating transcendental
+functions (trigonometrical, exponential, ...).
+
+If you have Intel's MKL, copy the `site.cfg.example` that comes with the
+distribution to `site.cfg` and edit the latter file to provide correct paths to
+the MKL libraries in your system. After doing this, you can proceed with the
+usual building instructions listed above.
+
+Pay attention to the messages during the building process in order to know
+whether MKL has been detected or not. Finally, you can check the speed-ups on
+your machine by running the `bench/vml_timing.py` script (you can play with
+different parameters to the `set_vml_accuracy_mode()` and `set_vml_num_threads()`
+functions in the script so as to see how it would affect performance).
+
+Usage
+-----
+
+::
+
+ >>> import numpy as np
+ >>> import numexpr as ne
+
+ >>> a = np.arange(1e6) # Choose large arrays for better speedups
+ >>> b = np.arange(1e6)
+
+ >>> ne.evaluate("a + 1") # a simple expression
+ array([ 1.00000000e+00, 2.00000000e+00, 3.00000000e+00, ...,
+ 9.99998000e+05, 9.99999000e+05, 1.00000000e+06])
+
+ >>> ne.evaluate("a * b - 4.1 * a > 2.5 * b") # a more complex one
+ array([False, False, False, ..., True, True, True], dtype=bool)
+
+ >>> ne.evaluate("sin(a) + arcsinh(a/b)") # you can also use functions
+ array([ NaN, 1.72284457, 1.79067101, ..., 1.09567006,
+ 0.17523598, -0.09597844])
+
+ >>> s = np.array([b'abba', b'abbb', b'abbcdef'])
+ >>> ne.evaluate("b'abba' == s") # string arrays are supported too
+ array([ True, False, False], dtype=bool)
+
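+The number of threads used by the virtual machine can also be tuned at run
+time. A small sketch using numexpr's public `set_num_threads` helper::
+
+ >>> old_nthreads = ne.set_num_threads(4) # returns the previous setting
+ >>> ne.evaluate("a + b") # subsequent evaluations use up to 4 threads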
+
+Documentation
+-------------
+
+Please see the official documentation at `numexpr.readthedocs.io <https://numexpr.readthedocs.io/en/latest/>`_.
+Included are a user guide, benchmark results, and the API reference.
+
+
+Authors
+-------
+
+Please see `AUTHORS.txt <https://github.com/pydata/numexpr/blob/master/AUTHORS.txt>`_.
+
+
+License
+-------
+
+NumExpr is distributed under the `MIT <https://github.com/pydata/numexpr/blob/master/LICENSE.txt>`_ license.
+
+
+.. Local Variables:
+.. mode: text
+.. coding: utf-8
+.. fill-column: 70
+.. End:
diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..81241eca637574fb696620b54200d28a0ff53524
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2011-2020 Sergey Astanin and contributors
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..3909a0009159614c1805e56722e8b8d285389796
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/METADATA
@@ -0,0 +1,1149 @@
+Metadata-Version: 2.1
+Name: tabulate
+Version: 0.9.0
+Summary: Pretty-print tabular data
+Author-email: Sergey Astanin
+License: MIT
+Project-URL: Homepage, https://github.com/astanin/python-tabulate
+Classifier: Development Status :: 4 - Beta
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: Software Development :: Libraries
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Provides-Extra: widechars
+Requires-Dist: wcwidth ; extra == 'widechars'
+
+python-tabulate
+===============
+
+Pretty-print tabular data in Python, a library and a command-line
+utility.
+
+The main use cases of the library are:
+
+- printing small tables without hassle: just one function call,
+ formatting is guided by the data itself
+- authoring tabular data for lightweight plain-text markup: multiple
+ output formats suitable for further editing or transformation
+- readable presentation of mixed textual and numeric data: smart
+ column alignment, configurable number formatting, alignment by a
+ decimal point
+
+Installation
+------------
+
+To install the Python library and the command line utility, run:
+
+```shell
+pip install tabulate
+```
+
+The command line utility will be installed as `tabulate` to `bin` on
+Linux (e.g. `/usr/bin`); or as `tabulate.exe` to `Scripts` in your
+Python installation on Windows (e.g. `C:\Python39\Scripts\tabulate.exe`).
+
+You may consider installing the library only for the current user:
+
+```shell
+pip install tabulate --user
+```
+
+In this case the command line utility will be installed to
+`~/.local/bin/tabulate` on Linux and to
+`%APPDATA%\Python\Scripts\tabulate.exe` on Windows.
+
+To install just the library on Unix-like operating systems:
+
+```shell
+TABULATE_INSTALL=lib-only pip install tabulate
+```
+
+On Windows:
+
+```shell
+set TABULATE_INSTALL=lib-only
+pip install tabulate
+```
+
+Build status
+------------
+
+[Build status on CircleCI](https://circleci.com/gh/astanin/python-tabulate/tree/master) [Build status on AppVeyor](https://ci.appveyor.com/project/astanin/python-tabulate/branch/master)
+
+Library usage
+-------------
+
+The module provides just one function, `tabulate`, which takes a list of
+lists or another tabular data type as the first argument, and outputs a
+nicely formatted plain-text table:
+
+```pycon
+>>> from tabulate import tabulate
+
+>>> table = [["Sun",696000,1989100000],["Earth",6371,5973.6],
+... ["Moon",1737,73.5],["Mars",3390,641.85]]
+>>> print(tabulate(table))
+----- ------ -------------
+Sun 696000 1.9891e+09
+Earth 6371 5973.6
+Moon 1737 73.5
+Mars 3390 641.85
+----- ------ -------------
+```
+
+The following tabular data types are supported:
+
+- list of lists or another iterable of iterables
+- list or another iterable of dicts (keys as columns)
+- dict of iterables (keys as columns)
+- list of dataclasses (Python 3.7+ only, field names as columns)
+- two-dimensional NumPy array
+- NumPy record arrays (names as columns)
+- pandas.DataFrame
+
+Tabulate is a Python3 library.
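+
+For instance, a dict of iterables and a list of dicts are two sketches of
+equivalent inputs; with `headers="keys"` both render the same two-column table:
+
+```python
+from tabulate import tabulate
+
+# keys as column headers, values as columns
+print(tabulate({"name": ["spam", "eggs"], "qty": [42, 451]}, headers="keys"))
+# keys as column headers, one dict per row
+print(tabulate([{"name": "spam", "qty": 42}, {"name": "eggs", "qty": 451}], headers="keys"))
+```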
+
+### Headers
+
+The second optional argument named `headers` defines a list of column
+headers to be used:
+
+```pycon
+>>> print(tabulate(table, headers=["Planet","R (km)", "mass (x 10^29 kg)"]))
+Planet R (km) mass (x 10^29 kg)
+-------- -------- -------------------
+Sun 696000 1.9891e+09
+Earth 6371 5973.6
+Moon 1737 73.5
+Mars 3390 641.85
+```
+
+If `headers="firstrow"`, then the first row of data is used:
+
+```pycon
+>>> print(tabulate([["Name","Age"],["Alice",24],["Bob",19]],
+... headers="firstrow"))
+Name Age
+------ -----
+Alice 24
+Bob 19
+```
+
+If `headers="keys"`, then the keys of a dictionary/dataframe, or column
+indices are used. It also works for NumPy record arrays and lists of
+dictionaries or named tuples:
+
+```pycon
+>>> print(tabulate({"Name": ["Alice", "Bob"],
+... "Age": [24, 19]}, headers="keys"))
+ Age Name
+----- ------
+ 24 Alice
+ 19 Bob
+```
+
+### Row Indices
+
+By default, only pandas.DataFrame tables have an additional column
+called row index. To add a similar column to any other type of table,
+pass `showindex="always"` or `showindex=True` argument to `tabulate()`.
+To suppress row indices for all types of data, pass `showindex="never"`
+or `showindex=False`. To add a custom row index column, pass
+`showindex=rowIDs`, where `rowIDs` is some iterable:
+
+```pycon
+>>> print(tabulate([["F",24],["M",19]], showindex="always"))
+- - --
+0 F 24
+1 M 19
+- - --
+```
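+
+A sketch with a custom index (same default format as above):
+
+```pycon
+>>> print(tabulate([["F",24],["M",19]], showindex=["a","b"]))
+- - --
+a F 24
+b M 19
+- - --
+```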
+
+### Table format
+
+There is more than one way to format a table in plain text. The third
+optional argument named `tablefmt` defines how the table is formatted.
+
+Supported table formats are:
+
+- "plain"
+- "simple"
+- "github"
+- "grid"
+- "simple\_grid"
+- "rounded\_grid"
+- "heavy\_grid"
+- "mixed\_grid"
+- "double\_grid"
+- "fancy\_grid"
+- "outline"
+- "simple\_outline"
+- "rounded\_outline"
+- "heavy\_outline"
+- "mixed\_outline"
+- "double\_outline"
+- "fancy\_outline"
+- "pipe"
+- "orgtbl"
+- "asciidoc"
+- "jira"
+- "presto"
+- "pretty"
+- "psql"
+- "rst"
+- "mediawiki"
+- "moinmoin"
+- "youtrack"
+- "html"
+- "unsafehtml"
+- "latex"
+- "latex\_raw"
+- "latex\_booktabs"
+- "latex\_longtable"
+- "textile"
+- "tsv"
+
+`plain` tables do not use any pseudo-graphics to draw lines:
+
+```pycon
+>>> table = [["spam",42],["eggs",451],["bacon",0]]
+>>> headers = ["item", "qty"]
+>>> print(tabulate(table, headers, tablefmt="plain"))
+item qty
+spam 42
+eggs 451
+bacon 0
+```
+
+`simple` is the default format (the default may change in future
+versions). It corresponds to `simple_tables` in [Pandoc Markdown
+extensions](http://johnmacfarlane.net/pandoc/README.html#tables):
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="simple"))
+item qty
+------ -----
+spam 42
+eggs 451
+bacon 0
+```
+
+`github` follows the conventions of GitHub flavored Markdown. It
+corresponds to the `pipe` format without alignment colons:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="github"))
+| item | qty |
+|--------|-------|
+| spam | 42 |
+| eggs | 451 |
+| bacon | 0 |
+```
+
+`grid` is like tables formatted by Emacs'
+[table.el](http://table.sourceforge.net/) package. It corresponds to
+`grid_tables` in Pandoc Markdown extensions:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="grid"))
++--------+-------+
+| item | qty |
++========+=======+
+| spam | 42 |
++--------+-------+
+| eggs | 451 |
++--------+-------+
+| bacon | 0 |
++--------+-------+
+```
+
+`simple_grid` draws a grid using single-line box-drawing characters:
+
+ >>> print(tabulate(table, headers, tablefmt="simple_grid"))
+ ┌────────┬───────┐
+ │ item │ qty │
+ ├────────┼───────┤
+ │ spam │ 42 │
+ ├────────┼───────┤
+ │ eggs │ 451 │
+ ├────────┼───────┤
+ │ bacon │ 0 │
+ └────────┴───────┘
+
+`rounded_grid` draws a grid using single-line box-drawing characters with rounded corners:
+
+ >>> print(tabulate(table, headers, tablefmt="rounded_grid"))
+ ╭────────┬───────╮
+ │ item │ qty │
+ ├────────┼───────┤
+ │ spam │ 42 │
+ ├────────┼───────┤
+ │ eggs │ 451 │
+ ├────────┼───────┤
+ │ bacon │ 0 │
+ ╰────────┴───────╯
+
+`heavy_grid` draws a grid using bold (thick) single-line box-drawing characters:
+
+ >>> print(tabulate(table, headers, tablefmt="heavy_grid"))
+ ┏━━━━━━━━┳━━━━━━━┓
+ ┃ item ┃ qty ┃
+ ┣━━━━━━━━╋━━━━━━━┫
+ ┃ spam ┃ 42 ┃
+ ┣━━━━━━━━╋━━━━━━━┫
+ ┃ eggs ┃ 451 ┃
+ ┣━━━━━━━━╋━━━━━━━┫
+ ┃ bacon ┃ 0 ┃
+ ┗━━━━━━━━┻━━━━━━━┛
+
+`mixed_grid` draws a grid using a mix of light (thin) and heavy (thick) lines box-drawing characters:
+
+ >>> print(tabulate(table, headers, tablefmt="mixed_grid"))
+ ┍━━━━━━━━┯━━━━━━━┑
+ │ item │ qty │
+ ┝━━━━━━━━┿━━━━━━━┥
+ │ spam │ 42 │
+ ├────────┼───────┤
+ │ eggs │ 451 │
+ ├────────┼───────┤
+ │ bacon │ 0 │
+ ┕━━━━━━━━┷━━━━━━━┙
+
+`double_grid` draws a grid using double-line box-drawing characters:
+
+ >>> print(tabulate(table, headers, tablefmt="double_grid"))
+ ╔════════╦═══════╗
+ ║ item ║ qty ║
+ ╠════════╬═══════╣
+ ║ spam ║ 42 ║
+ ╠════════╬═══════╣
+ ║ eggs ║ 451 ║
+ ╠════════╬═══════╣
+ ║ bacon ║ 0 ║
+ ╚════════╩═══════╝
+
+`fancy_grid` draws a grid using a mix of single and
+ double-line box-drawing characters:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="fancy_grid"))
+╒════════╤═══════╕
+│ item │ qty │
+╞════════╪═══════╡
+│ spam │ 42 │
+├────────┼───────┤
+│ eggs │ 451 │
+├────────┼───────┤
+│ bacon │ 0 │
+╘════════╧═══════╛
+```
+
+`outline` is the same as the `grid` format but doesn't draw lines between rows:
+
+ >>> print(tabulate(table, headers, tablefmt="outline"))
+ +--------+-------+
+ | item | qty |
+ +========+=======+
+ | spam | 42 |
+ | eggs | 451 |
+ | bacon | 0 |
+ +--------+-------+
+
+`simple_outline` is the same as the `simple_grid` format but doesn't draw lines between rows:
+
+ >>> print(tabulate(table, headers, tablefmt="simple_outline"))
+ ┌────────┬───────┐
+ │ item │ qty │
+ ├────────┼───────┤
+ │ spam │ 42 │
+ │ eggs │ 451 │
+ │ bacon │ 0 │
+ └────────┴───────┘
+
+`rounded_outline` is the same as the `rounded_grid` format but doesn't draw lines between rows:
+
+ >>> print(tabulate(table, headers, tablefmt="rounded_outline"))
+ ╭────────┬───────╮
+ │ item │ qty │
+ ├────────┼───────┤
+ │ spam │ 42 │
+ │ eggs │ 451 │
+ │ bacon │ 0 │
+ ╰────────┴───────╯
+
+`heavy_outline` is the same as the `heavy_grid` format but doesn't draw lines between rows:
+
+ >>> print(tabulate(table, headers, tablefmt="heavy_outline"))
+ ┏━━━━━━━━┳━━━━━━━┓
+ ┃ item ┃ qty ┃
+ ┣━━━━━━━━╋━━━━━━━┫
+ ┃ spam ┃ 42 ┃
+ ┃ eggs ┃ 451 ┃
+ ┃ bacon ┃ 0 ┃
+ ┗━━━━━━━━┻━━━━━━━┛
+
+`mixed_outline` is the same as the `mixed_grid` format but doesn't draw lines between rows:
+
+ >>> print(tabulate(table, headers, tablefmt="mixed_outline"))
+ ┍━━━━━━━━┯━━━━━━━┑
+ │ item │ qty │
+ ┝━━━━━━━━┿━━━━━━━┥
+ │ spam │ 42 │
+ │ eggs │ 451 │
+ │ bacon │ 0 │
+ ┕━━━━━━━━┷━━━━━━━┙
+
+`double_outline` is the same as the `double_grid` format but doesn't draw lines between rows:
+
+ >>> print(tabulate(table, headers, tablefmt="double_outline"))
+ ╔════════╦═══════╗
+ ║ item ║ qty ║
+ ╠════════╬═══════╣
+ ║ spam ║ 42 ║
+ ║ eggs ║ 451 ║
+ ║ bacon ║ 0 ║
+ ╚════════╩═══════╝
+
+`fancy_outline` is the same as the `fancy_grid` format but doesn't draw lines between rows:
+
+ >>> print(tabulate(table, headers, tablefmt="fancy_outline"))
+ ╒════════╤═══════╕
+ │ item │ qty │
+ ╞════════╪═══════╡
+ │ spam │ 42 │
+ │ eggs │ 451 │
+ │ bacon │ 0 │
+ ╘════════╧═══════╛
+
+`presto` is like tables formatted by Presto cli:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="presto"))
+ item | qty
+--------+-------
+ spam | 42
+ eggs | 451
+ bacon | 0
+```
+
+`pretty` attempts to be close to the format emitted by the PrettyTable
+library:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="pretty"))
++-------+-----+
+| item | qty |
++-------+-----+
+| spam | 42 |
+| eggs | 451 |
+| bacon | 0 |
++-------+-----+
+```
+
+`psql` is like tables formatted by Postgres' psql cli:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="psql"))
++--------+-------+
+| item | qty |
+|--------+-------|
+| spam | 42 |
+| eggs | 451 |
+| bacon | 0 |
++--------+-------+
+```
+
+`pipe` follows the conventions of [PHP Markdown
+Extra](http://michelf.ca/projects/php-markdown/extra/#table) extension.
+It corresponds to `pipe_tables` in Pandoc. This format uses colons to
+indicate column alignment:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="pipe"))
+| item | qty |
+|:-------|------:|
+| spam | 42 |
+| eggs | 451 |
+| bacon | 0 |
+```
+
+`asciidoc` formats data like a simple table of the
+[AsciiDoctor](https://docs.asciidoctor.org/asciidoc/latest/syntax-quick-reference/#tables)
+format:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="asciidoc"))
+[cols="8<,7>",options="header"]
+|====
+| item | qty
+| spam | 42
+| eggs | 451
+| bacon | 0
+|====
+```
+
+`orgtbl` follows the conventions of Emacs
+[org-mode](http://orgmode.org/manual/Tables.html), and is also editable
+in the minor orgtbl-mode; hence its name:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="orgtbl"))
+| item | qty |
+|--------+-------|
+| spam | 42 |
+| eggs | 451 |
+| bacon | 0 |
+```
+
+`jira` follows the conventions of Atlassian Jira markup language:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="jira"))
+|| item || qty ||
+| spam | 42 |
+| eggs | 451 |
+| bacon | 0 |
+```
+
+`rst` formats data like a simple table of the
+[reStructuredText](http://docutils.sourceforge.net/docs/user/rst/quickref.html#tables)
+format:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="rst"))
+====== =====
+item qty
+====== =====
+spam 42
+eggs 451
+bacon 0
+====== =====
+```
+
+`mediawiki` format produces a table markup used in
+[Wikipedia](http://www.mediawiki.org/wiki/Help:Tables) and on other
+MediaWiki-based sites:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="mediawiki"))
+{| class="wikitable" style="text-align: left;"
+|+
+|-
+! item !! align="right"| qty
+|-
+| spam || align="right"| 42
+|-
+| eggs || align="right"| 451
+|-
+| bacon || align="right"| 0
+|}
+```
+
+`moinmoin` format produces a table markup used in
+[MoinMoin](https://moinmo.in/) wikis:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="moinmoin"))
+|| ''' item ''' || ''' quantity ''' ||
+|| spam || 41.999 ||
+|| eggs || 451 ||
+|| bacon || ||
+```
+
+`youtrack` format produces a table markup used in Youtrack tickets:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="youtrack"))
+|| item || quantity ||
+| spam | 41.999 |
+| eggs | 451 |
+| bacon | |
+```
+
+`textile` format produces a table markup used in
+[Textile](http://redcloth.org/hobix.com/textile/) format:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="textile"))
+|_. item |_. qty |
+|<. spam |>. 42 |
+|<. eggs |>. 451 |
+|<. bacon |>. 0 |
+```
+
+`html` produces standard HTML markup as an `html.escape`'d `str`
+with a `_repr_html_` method, so that Jupyter Lab and Notebook display the HTML,
+and a `.str` property so that the raw HTML remains accessible.
+The `unsafehtml` table format can be used if unescaped HTML is required:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="html"))
+<table>
+<thead>
+<tr><th>item  </th><th style="text-align: right;">  qty</th></tr>
+</thead>
+<tbody>
+<tr><td>spam  </td><td style="text-align: right;">   42</td></tr>
+<tr><td>eggs  </td><td style="text-align: right;">  451</td></tr>
+<tr><td>bacon </td><td style="text-align: right;">    0</td></tr>
+</tbody>
+</table>
+```
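+
+The returned value is still usable as a plain string; a minimal sketch
+(assuming the documented `.str` property):
+
+```python
+from tabulate import tabulate
+
+html = tabulate([["spam", 42]], headers=["item", "qty"], tablefmt="html")
+html            # renders as a table in Jupyter via _repr_html_
+raw = html.str  # the raw HTML markup as a plain str
+```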
+
+`latex` format creates a `tabular` environment for LaTeX markup,
+replacing special characters like `_` or `\` to their LaTeX
+correspondents:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="latex"))
+\begin{tabular}{lr}
+\hline
+ item & qty \\
+\hline
+ spam & 42 \\
+ eggs & 451 \\
+ bacon & 0 \\
+\hline
+\end{tabular}
+```
+
+`latex_raw` behaves like `latex` but does not escape LaTeX commands and
+special characters.
+
+`latex_booktabs` creates a `tabular` environment for LaTeX markup using
+spacing and style from the `booktabs` package.
+
+`latex_longtable` creates a table that can stretch along multiple pages,
+using the `longtable` package.
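+
+For reference, a sketch of the `booktabs` variant, which replaces the `\hline`
+rules above with `\toprule`/`\midrule`/`\bottomrule` (exact spacing may differ):
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="latex_booktabs"))
+\begin{tabular}{lr}
+\toprule
+ item & qty \\
+\midrule
+ spam & 42 \\
+ eggs & 451 \\
+ bacon & 0 \\
+\bottomrule
+\end{tabular}
+```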
+
+### Column alignment
+
+`tabulate` is smart about column alignment. It detects columns which
+contain only numbers, and aligns them by a decimal point (or flushes
+them to the right if they appear to be integers). Text columns are
+flushed to the left.
+
+You can override the default alignment with `numalign` and `stralign`
+named arguments. Possible column alignments are: `right`, `center`,
+`left`, `decimal` (only for numbers), and `None` (to disable alignment).
+
+Aligning by a decimal point works best when you need to compare numbers
+at a glance:
+
+```pycon
+>>> print(tabulate([[1.2345],[123.45],[12.345],[12345],[1234.5]]))
+----------
+ 1.2345
+ 123.45
+ 12.345
+12345
+ 1234.5
+----------
+```
+
+Compare this with a more common right alignment:
+
+```pycon
+>>> print(tabulate([[1.2345],[123.45],[12.345],[12345],[1234.5]], numalign="right"))
+------
+1.2345
+123.45
+12.345
+ 12345
+1234.5
+------
+```
+
+For `tabulate`, anything which can be parsed as a number is a number.
+Even numbers represented as strings are aligned properly. This feature
+comes in handy when reading a mixed table of text and numbers from a
+file:
+
+```pycon
+>>> import csv ; from io import StringIO
+>>> table = list(csv.reader(StringIO("spam, 42\neggs, 451\n")))
+>>> table
+[['spam', ' 42'], ['eggs', ' 451']]
+>>> print(tabulate(table))
+---- ----
+spam 42
+eggs 451
+---- ----
+```
+
+To disable this feature use `disable_numparse=True`.
+
+```pycon
+>>> print(tabulate([["Ver1", "18.0"], ["Ver2","19.2"]], tablefmt="simple", disable_numparse=True))
+---- ----
+Ver1 18.0
+Ver2 19.2
+---- ----
+```
+
+### Custom column alignment
+
+`tabulate` allows a custom column alignment to override the above. The
+`colalign` argument can be a list or a tuple of alignments, one per
+column. Possible column alignments are: `right`, `center`, `left`,
+`decimal` (only for numbers), and `None` (to disable alignment).
+Omitting an alignment uses the default. For example:
+
+```pycon
+>>> print(tabulate([["one", "two"], ["three", "four"]], colalign=("right",)))
+----- ----
+ one two
+three four
+----- ----
+```
+
+### Number formatting
+
+`tabulate` lets you define custom number formatting applied to all
+columns of decimal numbers, via the `floatfmt` named argument:
+
+```pycon
+>>> print(tabulate([["pi",3.141593],["e",2.718282]], floatfmt=".4f"))
+-- ------
+pi 3.1416
+e 2.7183
+-- ------
+```
+
+The `floatfmt` argument can be a list or a tuple of format strings, one per
+column, in which case every column may have different number formatting:
+
+```pycon
+>>> print(tabulate([[0.12345, 0.12345, 0.12345]], floatfmt=(".1f", ".3f")))
+--- ----- -------
+0.1 0.123 0.12345
+--- ----- -------
+```
+
+`intfmt` works similarly for integers:
+
+ >>> print(tabulate([["a",1000],["b",90000]], intfmt=","))
+ - ------
+ a 1,000
+ b 90,000
+ - ------
+
+### Text formatting
+
+By default, `tabulate` removes leading and trailing whitespace from text
+columns. To disable whitespace removal, set the global module-level flag
+`PRESERVE_WHITESPACE`:
+
+```python
+import tabulate
+tabulate.PRESERVE_WHITESPACE = True
+```
+
+### Wide (fullwidth CJK) symbols
+
+To properly align tables which contain wide characters (typically
+fullwidth glyphs from Chinese, Japanese or Korean languages), the user
+should install `wcwidth` library. To install it together with
+`tabulate`:
+
+```shell
+pip install tabulate[widechars]
+```
+
+Wide character support is enabled automatically if `wcwidth` library is
+already installed. To disable wide characters support without
+uninstalling `wcwidth`, set the global module-level flag
+`WIDE_CHARS_MODE`:
+
+```python
+import tabulate
+tabulate.WIDE_CHARS_MODE = False
+```
+
+### Multiline cells
+
+Most table formats support multiline cell text (text containing newline
+characters). The newline characters are honored as line break
+characters.
+
+Multiline cells are supported for data rows and for header rows.
+
+Further automatic line breaks are not inserted. Of course, some output
+formats such as latex or html handle automatic formatting of the cell
+content on their own, but for those that don't, the newline characters
+in the input cell text are the only means to break a line in cell text.
+
+Note that some output formats (e.g. simple, or plain) do not represent
+row delimiters, so that the representation of multiline cells in such
+formats may be ambiguous to the reader.
+
+The following examples of formatted output use the following table with
+a multiline cell, and headers with a multiline cell:
+
+```pycon
+>>> table = [["eggs",451],["more\nspam",42]]
+>>> headers = ["item\nname", "qty"]
+```
+
+`plain` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="plain"))
+item qty
+name
+eggs 451
+more 42
+spam
+```
+
+`simple` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="simple"))
+item qty
+name
+------ -----
+eggs 451
+more 42
+spam
+```
+
+`grid` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="grid"))
++--------+-------+
+| item | qty |
+| name | |
++========+=======+
+| eggs | 451 |
++--------+-------+
+| more | 42 |
+| spam | |
++--------+-------+
+```
+
+`fancy_grid` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="fancy_grid"))
+╒════════╤═══════╕
+│ item │ qty │
+│ name │ │
+╞════════╪═══════╡
+│ eggs │ 451 │
+├────────┼───────┤
+│ more │ 42 │
+│ spam │ │
+╘════════╧═══════╛
+```
+
+`pipe` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="pipe"))
+| item | qty |
+| name | |
+|:-------|------:|
+| eggs | 451 |
+| more | 42 |
+| spam | |
+```
+
+`orgtbl` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="orgtbl"))
+| item | qty |
+| name | |
+|--------+-------|
+| eggs | 451 |
+| more | 42 |
+| spam | |
+```
+
+`jira` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="jira"))
+| item | qty |
+| name | |
+|:-------|------:|
+| eggs | 451 |
+| more | 42 |
+| spam | |
+```
+
+`presto` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="presto"))
+ item | qty
+ name |
+--------+-------
+ eggs | 451
+ more | 42
+ spam |
+```
+
+`pretty` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="pretty"))
++------+-----+
+| item | qty |
+| name | |
++------+-----+
+| eggs | 451 |
+| more | 42 |
+| spam | |
++------+-----+
+```
+
+`psql` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="psql"))
++--------+-------+
+| item | qty |
+| name | |
+|--------+-------|
+| eggs | 451 |
+| more | 42 |
+| spam | |
++--------+-------+
+```
+
+`rst` tables:
+
+```pycon
+>>> print(tabulate(table, headers, tablefmt="rst"))
+====== =====
+item qty
+name
+====== =====
+eggs 451
+more 42
+spam
+====== =====
+```
+
+Multiline cells are not well-supported for the other table formats.
+
+### Automating Multilines
+While tabulate supports data with explicitly provided multiline entries,
+it also provides some support to manage this wrapping internally.
+
+The `maxcolwidths` argument is a list where each entry specifies the maximum width for
+its respective column. Any cell whose content exceeds this width is wrapped automatically.
+To assign the same maximum width to all columns, a single int scalar can be used.
+
+Use `None` for any columns where an explicit maximum does not need to be provided,
+in which case no automatic multiline wrapping will take place.
+
+The wrapping uses the python standard [textwrap.wrap](https://docs.python.org/3/library/textwrap.html#textwrap.wrap)
+function with default parameters - aside from width.
+
+This example demonstrates usage of automatic multiline wrapping, though typically
+the lines being wrapped would probably be significantly longer than this.
+
+```pycon
+>>> print(tabulate([["John Smith", "Middle Manager"]], headers=["Name", "Title"], tablefmt="grid", maxcolwidths=[None, 8]))
++------------+---------+
+| Name | Title |
++============+=========+
+| John Smith | Middle |
+| | Manager |
++------------+---------+
+```
+
+### Adding Separating lines
+One might want to add one or more separating lines to highlight different sections in a table.
+
+The separating lines will be of the same type as the one defined by the specified
+formatter (`linebetweenrows`, `linebelowheader`, `linebelow`, or `lineabove`), or a
+simple empty line when none is defined for the formatter.
+
+
+ >>> from tabulate import tabulate, SEPARATING_LINE
+
+ table = [["Earth",6371],
+ ["Mars",3390],
+ SEPARATING_LINE,
+ ["Moon",1737]]
+ print(tabulate(table, tablefmt="simple"))
+ ----- ----
+ Earth 6371
+ Mars 3390
+ ----- ----
+ Moon 1737
+ ----- ----
+
+### ANSI support
+ANSI escape codes are non-printable byte sequences usually used for terminal operations like setting
+color output or modifying cursor positions. Although multi-byte ANSI sequences are inherently non-printable,
+they still add unwanted extra length to strings. For example:
+
+ >>> len('\033[31mthis text is red\033[0m') # printable length is 16
+ 25
+
+To deal with this, string lengths are calculated after first removing all ANSI escape sequences. This ensures
+that the actual printable length is used for column widths, rather than the byte length. In the final, printable
+table, however, ANSI escape sequences are not removed so the original styling is preserved.
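+
+Conceptually, the width calculation behaves like the sketch below. This is an
+illustration only, not tabulate's actual internals (which handle a wider range
+of escape sequences):
+
+```python
+import re
+
+# Matches CSI-style color/style sequences such as '\033[31m' and '\033[0m'.
+ANSI_COLOR = re.compile(r"\x1b\[[0-9;]*m")
+
+def printable_len(s: str) -> int:
+    """Length of a string with color/style escape sequences removed."""
+    return len(ANSI_COLOR.sub("", s))
+
+print(len("\033[31mthis text is red\033[0m"))            # 25 (raw length)
+print(printable_len("\033[31mthis text is red\033[0m"))  # 16 (printable length)
+```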
+
+Some terminals support a special grouping of ANSI escape sequences that are intended to display hyperlinks
+much in the same way they are shown in browsers. These are handled just as described above: non-printable
+ANSI escape sequences are removed prior to string length calculation. The only difference with escaped
+hyperlinks is that the column width is based on the length of the URL _text_ rather than the URL
+itself (terminals would show this text). For example:
+
+    >>> len('\x1b]8;;https://example.com\x1b\\example\x1b]8;;\x1b\\') # display length is 7, showing 'example'
+    40
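+
+A sketch of how such OSC 8 hyperlinks could be measured (again, an
+illustration rather than tabulate's actual code):
+
+```python
+import re
+
+# OSC 8 hyperlinks have the form: ESC ]8;;URL ESC \ TEXT ESC ]8;; ESC \
+# Group 1 captures the URL, group 2 the visible link text.
+OSC8_LINK = re.compile(r"\x1b\]8;;(.*?)\x1b\\(.*?)\x1b\]8;;\x1b\\")
+
+def display_len(s: str) -> int:
+    """Width of a hyperlinked string as a terminal would display it."""
+    return len(OSC8_LINK.sub(r"\2", s))
+
+link = "\x1b]8;;https://example.com\x1b\\example\x1b]8;;\x1b\\"
+print(len(link))          # 40 (raw length)
+print(display_len(link))  # 7  ('example')
+```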
+
+
+Usage of the command line utility
+---------------------------------
+
+    Usage: tabulate [options] [FILE ...]
+
+    FILE                      a filename of the file with tabular data;
+                              if "-" or missing, read data from stdin.
+
+    Options:
+
+    -h, --help                show this message
+    -1, --header              use the first row of data as a table header
+    -o FILE, --output FILE    print table to FILE (default: stdout)
+    -s REGEXP, --sep REGEXP   use a custom column separator (default: whitespace)
+    -F FPFMT, --float FPFMT   floating point number format (default: g)
+    -I INTFMT, --int INTFMT   integer number format (default: "")
+    -f FMT, --format FMT      set output table format; supported formats:
+                              plain, simple, github, grid, fancy_grid, pipe,
+                              orgtbl, rst, mediawiki, html, latex, latex_raw,
+                              latex_booktabs, latex_longtable, tsv
+                              (default: simple)
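+
+For instance, a session along these lines (sample data invented for
+illustration) renders whitespace-separated values from stdin as a grid table,
+treating the first row as the header:
+
+```shell
+printf 'name qty\neggs 451\nspam 42\n' | tabulate -1 -f grid
+```
+
+which should print something like:
+
+```
++------+-----+
+| name | qty |
++======+=====+
+| eggs | 451 |
++------+-----+
+| spam |  42 |
++------+-----+
+```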
+
+Performance considerations
+--------------------------
+
+Such features as decimal point alignment and trying to parse everything
+as a number imply that `tabulate`:
+
+- has to "guess" how to print a particular tabular data type
+- needs to keep the entire table in-memory
+- has to "transpose" the table twice
+- does much more work than may be apparent
+
+It may not be suitable for serializing really big tables (but who's
+going to do that, anyway?) or printing tables in performance-sensitive
+applications. `tabulate` is about two orders of magnitude slower than
+simply joining lists of values with a tab, comma, or other separator.
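+
+That ballpark is easy to check with a quick, illustrative measurement (exact
+numbers will vary by machine and Python version):
+
+```python
+import timeit
+
+# Build a small mixed table once; compare tabulate against a plain tab join.
+setup = (
+    "from tabulate import tabulate\n"
+    "table = [[i, 'row %d' % i] for i in range(10)]"
+)
+t_tab = timeit.timeit("tabulate(table)", setup=setup, number=1000)
+t_join = timeit.timeit(
+    "'\\n'.join('\\t'.join(map(str, row)) for row in table)",
+    setup=setup,
+    number=1000,
+)
+print(f"tabulate: {t_tab:.4f}s  tab-join: {t_join:.4f}s")
+```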
+
+At the same time, `tabulate` is comparable to other table
+pretty-printers. Given a 10x10 table (a list of lists) of mixed text and
+numeric data, `tabulate` appears to be slower than `asciitable`, and
+faster than `PrettyTable` and `texttable`. The following mini-benchmark
+was run in Python 3.9.13 on Windows 10:
+
+    =================================  ==========  ===========
+    Table formatter                      time, μs    rel. time
+    =================================  ==========  ===========
+    csv to StringIO                          12.5          1.0
+    join with tabs and newlines              14.6          1.2
+    asciitable (0.8.0)                      192.0         15.4
+    tabulate (0.9.0)                        483.5         38.7
+    tabulate (0.9.0, WIDE_CHARS_MODE)       637.6         51.1
+    PrettyTable (3.4.1)                    1080.6         86.6
+    texttable (1.6.4)                      1390.3        111.4
+    =================================  ==========  ===========
+
+
+Version history
+---------------
+
+The full version history can be found at the [changelog](https://github.com/astanin/python-tabulate/blob/master/CHANGELOG).
+
+How to contribute
+-----------------
+
+Contributions should include tests and an explanation for the changes
+they propose. Documentation (examples, docstrings, README.md) should be
+updated accordingly.
+
+This project uses the [pytest](https://docs.pytest.org/) testing
+framework and [tox](https://tox.readthedocs.io/) to automate testing in
+different environments. Add tests to one of the files in the `test/`
+folder.
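+
+A new test can be as simple as the following sketch (the file and test names
+here are hypothetical):
+
+```python
+# test/test_contrib_example.py -- hypothetical file name
+from tabulate import tabulate
+
+def test_two_column_table_contains_values():
+    result = tabulate([["spam", 41], ["eggs", 451]], headers=["item", "qty"])
+    assert "spam" in result
+    assert "451" in result
+```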
+
+To run tests on all supported Python versions, make sure all Python
+interpreters, `pytest` and `tox` are installed, then run `tox` in the root
+of the project source tree.
+
+On Linux, `tox` expects to find executables like `python3.7`, `python3.8`, etc.
+On Windows, it looks for `C:\Python37\python.exe`, `C:\Python38\python.exe`, etc. instead.
+
+One way to install all the required versions of the Python interpreter is to use [pyenv](https://github.com/pyenv/pyenv).
+All versions can then be easily installed with something like:
+
+ pyenv install 3.7.12
+ pyenv install 3.8.12
+ ...
+
+Don't forget to change your `PATH` so that `tox` knows how to find all the installed versions, for example:
+
+ export PATH="${PATH}:${HOME}/.pyenv/shims"
+
+To test only some Python environments, use the `-e` option. For example, to
+test only against Python 3.7 and Python 3.10, run:
+
+```shell
+tox -e py37,py310
+```
+
+in the root of the project source tree.
+
+To enable NumPy and Pandas tests, run:
+
+```shell
+tox -e py37-extra,py310-extra
+```
+
+(This may take a long time the first time, because NumPy and Pandas will
+have to be installed in the new virtual environments.)
+
+To fix code formatting:
+
+```shell
+tox -e lint
+```
+
+See the `tox.ini` file to learn how to use it to test
+individual Python versions.
+
+Contributors
+------------
+
+Sergey Astanin, Pau Tallada Crespí, Erwin Marsi, Mik Kocikowski, Bill
+Ryder, Zach Dwiel, Frederik Rietdijk, Philipp Bogensberger, Greg
+(anonymous), Stefan Tatschner, Emiel van Miltenburg, Brandon Bennett,
+Amjith Ramanujam, Jan Schulz, Simon Percivall, Javier Santacruz
+López-Cepero, Sam Denton, Alexey Ziyangirov, acaird, Cesar Sanchez,
+naught101, John Vandenberg, Zack Dever, Christian Clauss, Benjamin
+Maier, Andy MacKinlay, Thomas Roten, Jue Wang, Joe King, Samuel Phan,
+Nick Satterly, Daniel Robbins, Dmitry B, Lars Butler, Andreas Maier,
+Dick Marinus, Sébastien Celles, Yago González, Andrew Gaul, Wim Glenn,
+Jean Michel Rouly, Tim Gates, John Vandenberg, Sorin Sbarnea,
+Wes Turner, Andrew Tija, Marco Gorelli, Sean McGinnis, danja100,
+endolith, Dominic Davis-Foster, pavlocat, Daniel Aslau, paulc,
+Felix Yan, Shane Loretz, Frank Busse, Harsh Singh, Derek Weitzel,
+Vladimir Vrzić, 서승우 (chrd5273), Georgy Frolov, Christian Cwienk,
+Bart Broere, Vilhelm Prytz, Alexander Gažo, Hugo van Kemenade,
+jamescooke, Matt Warner, Jérôme Provensal, Kevin Deldycke,
+Kian-Meng Ang, Kevin Patterson, Shodhan Save, cleoold, KOLANICH,
+Vijaya Krishna Kasula, Furcy Pin, Christian Fibich, Shaun Duncan,
+Dimitri Papadopoulos.
+
diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..10514ec11b2579b0ce185b2f4ac5561784cfe1e6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/RECORD
@@ -0,0 +1,12 @@
+../../../bin/tabulate,sha256=dVnfe0E_liOD8GRTwtNmmnefPYRBlBJKIOVBve2T6x8,243
+tabulate-0.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+tabulate-0.9.0.dist-info/LICENSE,sha256=zfq1DTfY6tBkaPt2o6uvzQXBZ0nsihjuv6UP1Ix8stI,1080
+tabulate-0.9.0.dist-info/METADATA,sha256=8oAqreJhIJG0WVHyZa8pF0-QwyNvaMyMzetkaUHmKWk,34132
+tabulate-0.9.0.dist-info/RECORD,,
+tabulate-0.9.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+tabulate-0.9.0.dist-info/entry_points.txt,sha256=8DmChBYma2n4UqC1VkkKbD5Nu4MrdZasURoeTtG0JVo,44
+tabulate-0.9.0.dist-info/top_level.txt,sha256=qfqkQ2az7LTxUeRePtX8ggmh294Kf1ERdI-11aWqFZU,9
+tabulate/__init__.py,sha256=X3rwoo_NcTuDDJc4hnWUX6jElQsFtY-NGHyQCldS1X0,95290
+tabulate/__pycache__/__init__.cpython-310.pyc,,
+tabulate/__pycache__/version.cpython-310.pyc,,
+tabulate/version.py,sha256=QVVpjnTor93ym-Tb6Y_XtL_6pmQ3MtoNy3Q6I0o3Yqg,181
diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/entry_points.txt b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1efc880fa41d55743166d009c0a7368ba2493679
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[console_scripts]
+tabulate = tabulate:_main
diff --git a/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a5d51591ebf67bc53acb1c8481da364dae742a02
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tabulate-0.9.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+tabulate