diff --git a/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..d8dd6d12d6d35bcff6b00d4b3ed960f5eef4b9d0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/async_timeout-4.0.3.dist-info/METADATA
@@ -0,0 +1,131 @@
+Metadata-Version: 2.1
+Name: async-timeout
+Version: 4.0.3
+Summary: Timeout context manager for asyncio programs
+Home-page: https://github.com/aio-libs/async-timeout
+Author: Andrew Svetlov
+Author-email: andrew.svetlov@gmail.com
+License: Apache 2
+Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
+Project-URL: CI: GitHub Actions, https://github.com/aio-libs/async-timeout/actions
+Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/async-timeout
+Project-URL: GitHub: issues, https://github.com/aio-libs/async-timeout/issues
+Project-URL: GitHub: repo, https://github.com/aio-libs/async-timeout
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Framework :: AsyncIO
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: typing-extensions >=3.6.5 ; python_version < "3.8"
+
+async-timeout
+=============
+.. image:: https://travis-ci.com/aio-libs/async-timeout.svg?branch=master
+   :target: https://travis-ci.com/aio-libs/async-timeout
+.. image:: https://codecov.io/gh/aio-libs/async-timeout/branch/master/graph/badge.svg
+   :target: https://codecov.io/gh/aio-libs/async-timeout
+.. image:: https://img.shields.io/pypi/v/async-timeout.svg
+   :target: https://pypi.python.org/pypi/async-timeout
+.. image:: https://badges.gitter.im/Join%20Chat.svg
+   :target: https://gitter.im/aio-libs/Lobby
+   :alt: Chat on Gitter
+
+asyncio-compatible timeout context manager.
+
+
+Usage example
+-------------
+
+
+The context manager is useful when you want to apply timeout
+logic around a block of code, or when ``asyncio.wait_for()`` is
+not suitable. It is also much faster than ``asyncio.wait_for()``
+because ``timeout`` doesn't create a new task.
+
+The ``timeout(delay, *, loop=None)`` call returns a context manager
+that cancels a block when the *timeout* expires::
+
+   from async_timeout import timeout
+   async with timeout(1.5):
+       await inner()
+
+1. If ``inner()`` completes in less than ``1.5`` seconds, nothing
+   happens.
+2. Otherwise ``inner()`` is cancelled internally by sending
+   ``asyncio.CancelledError`` into it, but ``asyncio.TimeoutError`` is
+   raised outside of the context manager scope.
+
+The *timeout* parameter may be ``None`` to skip the timeout functionality.
+
+
+Alternatively, ``timeout_at(when)`` can be used to schedule the deadline
+at an absolute time::
+
+   loop = asyncio.get_event_loop()
+   now = loop.time()
+
+   async with timeout_at(now + 1.5):
+       await inner()
+
+
+Please note: this is not POSIX time but event loop time with an
+undefined starting base, e.g. the time of the system power-on.
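For a runnable end-to-end illustration of both forms, here is a minimal sketch; the ``inner()`` body and the delays are made up for the example:

```python
import asyncio

from async_timeout import timeout, timeout_at


async def inner():
    await asyncio.sleep(10)  # deliberately longer than the timeouts below


async def main():
    loop = asyncio.get_running_loop()

    try:
        async with timeout(0.1):  # relative delay in seconds
            await inner()
    except asyncio.TimeoutError:
        print("relative timeout expired")

    try:
        async with timeout_at(loop.time() + 0.1):  # absolute event-loop deadline
            await inner()
    except asyncio.TimeoutError:
        print("absolute deadline expired")


asyncio.run(main())
```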
+
+
+The context manager has an ``.expired`` property to check whether the
+timeout happened inside the context manager::
+
+   async with timeout(1.5) as cm:
+       await inner()
+   print(cm.expired)
+
+The property is ``True`` if the ``inner()`` execution was cancelled by the
+timeout context manager.
+
+If the ``inner()`` call explicitly raises ``TimeoutError`` itself,
+``cm.expired`` is ``False``.
+
+The scheduled deadline time is available as the ``.deadline`` property::
+
+   async with timeout(1.5) as cm:
+       cm.deadline
+
+A timeout that has not finished yet can be rescheduled with the ``shift()``
+or ``update()`` methods::
+
+   async with timeout(1.5) as cm:
+       cm.shift(1)  # add another second on waiting
+       cm.update(loop.time() + 5)  # reschedule to now+5 seconds
+
+Rescheduling is forbidden once the timeout has expired or after exiting
+the ``async with`` code block.
+
+
+Installation
+------------
+
+::
+
+   $ pip install async-timeout
+
+The library is Python 3 only!
+
+
+
+Authors and License
+-------------------
+
+The module is written by Andrew Svetlov.
+
+It's *Apache 2* licensed and freely available.
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b309b01ce11496b9318887cf0dc32aecfa10876
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..00c1b5fba3bec2ca3de25ca95f0eb547148c31d2
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..323b0a20c782ce333c139182e9043df1af524b50
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_templating.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_templating.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..032cf52d83ccc277fd7e1e281012d930ca482175
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_templating.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_types.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ccbf90be8054a2d62772bbf3da5f73427ccd186
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_types.cpython-310.pyc differ
diff --git
a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e91051252dcfc8951d153522cb258f15685a0be Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bd6dd6aa362efa87a30c52bda6b01bdb3409f52 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..647633c5fbf0f589d050274e9104ffb0d66c2ff5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06c71c26db941b0694fb5b29f1b6eb0ee553e5c9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9de981105b64c389ecd69e51530d44ab239ca48d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..846f8ef4855b1224cfb7f4dbcf0111dae6b11f07 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5771a6751f012734255eeeabafddf891ad768089 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/templates/datasetcard_template.md b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/templates/datasetcard_template.md new file mode 100644 index 0000000000000000000000000000000000000000..9af29ebbed93653ec74a8952e314e7554323ef15 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/templates/datasetcard_template.md @@ -0,0 +1,143 @@ +--- +# For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1 +# Doc / guide: https://huggingface.co/docs/hub/datasets-cards +{{ card_data }} +--- + +# Dataset Card for {{ pretty_name | default("Dataset Name", true) }} + + + +{{ dataset_summary | default("", true) }} + +## Dataset Details + +### Dataset Description + + + +{{ dataset_description | default("", true) }} + +- **Curated by:** {{ curators | default("[More Information Needed]", true)}} +- **Funded by [optional]:** {{ funded_by | default("[More Information Needed]", true)}} +- **Shared by [optional]:** {{ shared_by | default("[More Information Needed]", true)}} +- **Language(s) (NLP):** {{ language | default("[More Information Needed]", true)}} +- **License:** {{ license | default("[More Information Needed]", true)}} + +### Dataset Sources [optional] + + + +- **Repository:** {{ repo | default("[More Information Needed]", true)}} +- **Paper [optional]:** {{ paper | default("[More Information Needed]", true)}} +- **Demo [optional]:** {{ demo | default("[More Information Needed]", true)}} + +## Uses + + + +### Direct Use + + + +{{ direct_use | default("[More Information Needed]", true)}} + +### Out-of-Scope Use + + + +{{ out_of_scope_use | default("[More Information Needed]", true)}} + +## Dataset Structure + + + +{{ dataset_structure | default("[More Information Needed]", true)}} + +## Dataset Creation + +### Curation Rationale + + + +{{ curation_rationale_section | default("[More Information Needed]", true)}} + +### Source Data + + + +#### Data Collection and Processing + + + +{{ data_collection_and_processing_section | default("[More Information Needed]", true)}} + +#### Who are the source data producers? + + + +{{ source_data_producers_section | default("[More Information Needed]", true)}} + +### Annotations [optional] + + + +#### Annotation process + + + +{{ annotation_process_section | default("[More Information Needed]", true)}} + +#### Who are the annotators? + + + +{{ who_are_annotators_section | default("[More Information Needed]", true)}} + +#### Personal and Sensitive Information + + + +{{ personal_and_sensitive_information | default("[More Information Needed]", true)}} + +## Bias, Risks, and Limitations + + + +{{ bias_risks_limitations | default("[More Information Needed]", true)}} + +### Recommendations + + + +{{ bias_recommendations | default("Users should be made aware of the risks, biases and limitations of the dataset. 
More information needed for further recommendations.", true)}} + +## Citation [optional] + + + +**BibTeX:** + +{{ citation_bibtex | default("[More Information Needed]", true)}} + +**APA:** + +{{ citation_apa | default("[More Information Needed]", true)}} + +## Glossary [optional] + + + +{{ glossary | default("[More Information Needed]", true)}} + +## More Information [optional] + +{{ more_information | default("[More Information Needed]", true)}} + +## Dataset Card Authors [optional] + +{{ dataset_card_authors | default("[More Information Needed]", true)}} + +## Dataset Card Contact + +{{ dataset_card_contact | default("[More Information Needed]", true)}} diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/templates/modelcard_template.md b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/templates/modelcard_template.md new file mode 100644 index 0000000000000000000000000000000000000000..79ca15e4547debac763b390ef8e4b715e6f6403f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/templates/modelcard_template.md @@ -0,0 +1,200 @@ +--- +# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 +# Doc / guide: https://huggingface.co/docs/hub/model-cards +{{ card_data }} +--- + +# Model Card for {{ model_id | default("Model ID", true) }} + + + +{{ model_summary | default("", true) }} + +## Model Details + +### Model Description + + + +{{ model_description | default("", true) }} + +- **Developed by:** {{ developers | default("[More Information Needed]", true)}} +- **Funded by [optional]:** {{ funded_by | default("[More Information Needed]", true)}} +- **Shared by [optional]:** {{ shared_by | default("[More Information Needed]", true)}} +- **Model type:** {{ model_type | default("[More Information Needed]", true)}} +- **Language(s) (NLP):** {{ language | default("[More Information Needed]", true)}} +- **License:** {{ license | default("[More Information Needed]", true)}} +- **Finetuned from model [optional]:** {{ base_model | default("[More Information Needed]", true)}} + +### Model Sources [optional] + + + +- **Repository:** {{ repo | default("[More Information Needed]", true)}} +- **Paper [optional]:** {{ paper | default("[More Information Needed]", true)}} +- **Demo [optional]:** {{ demo | default("[More Information Needed]", true)}} + +## Uses + + + +### Direct Use + + + +{{ direct_use | default("[More Information Needed]", true)}} + +### Downstream Use [optional] + + + +{{ downstream_use | default("[More Information Needed]", true)}} + +### Out-of-Scope Use + + + +{{ out_of_scope_use | default("[More Information Needed]", true)}} + +## Bias, Risks, and Limitations + + + +{{ bias_risks_limitations | default("[More Information Needed]", true)}} + +### Recommendations + + + +{{ bias_recommendations | default("Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", true)}} + +## How to Get Started with the Model + +Use the code below to get started with the model. 
+ +{{ get_started_code | default("[More Information Needed]", true)}} + +## Training Details + +### Training Data + + + +{{ training_data | default("[More Information Needed]", true)}} + +### Training Procedure + + + +#### Preprocessing [optional] + +{{ preprocessing | default("[More Information Needed]", true)}} + + +#### Training Hyperparameters + +- **Training regime:** {{ training_regime | default("[More Information Needed]", true)}} + +#### Speeds, Sizes, Times [optional] + + + +{{ speeds_sizes_times | default("[More Information Needed]", true)}} + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +{{ testing_data | default("[More Information Needed]", true)}} + +#### Factors + + + +{{ testing_factors | default("[More Information Needed]", true)}} + +#### Metrics + + + +{{ testing_metrics | default("[More Information Needed]", true)}} + +### Results + +{{ results | default("[More Information Needed]", true)}} + +#### Summary + +{{ results_summary | default("", true) }} + +## Model Examination [optional] + + + +{{ model_examination | default("[More Information Needed]", true)}} + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** {{ hardware_type | default("[More Information Needed]", true)}} +- **Hours used:** {{ hours_used | default("[More Information Needed]", true)}} +- **Cloud Provider:** {{ cloud_provider | default("[More Information Needed]", true)}} +- **Compute Region:** {{ cloud_region | default("[More Information Needed]", true)}} +- **Carbon Emitted:** {{ co2_emitted | default("[More Information Needed]", true)}} + +## Technical Specifications [optional] + +### Model Architecture and Objective + +{{ model_specs | default("[More Information Needed]", true)}} + +### Compute Infrastructure + +{{ compute_infrastructure | default("[More Information Needed]", true)}} + +#### Hardware + +{{ hardware_requirements | default("[More Information Needed]", true)}} + +#### Software + +{{ software | default("[More Information Needed]", true)}} + +## Citation [optional] + + + +**BibTeX:** + +{{ citation_bibtex | default("[More Information Needed]", true)}} + +**APA:** + +{{ citation_apa | default("[More Information Needed]", true)}} + +## Glossary [optional] + + + +{{ glossary | default("[More Information Needed]", true)}} + +## More Information [optional] + +{{ more_information | default("[More Information Needed]", true)}} + +## Model Card Authors [optional] + +{{ model_card_authors | default("[More Information Needed]", true)}} + +## Model Card Contact + +{{ model_card_contact | default("[More Information Needed]", true)}} diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ce273a946879419fbd4300641dda9bd63e7f795 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_chunk_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_chunk_utils.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..aa21be62a31fd3aa64433107c0d4c136699fd2f7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_chunk_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_datetime.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_datetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a90158576247bf66e78f957463157276463b4f9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_datetime.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_errors.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27f35716b5c2183f5348ba52b2c388526e0f4fc3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_errors.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_git_credential.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_git_credential.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b66f50edf53a871ac3d711e18ffcc082b8072a7a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_git_credential.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_http.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_http.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d5f7bb28c392db247e0371242ef7f2e86776eea Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_http.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b30fcdc8a12ab987c6c278f15a4948cb09d436aa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10e83cc4394d559255c1f74f268de0870e75ddcc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f33307c011e6578a690971780e28b44e2b82498d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_typing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8bf7d71179f1478c317e770efa495624e661e33 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_typing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79dbec5e3e17f7e83870e093b832304a29ddb5b0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/logging.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebd0eab65ae685a47a723612e77cd9c7adaa9ba5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/logging.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_cache_assets.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_cache_assets.py new file mode 100644 index 0000000000000000000000000000000000000000..e5d435df9b0bb0c67c0bcb5ef65711e9aef367f6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_cache_assets.py @@ -0,0 +1,135 @@ +# coding=utf-8 +# Copyright 2019-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from pathlib import Path +from typing import Union + +from ..constants import HF_ASSETS_CACHE + + +def cached_assets_path( + library_name: str, + namespace: str = "default", + subfolder: str = "default", + *, + assets_dir: Union[str, Path, None] = None, +): + """Return a folder path to cache arbitrary files. + + `huggingface_hub` provides a canonical folder path to store assets. This is the + recommended way to integrate cache in a downstream library as it will benefit from + the builtins tools to scan and delete the cache properly. + + The distinction is made between files cached from the Hub and assets. Files from the + Hub are cached in a git-aware manner and entirely managed by `huggingface_hub`. See + [related documentation](https://huggingface.co/docs/huggingface_hub/how-to-cache). + All other files that a downstream library caches are considered to be "assets" + (files downloaded from external sources, extracted from a .tar archive, preprocessed + for training,...). + + Once the folder path is generated, it is guaranteed to exist and to be a directory. 
+
+    The path is based on 3 levels of depth: the library name, a namespace and a
+    subfolder. Those 3 levels grant flexibility while allowing `huggingface_hub` to
+    expect folders when scanning/deleting parts of the assets cache. Within a library,
+    it is expected that all namespaces share the same subset of subfolder names, but this
+    is not a mandatory rule. The downstream library then has full control over which file
+    structure to adopt within its cache. Namespace and subfolder are optional (they
+    default to a `"default/"` subfolder), but the library name is mandatory, as we want
+    every downstream library to manage its own cache.
+
+    Expected tree:
+    ```text
+        assets/
+        └── datasets/
+        │   ├── SQuAD/
+        │   │   ├── downloaded/
+        │   │   ├── extracted/
+        │   │   └── processed/
+        │   ├── Helsinki-NLP--tatoeba_mt/
+        │       ├── downloaded/
+        │       ├── extracted/
+        │       └── processed/
+        └── transformers/
+            ├── default/
+            │   ├── something/
+            ├── bert-base-cased/
+            │   ├── default/
+            │   └── training/
+    hub/
+    └── models--julien-c--EsperBERTo-small/
+        ├── blobs/
+        │   ├── (...)
+        │   ├── (...)
+        ├── refs/
+        │   └── (...)
+        └── snapshots/
+            ├── 2439f60ef33a0d46d85da5001d52aeda5b00ce9f/
+            │   ├── (...)
+            └── bbc77c8132af1cc5cf678da3f1ddf2de43606d48/
+                └── (...)
+    ```
+
+
+    Args:
+        library_name (`str`):
+            Name of the library that will manage the cache folder. Example: `"datasets"`.
+        namespace (`str`, *optional*, defaults to "default"):
+            Namespace to which the data belongs. Example: `"SQuAD"`.
+        subfolder (`str`, *optional*, defaults to "default"):
+            Subfolder in which the data will be stored. Example: `"extracted"`.
+        assets_dir (`str`, `Path`, *optional*):
+            Path to the folder where assets are cached. This must not be the same folder
+            where Hub files are cached. Defaults to `HF_HOME / "assets"` if not provided.
+            Can also be set with the `HF_ASSETS_CACHE` environment variable.
+
+    Returns:
+        Path to the cache folder (`Path`).
+
+    Example:
+    ```py
+    >>> from huggingface_hub import cached_assets_path
+
+    >>> cached_assets_path(library_name="datasets", namespace="SQuAD", subfolder="download")
+    PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/SQuAD/download')
+
+    >>> cached_assets_path(library_name="datasets", namespace="SQuAD", subfolder="extracted")
+    PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/SQuAD/extracted')
+
+    >>> cached_assets_path(library_name="datasets", namespace="Helsinki-NLP/tatoeba_mt")
+    PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/Helsinki-NLP--tatoeba_mt/default')
+
+    >>> cached_assets_path(library_name="datasets", assets_dir="/tmp/tmp123456")
+    PosixPath('/tmp/tmp123456/datasets/default/default')
+    ```
+    """
+    # Resolve assets_dir
+    if assets_dir is None:
+        assets_dir = HF_ASSETS_CACHE
+    assets_dir = Path(assets_dir).expanduser().resolve()
+
+    # Avoid names that could create path issues
+    for part in (" ", "/", "\\"):
+        library_name = library_name.replace(part, "--")
+        namespace = namespace.replace(part, "--")
+        subfolder = subfolder.replace(part, "--")
+
+    # Path to subfolder is created
+    path = assets_dir / library_name / namespace / subfolder
+    try:
+        path.mkdir(exist_ok=True, parents=True)
+    except (FileExistsError, NotADirectoryError):
+        raise ValueError(f"Corrupted assets folder: cannot create directory because of an existing file ({path}).")
+
+    # Return
+    return path
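As a quick, illustrative integration sketch of `cached_assets_path` (the library and namespace names below are invented, and the printed path depends on your `HF_ASSETS_CACHE` setting):

```python
from huggingface_hub import cached_assets_path

# "my_library" / "tatoeba" are placeholder names for a hypothetical integration.
assets_dir = cached_assets_path(library_name="my_library", namespace="tatoeba", subfolder="extracted")
print(assets_dir)  # e.g. ~/.cache/huggingface/assets/my_library/tatoeba/extracted

# The returned path is guaranteed to be an existing directory.
(assets_dir / "sentences.txt").write_text("hello")
```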
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_datetime.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_datetime.py
new file mode 100644
index 0000000000000000000000000000000000000000..e544884b8793d8d409303cafd34586523fc3fb1c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_datetime.py
@@ -0,0 +1,62 @@
+# coding=utf-8
+# Copyright 2022-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains utilities to handle datetimes in Huggingface Hub."""
+
+from datetime import datetime, timezone
+
+
+def parse_datetime(date_string: str) -> datetime:
+    """
+    Parses a date_string returned from the server to a datetime object.
+
+    This parser is a weak parser in the sense that it handles only a single format of
+    date_string. It is expected that the server format will never change. The
+    implementation depends only on the standard lib to avoid an external dependency
+    (python-dateutil). See the full discussion about this decision on PR:
+    https://github.com/huggingface/huggingface_hub/pull/999.
+
+    Example:
+    ```py
+    >>> parse_datetime('2022-08-19T07:19:38.123Z')
+    datetime.datetime(2022, 8, 19, 7, 19, 38, 123000, tzinfo=timezone.utc)
+    ```
+
+    Args:
+        date_string (`str`):
+            A string representing a datetime returned by the Hub server.
+            The string is expected to follow the '%Y-%m-%dT%H:%M:%S.%fZ' pattern.
+
+    Returns:
+        A python datetime object.
+
+    Raises:
+        :class:`ValueError`:
+            If `date_string` cannot be parsed.
+    """
+    try:
+        # A datetime ending with a Z means "UTC". We parse the date and then explicitly
+        # set the timezone to UTC.
+        # See https://en.wikipedia.org/wiki/ISO_8601#Coordinated_Universal_Time_(UTC)
+        # Taken from https://stackoverflow.com/a/3168394.
+        if len(date_string) == 30:
+            # Means a timezone-aware timestamp with nanosecond precision. We need to truncate the last 3 digits.
+            date_string = date_string[:-4] + "Z"
+        dt = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%fZ")
+        return dt.replace(tzinfo=timezone.utc)  # Set explicit timezone
+    except ValueError as e:
+        raise ValueError(
+            f"Cannot parse '{date_string}' as a datetime. Date string is expected to"
+            " follow '%Y-%m-%dT%H:%M:%S.%fZ' pattern."
+        ) from e
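A short illustrative run of the parser defined above (expected outputs shown as comments):

```python
from huggingface_hub.utils import parse_datetime

dt = parse_datetime("2022-08-19T07:19:38.123Z")
print(dt)         # 2022-08-19 07:19:38.123000+00:00
print(dt.tzinfo)  # UTC

# A 30-character, nanosecond-precision timestamp is truncated to microseconds first.
print(parse_datetime("2022-08-19T07:19:38.123456789Z"))  # 2022-08-19 07:19:38.123456+00:00
```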
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_hf_folder.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_hf_folder.py
new file mode 100644
index 0000000000000000000000000000000000000000..502b22658b44d2221b535cbd943348bb93213245
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_hf_folder.py
@@ -0,0 +1,96 @@
+# coding=utf-8
+# Copyright 2022-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains a helper class to retrieve/store the token from/to the local cache."""
+
+import warnings
+from pathlib import Path
+from typing import Optional
+
+from .. import constants
+from ._token import get_token
+
+
+class HfFolder:
+    path_token = Path(constants.HF_TOKEN_PATH)
+    # Private attribute. Will be removed in v0.15
+    _old_path_token = Path(constants._OLD_HF_TOKEN_PATH)
+
+    # TODO: deprecate when adapted in transformers/datasets/gradio
+    # @_deprecate_method(version="1.0", message="Use `huggingface_hub.login` instead.")
+    @classmethod
+    def save_token(cls, token: str) -> None:
+        """
+        Save token, creating folder as needed.
+
+        The token is saved in the huggingface home folder. You can configure it by setting
+        the `HF_HOME` environment variable.
+
+        Args:
+            token (`str`):
+                The token to save to the [`HfFolder`]
+        """
+        cls.path_token.parent.mkdir(parents=True, exist_ok=True)
+        cls.path_token.write_text(token)
+
+    # TODO: deprecate when adapted in transformers/datasets/gradio
+    # @_deprecate_method(version="1.0", message="Use `huggingface_hub.get_token` instead.")
+    @classmethod
+    def get_token(cls) -> Optional[str]:
+        """
+        Get the token, or None if it does not exist.
+
+        This method is deprecated in favor of [`huggingface_hub.get_token`] but is kept for backward compatibility.
+        Its behavior is the same as [`huggingface_hub.get_token`].
+
+        Returns:
+            `str` or `None`: The token, `None` if it doesn't exist.
+        """
+        # 0. Check if the token exists in the old path but not in the new location
+        try:
+            cls._copy_to_new_path_and_warn()
+        except Exception:  # if not possible (e.g. PermissionError), do not raise
+            pass
+
+        return get_token()
+
+    # TODO: deprecate when adapted in transformers/datasets/gradio
+    # @_deprecate_method(version="1.0", message="Use `huggingface_hub.logout` instead.")
+    @classmethod
+    def delete_token(cls) -> None:
+        """
+        Deletes the token from storage. Does not fail if the token does not exist.
+        """
+        try:
+            cls.path_token.unlink()
+        except FileNotFoundError:
+            pass
+
+        try:
+            cls._old_path_token.unlink()
+        except FileNotFoundError:
+            pass
+
+    @classmethod
+    def _copy_to_new_path_and_warn(cls):
+        if cls._old_path_token.exists() and not cls.path_token.exists():
+            cls.save_token(cls._old_path_token.read_text())
+            warnings.warn(
+                f"A token has been found in `{cls._old_path_token}`. This is the old"
+                " path where tokens were stored. The new location is"
+                f" `{cls.path_token}` which is configurable using `HF_HOME` environment"
+                " variable. Your token has been copied to this new location. You can"
+                " now safely delete the old token file manually or use"
+                " `huggingface-cli logout`."
+            )
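A small round-trip sketch of the class above. It writes to the real `HF_TOKEN_PATH` and assumes no `HF_TOKEN` environment variable is set, so treat it as illustrative rather than something to run on a machine holding a real token:

```python
from huggingface_hub.utils import HfFolder

# Illustrative only: this overwrites any token stored at HF_TOKEN_PATH.
HfFolder.save_token("hf_dummy_token_for_illustration")
print(HfFolder.get_token())  # "hf_dummy_token_for_illustration" (assuming HF_TOKEN is unset)

HfFolder.delete_token()
print(HfFolder.get_token())  # None (same assumption)
```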
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_runtime.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_runtime.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dab5476cd73f4980b82c641f36adb7dc908cc10
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_runtime.py
@@ -0,0 +1,382 @@
+# coding=utf-8
+# Copyright 2022-present, the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Check presence of installed packages at runtime."""
+
+import importlib.metadata
+import platform
+import sys
+import warnings
+from typing import Any, Dict
+
+from .. import __version__, constants
+
+
+_PY_VERSION: str = sys.version.split()[0].rstrip("+")
+
+_package_versions = {}
+
+_CANDIDATES = {
+    "aiohttp": {"aiohttp"},
+    "fastai": {"fastai"},
+    "fastapi": {"fastapi"},
+    "fastcore": {"fastcore"},
+    "gradio": {"gradio"},
+    "graphviz": {"graphviz"},
+    "hf_transfer": {"hf_transfer"},
+    "jinja": {"Jinja2"},
+    "keras": {"keras"},
+    "minijinja": {"minijinja"},
+    "numpy": {"numpy"},
+    "pillow": {"Pillow"},
+    "pydantic": {"pydantic"},
+    "pydot": {"pydot"},
+    "safetensors": {"safetensors"},
+    "tensorboard": {"tensorboardX"},
+    "tensorflow": (
+        "tensorflow",
+        "tensorflow-cpu",
+        "tensorflow-gpu",
+        "tf-nightly",
+        "tf-nightly-cpu",
+        "tf-nightly-gpu",
+        "intel-tensorflow",
+        "intel-tensorflow-avx512",
+        "tensorflow-rocm",
+        "tensorflow-macos",
+    ),
+    "torch": {"torch"},
+}
+
+# Check once at runtime
+for candidate_name, package_names in _CANDIDATES.items():
+    _package_versions[candidate_name] = "N/A"
+    for name in package_names:
+        try:
+            _package_versions[candidate_name] = importlib.metadata.version(name)
+            break
+        except importlib.metadata.PackageNotFoundError:
+            pass
+
+
+def _get_version(package_name: str) -> str:
+    return _package_versions.get(package_name, "N/A")
+
+
+def is_package_available(package_name: str) -> bool:
+    return _get_version(package_name) != "N/A"
+
+
+# Python
+def get_python_version() -> str:
+    return _PY_VERSION
+
+
+# Huggingface Hub
+def get_hf_hub_version() -> str:
+    return __version__
+
+
+# aiohttp
+def is_aiohttp_available() -> bool:
+    return is_package_available("aiohttp")
+
+
+def get_aiohttp_version() -> str:
+    return _get_version("aiohttp")
+
+
+# FastAI
+def is_fastai_available() -> bool:
+    return is_package_available("fastai")
+
+
+def get_fastai_version() -> str:
+    return _get_version("fastai")
+
+
+# FastAPI
+def is_fastapi_available() -> bool:
+    return is_package_available("fastapi")
+
+
+def get_fastapi_version() -> str:
+    return _get_version("fastapi")
+
+
+# Fastcore
+def is_fastcore_available() -> bool:
+    return is_package_available("fastcore")
+
+
+def get_fastcore_version() -> str:
+    return _get_version("fastcore")
+
+
+# Gradio
+def is_gradio_available() -> bool:
+    return is_package_available("gradio")
+
+
+def get_gradio_version() -> str:
+    return _get_version("gradio")
+
+
+# Graphviz
+def is_graphviz_available() -> bool:
+    return is_package_available("graphviz")
+
+
+def get_graphviz_version() -> str:
+    return _get_version("graphviz")
+
+
+# hf_transfer
+def is_hf_transfer_available() -> bool:
+    return is_package_available("hf_transfer")
+
+
+def get_hf_transfer_version() -> str:
+    return _get_version("hf_transfer")
+
+
+# keras
+def is_keras_available() -> bool:
+    return is_package_available("keras")
+
+
+def get_keras_version() -> str:
+    return _get_version("keras")
+
+
+# Minijinja
+def is_minijinja_available() -> bool:
+    return is_package_available("minijinja")
+
+
+def get_minijinja_version() -> str:
+    return _get_version("minijinja")
+
+
+# Numpy
+def is_numpy_available() -> bool:
+    return is_package_available("numpy")
+
+
+def get_numpy_version() -> str:
+    return _get_version("numpy")
+
+
+# Jinja
+def is_jinja_available() -> bool:
+    return is_package_available("jinja")
+
+
+def get_jinja_version() -> str:
+    return _get_version("jinja")
+
+
+# Pillow
+def is_pillow_available() -> bool:
+    return is_package_available("pillow")
+
+
+def get_pillow_version() -> str:
+    return _get_version("pillow")
+
+
+# Pydantic
+def is_pydantic_available() -> bool:
+    if not is_package_available("pydantic"):
+        return False
+    # For Pydantic, we add an extra check to test whether it is correctly installed or not. If both pydantic 2.x and
+    # typing_extensions<=4.5.0 are installed, then pydantic will fail at import time. This should not happen when
+    # it is installed with `pip install huggingface_hub[inference]` but it can happen when it is installed manually
+    # by the user in an environment that we don't control.
+    #
+    # Usually we won't need to do this kind of check on optional dependencies. However, pydantic is a special case
+    # as it is automatically imported when doing `from huggingface_hub import ...` even if the user doesn't use it.
+    #
+    # See https://github.com/huggingface/huggingface_hub/pull/1829 for more details.
+    try:
+        from pydantic import validator  # noqa: F401
+    except ImportError as e:
+        # Example: "ImportError: cannot import name 'TypeAliasType' from 'typing_extensions'"
+        warnings.warn(
+            "Pydantic is installed but cannot be imported. Please check your installation. `huggingface_hub` will "
+            f"default to not using Pydantic. Error message: '{e}'"
+        )
+        return False
+    return True
+
+
+def get_pydantic_version() -> str:
+    return _get_version("pydantic")
+
+
+# Pydot
+def is_pydot_available() -> bool:
+    return is_package_available("pydot")
+
+
+def get_pydot_version() -> str:
+    return _get_version("pydot")
+
+
+# Tensorboard
+def is_tensorboard_available() -> bool:
+    return is_package_available("tensorboard")
+
+
+def get_tensorboard_version() -> str:
+    return _get_version("tensorboard")
+
+
+# Tensorflow
+def is_tf_available() -> bool:
+    return is_package_available("tensorflow")
+
+
+def get_tf_version() -> str:
+    return _get_version("tensorflow")
+
+
+# Torch
+def is_torch_available() -> bool:
+    return is_package_available("torch")
+
+
+def get_torch_version() -> str:
+    return _get_version("torch")
+
+
+# Safetensors
+def is_safetensors_available() -> bool:
+    return is_package_available("safetensors")
+
+
+# Shell-related helpers
+try:
+    # Set to `True` if script is running in a Google Colab notebook.
+    # If running in Google Colab, git credential store is set globally which makes the
+    # warning disappear. See https://github.com/huggingface/huggingface_hub/issues/1043
+    #
+    # Taken from https://stackoverflow.com/a/63519730.
+    _is_google_colab = "google.colab" in str(get_ipython())  # type: ignore # noqa: F821
+except NameError:
+    _is_google_colab = False
+
+
+def is_notebook() -> bool:
+    """Return `True` if code is executed in a notebook (Jupyter, Colab, QTconsole).
+
+    Taken from https://stackoverflow.com/a/39662359.
+    Adapted to make it work with Google Colab as well.
+    """
+    try:
+        shell_class = get_ipython().__class__  # type: ignore # noqa: F821
+        for parent_class in shell_class.__mro__:  # e.g. "is subclass of"
+            if parent_class.__name__ == "ZMQInteractiveShell":
+                return True  # Jupyter notebook, Google Colab or qtconsole
+        return False
+    except NameError:
+        return False  # Probably standard Python interpreter
+
+
+def is_google_colab() -> bool:
+    """Return `True` if code is executed in a Google Colab.
+
+    Taken from https://stackoverflow.com/a/63519730.
+    """
+    return _is_google_colab
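Before the environment dump below, here is a standalone sketch of the same `importlib.metadata` probing pattern used by the `_CANDIDATES` loop near the top of this module (the candidate names are illustrative):

```python
import importlib.metadata


def probe_version(*distribution_names: str) -> str:
    """Return the version of the first installed distribution, else "N/A"."""
    for name in distribution_names:
        try:
            return importlib.metadata.version(name)
        except importlib.metadata.PackageNotFoundError:
            continue
    return "N/A"


print(probe_version("numpy"))                     # version string if installed, else "N/A"
print(probe_version("tensorflow", "tf-nightly"))  # first installed candidate wins
```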
+
+
+def dump_environment_info() -> Dict[str, Any]:
+    """Dump information about the machine to help debugging issues.
+
+    Similar helpers exist in:
+    - `datasets` (https://github.com/huggingface/datasets/blob/main/src/datasets/commands/env.py)
+    - `diffusers` (https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/env.py)
+    - `transformers` (https://github.com/huggingface/transformers/blob/main/src/transformers/commands/env.py)
+    """
+    from huggingface_hub import get_token, whoami
+    from huggingface_hub.utils import list_credential_helpers
+
+    token = get_token()
+
+    # Generic machine info
+    info: Dict[str, Any] = {
+        "huggingface_hub version": get_hf_hub_version(),
+        "Platform": platform.platform(),
+        "Python version": get_python_version(),
+    }
+
+    # Interpreter info
+    try:
+        shell_class = get_ipython().__class__  # type: ignore # noqa: F821
+        info["Running in iPython ?"] = "Yes"
+        info["iPython shell"] = shell_class.__name__
+    except NameError:
+        info["Running in iPython ?"] = "No"
+    info["Running in notebook ?"] = "Yes" if is_notebook() else "No"
+    info["Running in Google Colab ?"] = "Yes" if is_google_colab() else "No"
+
+    # Login info
+    info["Token path ?"] = constants.HF_TOKEN_PATH
+    info["Has saved token ?"] = token is not None
+    if token is not None:
+        try:
+            info["Who am I ?"] = whoami()["name"]
+        except Exception:
+            pass
+
+    try:
+        info["Configured git credential helpers"] = ", ".join(list_credential_helpers())
+    except Exception:
+        pass
+
+    # Installed dependencies
+    info["FastAI"] = get_fastai_version()
+    info["Tensorflow"] = get_tf_version()
+    info["Torch"] = get_torch_version()
+    info["Jinja2"] = get_jinja_version()
+    info["Graphviz"] = get_graphviz_version()
+    info["keras"] = get_keras_version()
+    info["Pydot"] = get_pydot_version()
+    info["Pillow"] = get_pillow_version()
+    info["hf_transfer"] = get_hf_transfer_version()
+    info["gradio"] = get_gradio_version()
+    info["tensorboard"] = get_tensorboard_version()
+    info["numpy"] = get_numpy_version()
+    info["pydantic"] = get_pydantic_version()
+    info["aiohttp"] = get_aiohttp_version()
+
+    # Environment variables
+    info["ENDPOINT"] = constants.ENDPOINT
+    info["HF_HUB_CACHE"] = constants.HF_HUB_CACHE
+    info["HF_ASSETS_CACHE"] = constants.HF_ASSETS_CACHE
+    info["HF_TOKEN_PATH"] = constants.HF_TOKEN_PATH
+    info["HF_HUB_OFFLINE"] = constants.HF_HUB_OFFLINE
+    info["HF_HUB_DISABLE_TELEMETRY"] = constants.HF_HUB_DISABLE_TELEMETRY
+    info["HF_HUB_DISABLE_PROGRESS_BARS"] = constants.HF_HUB_DISABLE_PROGRESS_BARS
+    info["HF_HUB_DISABLE_SYMLINKS_WARNING"] = constants.HF_HUB_DISABLE_SYMLINKS_WARNING
+    info["HF_HUB_DISABLE_EXPERIMENTAL_WARNING"] = constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING
+    info["HF_HUB_DISABLE_IMPLICIT_TOKEN"] = constants.HF_HUB_DISABLE_IMPLICIT_TOKEN
+    info["HF_HUB_ENABLE_HF_TRANSFER"] = constants.HF_HUB_ENABLE_HF_TRANSFER
+    info["HF_HUB_ETAG_TIMEOUT"] = constants.HF_HUB_ETAG_TIMEOUT
+    info["HF_HUB_DOWNLOAD_TIMEOUT"] = constants.HF_HUB_DOWNLOAD_TIMEOUT
+
+    print("\nCopy-and-paste the text below in your GitHub issue.\n")
+    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]) + "\n")
+    return info
diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_safetensors.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_safetensors.py
new file mode 100644
index 0000000000000000000000000000000000000000..38546c6d34db786c62861e1706f747a21b7012bf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_safetensors.py
@@ -0,0 +1,111 @@
+import functools
+import operator
+from collections import defaultdict
+from dataclasses import dataclass, field
+from
typing import Dict, List, Literal, Optional, Tuple + + +FILENAME_T = str +TENSOR_NAME_T = str +DTYPE_T = Literal["F64", "F32", "F16", "BF16", "I64", "I32", "I16", "I8", "U8", "BOOL"] + + +@dataclass +class TensorInfo: + """Information about a tensor. + + For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format. + + Attributes: + dtype (`str`): + The data type of the tensor ("F64", "F32", "F16", "BF16", "I64", "I32", "I16", "I8", "U8", "BOOL"). + shape (`List[int]`): + The shape of the tensor. + data_offsets (`Tuple[int, int]`): + The offsets of the data in the file as a tuple `[BEGIN, END]`. + parameter_count (`int`): + The number of parameters in the tensor. + """ + + dtype: DTYPE_T + shape: List[int] + data_offsets: Tuple[int, int] + parameter_count: int = field(init=False) + + def __post_init__(self) -> None: + # Taken from https://stackoverflow.com/a/13840436 + try: + self.parameter_count = functools.reduce(operator.mul, self.shape) + except TypeError: + self.parameter_count = 1 # scalar value has no shape + + +@dataclass +class SafetensorsFileMetadata: + """Metadata for a Safetensors file hosted on the Hub. + + This class is returned by [`parse_safetensors_file_metadata`]. + + For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format. + + Attributes: + metadata (`Dict`): + The metadata contained in the file. + tensors (`Dict[str, TensorInfo]`): + A map of all tensors. Keys are tensor names and values are information about the corresponding tensor, as a + [`TensorInfo`] object. + parameter_count (`Dict[str, int]`): + A map of the number of parameters per data type. Keys are data types and values are the number of parameters + of that data type. + """ + + metadata: Dict[str, str] + tensors: Dict[TENSOR_NAME_T, TensorInfo] + parameter_count: Dict[DTYPE_T, int] = field(init=False) + + def __post_init__(self) -> None: + parameter_count: Dict[DTYPE_T, int] = defaultdict(int) + for tensor in self.tensors.values(): + parameter_count[tensor.dtype] += tensor.parameter_count + self.parameter_count = dict(parameter_count) + + +@dataclass +class SafetensorsRepoMetadata: + """Metadata for a Safetensors repo. + + A repo is considered to be a Safetensors repo if it contains either a 'model.safetensors' weight file (non-shared + model) or a 'model.safetensors.index.json' index file (sharded model) at its root. + + This class is returned by [`get_safetensors_metadata`]. + + For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format. + + Attributes: + metadata (`Dict`, *optional*): + The metadata contained in the 'model.safetensors.index.json' file, if it exists. Only populated for sharded + models. + sharded (`bool`): + Whether the repo contains a sharded model or not. + weight_map (`Dict[str, str]`): + A map of all weights. Keys are tensor names and values are filenames of the files containing the tensors. + files_metadata (`Dict[str, SafetensorsFileMetadata]`): + A map of all files metadata. Keys are filenames and values are the metadata of the corresponding file, as + a [`SafetensorsFileMetadata`] object. + parameter_count (`Dict[str, int]`): + A map of the number of parameters per data type. Keys are data types and values are the number of parameters + of that data type. 
+ """ + + metadata: Optional[Dict] + sharded: bool + weight_map: Dict[TENSOR_NAME_T, FILENAME_T] # tensor name -> filename + files_metadata: Dict[FILENAME_T, SafetensorsFileMetadata] # filename -> metadata + parameter_count: Dict[DTYPE_T, int] = field(init=False) + + def __post_init__(self) -> None: + parameter_count: Dict[DTYPE_T, int] = defaultdict(int) + for file_metadata in self.files_metadata.values(): + for dtype, nb_parameters_ in file_metadata.parameter_count.items(): + parameter_count[dtype] += nb_parameters_ + self.parameter_count = dict(parameter_count) diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_typing.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..ae502b825bcb900ea076ffe1b0fe1078569821c5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/_typing.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Handle typing imports based on system compatibility.""" + +from typing import Any, Callable, Literal, TypeVar + + +HTTP_METHOD_T = Literal["GET", "OPTIONS", "HEAD", "POST", "PUT", "PATCH", "DELETE"] + +# type hint meaning "function signature not changed by decorator" +CallableT = TypeVar("CallableT", bound=Callable) + +_JSON_SERIALIZABLE_TYPES = (int, float, str, bool, type(None)) + + +def is_jsonable(obj: Any) -> bool: + """Check if an object is JSON serializable. + + This is a weak check, as it does not check for the actual JSON serialization, but only for the types of the object. + It works correctly for basic use cases but do not guarantee an exhaustive check. + + Object is considered to be recursively json serializable if: + - it is an instance of int, float, str, bool, or NoneType + - it is a list or tuple and all its items are json serializable + - it is a dict and all its keys are strings and all its values are json serializable + """ + try: + if isinstance(obj, _JSON_SERIALIZABLE_TYPES): + return True + if isinstance(obj, (list, tuple)): + return all(is_jsonable(item) for item in obj) + if isinstance(obj, dict): + return all(isinstance(key, str) and is_jsonable(value) for key, value in obj.items()) + if hasattr(obj, "__json__"): + return True + return False + except RecursionError: + return False diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/logging.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..3aafdf148135397556b4bb762862377eafdafd14 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/logging.py @@ -0,0 +1,182 @@ +# coding=utf-8 +# Copyright 2020 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Logging utilities.""" + +import logging +import os +from logging import ( + CRITICAL, # NOQA + DEBUG, # NOQA + ERROR, # NOQA + FATAL, # NOQA + INFO, # NOQA + NOTSET, # NOQA + WARN, # NOQA + WARNING, # NOQA +) +from typing import Optional + + +log_levels = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _get_default_logging_level(): + """ + If `HF_HUB_VERBOSITY` env var is set to one of the valid choices return that as the new default level. If it is not + - fall back to `_default_log_level` + """ + env_level_str = os.getenv("HF_HUB_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option HF_HUB_VERBOSITY={env_level_str}, has to be one of: { ', '.join(log_levels.keys()) }" + ) + return _default_log_level + + +def _configure_library_root_logger() -> None: + library_root_logger = _get_library_root_logger() + library_root_logger.addHandler(logging.StreamHandler()) + library_root_logger.setLevel(_get_default_logging_level()) + + +def _reset_library_root_logger() -> None: + library_root_logger = _get_library_root_logger() + library_root_logger.setLevel(logging.NOTSET) + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """ + Returns a logger with the specified name. This function is not supposed + to be directly accessed by library users. + + Args: + name (`str`, *optional*): + The name of the logger to get, usually the filename + + Example: + + ```python + >>> from huggingface_hub import get_logger + + >>> logger = get_logger(__file__) + >>> logger.set_verbosity_info() + ``` + """ + + if name is None: + name = _get_library_name() + + return logging.getLogger(name) + + +def get_verbosity() -> int: + """Return the current level for the HuggingFace Hub's root logger. + + Returns: + Logging level, e.g., `huggingface_hub.logging.DEBUG` and + `huggingface_hub.logging.INFO`. + + + + HuggingFace Hub has following logging levels: + + - `huggingface_hub.logging.CRITICAL`, `huggingface_hub.logging.FATAL` + - `huggingface_hub.logging.ERROR` + - `huggingface_hub.logging.WARNING`, `huggingface_hub.logging.WARN` + - `huggingface_hub.logging.INFO` + - `huggingface_hub.logging.DEBUG` + + + """ + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """ + Sets the level for the HuggingFace Hub's root logger. + + Args: + verbosity (`int`): + Logging level, e.g., `huggingface_hub.logging.DEBUG` and + `huggingface_hub.logging.INFO`. + """ + _get_library_root_logger().setLevel(verbosity) + + +def set_verbosity_info(): + """ + Sets the verbosity to `logging.INFO`. + """ + return set_verbosity(INFO) + + +def set_verbosity_warning(): + """ + Sets the verbosity to `logging.WARNING`. 
+ """ + return set_verbosity(WARNING) + + +def set_verbosity_debug(): + """ + Sets the verbosity to `logging.DEBUG`. + """ + return set_verbosity(DEBUG) + + +def set_verbosity_error(): + """ + Sets the verbosity to `logging.ERROR`. + """ + return set_verbosity(ERROR) + + +def disable_propagation() -> None: + """ + Disable propagation of the library log outputs. Note that log propagation is + disabled by default. + """ + _get_library_root_logger().propagate = False + + +def enable_propagation() -> None: + """ + Enable propagation of the library log outputs. Please disable the + HuggingFace Hub's default handler to prevent double logging if the root + logger has been configured. + """ + _get_library_root_logger().propagate = True + + +_configure_library_root_logger() diff --git a/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/sha.py b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/sha.py new file mode 100644 index 0000000000000000000000000000000000000000..233ab074e69a47de9a443a458ce44e1429a1e22c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/huggingface_hub/utils/sha.py @@ -0,0 +1,29 @@ +"""Utilities to efficiently compute the SHA 256 hash of a bunch of bytes.""" + +from typing import BinaryIO, Optional + +from .insecure_hashlib import sha256 + + +def sha_fileobj(fileobj: BinaryIO, chunk_size: Optional[int] = None) -> bytes: + """ + Computes the sha256 hash of the given file object, by chunks of size `chunk_size`. + + Args: + fileobj (file-like object): + The File object to compute sha256 for, typically obtained with `open(path, "rb")` + chunk_size (`int`, *optional*): + The number of bytes to read from `fileobj` at once, defaults to 1MB. + + Returns: + `bytes`: `fileobj`'s sha256 hash as bytes + """ + chunk_size = chunk_size if chunk_size is not None else 1024 * 1024 + + sha = sha256() + while True: + chunk = fileobj.read(chunk_size) + sha.update(chunk) + if not chunk: + break + return sha.digest() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4517d3cd03ca749166ce232e417b096f5350469b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9cad6cb4f001003e37473798a839fa7fed61904 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/vocabulary.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/vocabulary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b6aff42704f6c5c053c2950ebb948c91693cb79 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/lm/__pycache__/vocabulary.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/__config__.py b/llmeval-env/lib/python3.10/site-packages/scipy/__config__.py new file mode 100644 index 0000000000000000000000000000000000000000..9a40526d1d3326491cbd6f6169cf57e258645a22 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/scipy/__config__.py @@ -0,0 +1,161 @@ +# This file is generated by SciPy's build process +# It contains system_info results at the time of building this package. +from enum import Enum + +__all__ = ["show"] +_built_with_meson = True + + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + + +def _cleanup(d): + """ + Removes empty values in a `dict` recursively + This ensures we remove values that Meson could not provide to CONFIG + """ + if isinstance(d, dict): + return { k: _cleanup(v) for k, v in d.items() if v != '' and _cleanup(v) != '' } + else: + return d + + +CONFIG = _cleanup( + { + "Compilers": { + "c": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "10.2.1", + "commands": r"cc", + "args": r"", + "linker args": r"", + }, + "cython": { + "name": r"cython", + "linker": r"cython", + "version": r"3.0.10", + "commands": r"cython", + "args": r"", + "linker args": r"", + }, + "c++": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "10.2.1", + "commands": r"c++", + "args": r"", + "linker args": r"", + }, + "fortran": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "10.2.1", + "commands": r"gfortran", + "args": r"", + "linker args": r"", + }, + "pythran": { + "version": r"0.15.0", + "include directory": r"../../tmp/pip-build-env-0blqy1or/overlay/lib/python3.10/site-packages/pythran" + }, + }, + "Machine Information": { + "host": { + "cpu": r"x86_64", + "family": r"x86_64", + "endian": r"little", + "system": r"linux", + }, + "build": { + "cpu": r"x86_64", + "family": r"x86_64", + "endian": r"little", + "system": r"linux", + }, + "cross-compiled": bool("False".lower().replace('false', '')), + }, + "Build Dependencies": { + "blas": { + "name": "openblas", + "found": bool("True".lower().replace('false', '')), + "version": "0.3.26.dev", + "detection method": "pkgconfig", + "include directory": r"/usr/local/include", + "lib directory": r"/usr/local/lib", + "openblas configuration": r"USE_64BITINT=0 DYNAMIC_ARCH=1 DYNAMIC_OLDER= NO_CBLAS= NO_LAPACK= NO_LAPACKE= NO_AFFINITY=1 USE_OPENMP= ZEN MAX_THREADS=64", + "pc file directory": r"/usr/local/lib/pkgconfig", + }, + "lapack": { + "name": "openblas", + "found": bool("True".lower().replace('false', '')), + "version": "0.3.26.dev", + "detection method": "pkgconfig", + "include directory": r"/usr/local/include", + "lib directory": r"/usr/local/lib", + "openblas configuration": r"USE_64BITINT=0 DYNAMIC_ARCH=1 DYNAMIC_OLDER= NO_CBLAS= NO_LAPACK= NO_LAPACKE= NO_AFFINITY=1 USE_OPENMP= ZEN MAX_THREADS=64", + "pc file directory": r"/usr/local/lib/pkgconfig", + }, + "pybind11": { + "name": "pybind11", + "version": "2.12.0", + "detection method": "config-tool", + "include directory": r"unknown", + }, + }, + "Python Information": { + "path": r"/opt/python/cp310-cp310/bin/python", + "version": "3.10", + }, + } +) + + +def _check_pyyaml(): + import yaml + + return yaml + + +def show(mode=DisplayModes.stdout.value): + """ + Show libraries and system information on which SciPy was built + and is being used + + Parameters + ---------- + mode : {`'stdout'`, `'dicts'`}, optional. + Indicates how to display the config information. + `'stdout'` prints to console, `'dicts'` returns a dictionary + of the configuration. + + Returns + ------- + out : {`dict`, `None`} + If mode is `'dicts'`, a dict is returned, else None + + Notes + ----- + 1. 
The `'stdout'` mode will give more readable + output if ``pyyaml`` is installed + + """ + if mode == DisplayModes.stdout.value: + try: # Non-standard library, check import + yaml = _check_pyyaml() + + print(yaml.dump(CONFIG)) + except ModuleNotFoundError: + import warnings + import json + + warnings.warn("Install `pyyaml` for better output", stacklevel=1) + print(json.dumps(CONFIG, indent=2)) + elif mode == DisplayModes.dicts.value: + return CONFIG + else: + raise AttributeError( + f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d6bbed182a50ee24fe15d2a4a13847684317b318 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/__init__.py @@ -0,0 +1,141 @@ +""" +SciPy: A scientific computing package for Python +================================================ + +Documentation is available in the docstrings and +online at https://docs.scipy.org. + +Subpackages +----------- +Using any of these subpackages requires an explicit import. For example, +``import scipy.cluster``. + +:: + + cluster --- Vector Quantization / Kmeans + constants --- Physical and mathematical constants and units + datasets --- Dataset methods + fft --- Discrete Fourier transforms + fftpack --- Legacy discrete Fourier transforms + integrate --- Integration routines + interpolate --- Interpolation Tools + io --- Data input and output + linalg --- Linear algebra routines + misc --- Utilities that don't have another home. + ndimage --- N-D image package + odr --- Orthogonal Distance Regression + optimize --- Optimization Tools + signal --- Signal Processing Tools + sparse --- Sparse Matrices + spatial --- Spatial data structures and algorithms + special --- Special functions + stats --- Statistical Functions + +Public API in the main SciPy namespace +-------------------------------------- +:: + + __version__ --- SciPy version string + LowLevelCallable --- Low-level callback function + show_config --- Show scipy build configuration + test --- Run scipy unittests + +""" + +import importlib as _importlib + +from numpy import __version__ as __numpy_version__ + + +try: + from scipy.__config__ import show as show_config +except ImportError as e: + msg = """Error importing SciPy: you cannot import SciPy while + being in scipy source directory; please exit the SciPy source + tree first and relaunch your Python interpreter.""" + raise ImportError(msg) from e + + +from scipy.version import version as __version__ + + +# Allow distributors to run custom init code +from . import _distributor_init +del _distributor_init + + +from scipy._lib import _pep440 +# In maintenance branch, change to np_maxversion N+3 if numpy is at N +np_minversion = '1.22.4' +np_maxversion = '2.3.0' +if (_pep440.parse(__numpy_version__) < _pep440.Version(np_minversion) or + _pep440.parse(__numpy_version__) >= _pep440.Version(np_maxversion)): + import warnings + warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}" + f" is required for this version of SciPy (detected " + f"version {__numpy_version__})", + UserWarning, stacklevel=2) +del _pep440 + + +# This is the first import of an extension module within SciPy. If there's +# a general issue with the install, such that extension modules are missing +# or cannot be imported, this is where we'll get a failure - so give an +# informative error message. 
+try: + from scipy._lib._ccallback import LowLevelCallable +except ImportError as e: + msg = "The `scipy` install you are using seems to be broken, " + \ + "(extension modules cannot be imported), " + \ + "please try reinstalling." + raise ImportError(msg) from e + + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester + + +submodules = [ + 'cluster', + 'constants', + 'datasets', + 'fft', + 'fftpack', + 'integrate', + 'interpolate', + 'io', + 'linalg', + 'misc', + 'ndimage', + 'odr', + 'optimize', + 'signal', + 'sparse', + 'spatial', + 'special', + 'stats' +] + +__all__ = submodules + [ + 'LowLevelCallable', + 'test', + 'show_config', + '__version__', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + if name in submodules: + return _importlib.import_module(f'scipy.{name}') + else: + try: + return globals()[name] + except KeyError: + raise AttributeError( + f"Module 'scipy' has no attribute '{name}'" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/_distributor_init.py b/llmeval-env/lib/python3.10/site-packages/scipy/_distributor_init.py new file mode 100644 index 0000000000000000000000000000000000000000..5df134975aa27d31beaff74c3cbfd2d3fb0a55dd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/_distributor_init.py @@ -0,0 +1,18 @@ +""" Distributor init file + +Distributors: you can replace the contents of this file with your own custom +code to support particular distributions of SciPy. + +For example, this is a good place to put any checks for hardware requirements +or BLAS/LAPACK library initialization. + +The SciPy standard source distribution will not put code in this file beyond +the try-except import of `_distributor_init_local` (which is not part of a +standard source distribution), so you can safely replace this file with your +own version. +""" + +try: + from . import _distributor_init_local # noqa: F401 +except ImportError: + pass diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c975bacc61311a4dd020dc9baaedba3befe319ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/__init__.py @@ -0,0 +1,31 @@ +""" +========================================= +Clustering package (:mod:`scipy.cluster`) +========================================= + +.. currentmodule:: scipy.cluster + +.. toctree:: + :hidden: + + cluster.vq + cluster.hierarchy + +Clustering algorithms are useful in information theory, target detection, +communications, compression, and other areas. The `vq` module only +supports vector quantization and the k-means algorithms. + +The `hierarchy` module provides functions for hierarchical and +agglomerative clustering. Its features include generating hierarchical +clusters from distance matrices, +calculating statistics on clusters, cutting linkages +to generate flat clusters, and visualizing clusters with dendrograms. + +""" +__all__ = ['vq', 'hierarchy'] + +from . 
import vq, hierarchy + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2c26033c53e222410c7809efade512ef1346cb31 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c5117f66d4551253475ddde28e49f6f169d8fbb9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ed59337d4cf218405bd5c30219f86fcc937cde95 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/hierarchy.py b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/hierarchy.py new file mode 100644 index 0000000000000000000000000000000000000000..6bdafcc7d57650e05020d2a6784b98b827da4f18 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/hierarchy.py @@ -0,0 +1,4173 @@ +""" +Hierarchical clustering (:mod:`scipy.cluster.hierarchy`) +======================================================== + +.. currentmodule:: scipy.cluster.hierarchy + +These functions cut hierarchical clusterings into flat clusterings +or find the roots of the forest formed by a cut by providing the flat +cluster ids of each observation. + +.. autosummary:: + :toctree: generated/ + + fcluster + fclusterdata + leaders + +These are routines for agglomerative clustering. + +.. autosummary:: + :toctree: generated/ + + linkage + single + complete + average + weighted + centroid + median + ward + +These routines compute statistics on hierarchies. + +.. autosummary:: + :toctree: generated/ + + cophenet + from_mlab_linkage + inconsistent + maxinconsts + maxdists + maxRstat + to_mlab_linkage + +Routines for visualizing flat clusters. + +.. autosummary:: + :toctree: generated/ + + dendrogram + +These are data structures and routines for representing hierarchies as +tree objects. + +.. autosummary:: + :toctree: generated/ + + ClusterNode + leaves_list + to_tree + cut_tree + optimal_leaf_ordering + +These are predicates for checking the validity of linkage and +inconsistency matrices as well as for checking isomorphism of two +flat cluster assignments. + +.. autosummary:: + :toctree: generated/ + + is_valid_im + is_valid_linkage + is_isomorphic + is_monotonic + correspond + num_obs_linkage + +Utility routines for plotting: + +.. autosummary:: + :toctree: generated/ + + set_link_color_palette + +Utility classes: + +.. 
autosummary:: + :toctree: generated/ + + DisjointSet -- data structure for incremental connectivity queries + +""" +# Copyright (C) Damian Eads, 2007-2008. New BSD License. + +# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com) +# +# Author: Damian Eads +# Date: September 22, 2007 +# +# Copyright (c) 2007, 2008, Damian Eads +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# - Redistributions of source code must retain the above +# copyright notice, this list of conditions and the +# following disclaimer. +# - Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# - Neither the name of the author nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import warnings +import bisect +from collections import deque + +import numpy as np +from . import _hierarchy, _optimal_leaf_ordering +import scipy.spatial.distance as distance +from scipy._lib._array_api import array_namespace, _asarray, copy +from scipy._lib._disjoint_set import DisjointSet + + +_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3, + 'median': 4, 'ward': 5, 'weighted': 6} +_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward') + +__all__ = ['ClusterNode', 'DisjointSet', 'average', 'centroid', 'complete', + 'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster', + 'fclusterdata', 'from_mlab_linkage', 'inconsistent', + 'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage', + 'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists', + 'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering', + 'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree', + 'ward', 'weighted'] + + +class ClusterWarning(UserWarning): + pass + + +def _warning(s): + warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3) + + +def int_floor(arr, xp): + # array_api_strict is strict about not allowing `int()` on a float array. + # That's typically not needed, here it is - so explicitly convert + return int(xp.astype(xp.asarray(arr), xp.int64)) + + +def single(y): + """ + Perform single/min/nearest linkage on the condensed distance matrix ``y``. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + The linkage matrix. 
+ + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import single, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = single(y) + >>> Z + array([[ 0., 1., 1., 2.], + [ 2., 12., 1., 3.], + [ 3., 4., 1., 2.], + [ 5., 14., 1., 3.], + [ 6., 7., 1., 2.], + [ 8., 16., 1., 3.], + [ 9., 10., 1., 2.], + [11., 18., 1., 3.], + [13., 15., 2., 6.], + [17., 20., 2., 9.], + [19., 21., 2., 12.]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 4, 5, 6, 1, 2, 3], dtype=int32) + >>> fcluster(Z, 1, criterion='distance') + array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + """ + return linkage(y, method='single', metric='euclidean') + + +def complete(y): + """ + Perform complete/max/farthest point linkage on a condensed distance matrix. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + the `linkage` function documentation for more information + on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import complete, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = complete(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.41421356, 3. ], + [ 5. , 13. , 1.41421356, 3. ], + [ 8. , 14. , 1.41421356, 3. ], + [11. , 15. , 1.41421356, 3. ], + [16. , 17. , 4.12310563, 6. ], + [18. , 19. , 4.12310563, 6. ], + [20. , 21. , 5.65685425, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. 
+ + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 4.5, criterion='distance') + array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + """ + return linkage(y, method='complete', metric='euclidean') + + +def average(y): + """ + Perform average/UPGMA linkage on a condensed distance matrix. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + `linkage` for more information on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import average, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = average(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.20710678, 3. ], + [ 5. , 13. , 1.20710678, 3. ], + [ 8. , 14. , 1.20710678, 3. ], + [11. , 15. , 1.20710678, 3. ], + [16. , 17. , 3.39675184, 6. ], + [18. , 19. , 3.39675184, 6. ], + [20. , 21. , 4.09206523, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='average', metric='euclidean') + + +def weighted(y): + """ + Perform weighted/WPGMA linkage on the condensed distance matrix. + + See `linkage` for more information on the return + structure and algorithm. + + Parameters + ---------- + y : ndarray + The upper triangular of the distance matrix. The result of + ``pdist`` is returned in this form. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + `linkage` for more information on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. 
+ scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import weighted, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = weighted(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 11. , 1. , 2. ], + [ 2. , 12. , 1.20710678, 3. ], + [ 8. , 13. , 1.20710678, 3. ], + [ 5. , 14. , 1.20710678, 3. ], + [10. , 15. , 1.20710678, 3. ], + [18. , 19. , 3.05595762, 6. ], + [16. , 17. , 3.32379407, 6. ], + [20. , 21. , 4.06357713, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 1, 2, 3, 10, 11, 12, 4, 6, 5], dtype=int32) + >>> fcluster(Z, 1.5, criterion='distance') + array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32) + >>> fcluster(Z, 6, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='weighted', metric='euclidean') + + +def centroid(y): + """ + Perform centroid/UPGMC linkage. + + See `linkage` for more information on the input matrix, + return structure, and algorithm. + + The following are common calling conventions: + + 1. ``Z = centroid(y)`` + + Performs centroid/UPGMC linkage on the condensed distance + matrix ``y``. + + 2. ``Z = centroid(X)`` + + Performs centroid/UPGMC linkage on the observation matrix ``X`` + using Euclidean distance as the distance metric. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + A linkage matrix containing the hierarchical clustering. See + the `linkage` function documentation for more information + on its structure. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import centroid, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = centroid(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. 
], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3.33333333, 6. ], + [16. , 17. , 3.33333333, 6. ], + [20. , 21. , 3.33333333, 12. ]]) # may vary + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) # may vary + >>> fcluster(Z, 1.1, criterion='distance') + array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) # may vary + >>> fcluster(Z, 2, criterion='distance') + array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) # may vary + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='centroid', metric='euclidean') + + +def median(y): + """ + Perform median/WPGMC linkage. + + See `linkage` for more information on the return structure + and algorithm. + + The following are common calling conventions: + + 1. ``Z = median(y)`` + + Performs median/WPGMC linkage on the condensed distance matrix + ``y``. See ``linkage`` for more information on the return + structure and algorithm. + + 2. ``Z = median(X)`` + + Performs median/WPGMC linkage on the observation matrix ``X`` + using Euclidean distance as the distance metric. See `linkage` + for more information on the return structure and algorithm. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = median(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. 
+ + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32) + >>> fcluster(Z, 2, criterion='distance') + array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32) + >>> fcluster(Z, 4, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. + + """ + return linkage(y, method='median', metric='euclidean') + + +def ward(y): + """ + Perform Ward's linkage on a condensed distance matrix. + + See `linkage` for more information on the return structure + and algorithm. + + The following are common calling conventions: + + 1. ``Z = ward(y)`` + Performs Ward's linkage on the condensed distance matrix ``y``. + + 2. ``Z = ward(X)`` + Performs Ward's linkage on the observation matrix ``X`` using + Euclidean distance as the distance metric. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed + distance matrix is a flat array containing the upper + triangular of the distance matrix. This is the form that + ``pdist`` returns. Alternatively, a collection of + m observation vectors in n dimensions may be passed as + an m by n array. + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. See + `linkage` for more information on the return structure and + algorithm. + + See Also + -------- + linkage : for advanced creation of hierarchical clusterings. + scipy.spatial.distance.pdist : pairwise distance metrics + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, fcluster + >>> from scipy.spatial.distance import pdist + + First, we need a toy dataset to play with:: + + x x x x + x x + + x x + x x x x + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + Then, we get a condensed distance matrix from this dataset: + + >>> y = pdist(X) + + Finally, we can perform the clustering: + + >>> Z = ward(y) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + The linkage matrix ``Z`` represents a dendrogram - see + `scipy.cluster.hierarchy.linkage` for a detailed explanation of its + contents. + + We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster + each initial point would belong given a distance threshold: + + >>> fcluster(Z, 0.9, criterion='distance') + array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32) + >>> fcluster(Z, 1.1, criterion='distance') + array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32) + >>> fcluster(Z, 3, criterion='distance') + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + >>> fcluster(Z, 9, criterion='distance') + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32) + + Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a + plot of the dendrogram. 
+ + """ + return linkage(y, method='ward', metric='euclidean') + + +def linkage(y, method='single', metric='euclidean', optimal_ordering=False): + """ + Perform hierarchical/agglomerative clustering. + + The input y may be either a 1-D condensed distance matrix + or a 2-D array of observation vectors. + + If y is a 1-D condensed distance matrix, + then y must be a :math:`\\binom{n}{2}` sized + vector, where n is the number of original observations paired + in the distance matrix. The behavior of this function is very + similar to the MATLAB linkage function. + + A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the + :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and + ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A + cluster with an index less than :math:`n` corresponds to one of + the :math:`n` original observations. The distance between + clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The + fourth value ``Z[i, 3]`` represents the number of original + observations in the newly formed cluster. + + The following linkage methods are used to compute the distance + :math:`d(s, t)` between two clusters :math:`s` and + :math:`t`. The algorithm begins with a forest of clusters that + have yet to be used in the hierarchy being formed. When two + clusters :math:`s` and :math:`t` from this forest are combined + into a single cluster :math:`u`, :math:`s` and :math:`t` are + removed from the forest, and :math:`u` is added to the + forest. When only one cluster remains in the forest, the algorithm + stops, and this cluster becomes the root. + + A distance matrix is maintained at each iteration. The ``d[i,j]`` + entry corresponds to the distance between cluster :math:`i` and + :math:`j` in the original forest. + + At each iteration, the algorithm must update the distance matrix + to reflect the distance of the newly formed cluster u with the + remaining clusters in the forest. + + Suppose there are :math:`|u|` original observations + :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and + :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in + cluster :math:`v`. Recall, :math:`s` and :math:`t` are + combined to form cluster :math:`u`. Let :math:`v` be any + remaining cluster in the forest that is not :math:`u`. + + The following are methods for calculating the distance between the + newly formed cluster :math:`u` and each :math:`v`. + + * method='single' assigns + + .. math:: + d(u,v) = \\min(dist(u[i],v[j])) + + for all points :math:`i` in cluster :math:`u` and + :math:`j` in cluster :math:`v`. This is also known as the + Nearest Point Algorithm. + + * method='complete' assigns + + .. math:: + d(u, v) = \\max(dist(u[i],v[j])) + + for all points :math:`i` in cluster u and :math:`j` in + cluster :math:`v`. This is also known by the Farthest Point + Algorithm or Voor Hees Algorithm. + + * method='average' assigns + + .. math:: + d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])} + {(|u|*|v|)} + + for all points :math:`i` and :math:`j` where :math:`|u|` + and :math:`|v|` are the cardinalities of clusters :math:`u` + and :math:`v`, respectively. This is also called the UPGMA + algorithm. + + * method='weighted' assigns + + .. math:: + d(u,v) = (dist(s,v) + dist(t,v))/2 + + where cluster u was formed with cluster s and t and v + is a remaining cluster in the forest (also called WPGMA). + + * method='centroid' assigns + + .. math:: + dist(s,t) = ||c_s-c_t||_2 + + where :math:`c_s` and :math:`c_t` are the centroids of + clusters :math:`s` and :math:`t`, respectively. 
When two + clusters :math:`s` and :math:`t` are combined into a new + cluster :math:`u`, the new centroid is computed over all the + original objects in clusters :math:`s` and :math:`t`. The + distance then becomes the Euclidean distance between the + centroid of :math:`u` and the centroid of a remaining cluster + :math:`v` in the forest. This is also known as the UPGMC + algorithm. + + * method='median' assigns :math:`d(s,t)` like the ``centroid`` + method. When two clusters :math:`s` and :math:`t` are combined + into a new cluster :math:`u`, the average of centroids s and t + give the new centroid :math:`u`. This is also known as the + WPGMC algorithm. + + * method='ward' uses the Ward variance minimization algorithm. + The new entry :math:`d(u,v)` is computed as follows, + + .. math:: + + d(u,v) = \\sqrt{\\frac{|v|+|s|} + {T}d(v,s)^2 + + \\frac{|v|+|t|} + {T}d(v,t)^2 + - \\frac{|v|} + {T}d(s,t)^2} + + where :math:`u` is the newly joined cluster consisting of + clusters :math:`s` and :math:`t`, :math:`v` is an unused + cluster in the forest, :math:`T=|v|+|s|+|t|`, and + :math:`|*|` is the cardinality of its argument. This is also + known as the incremental algorithm. + + Warning: When the minimum distance pair in the forest is chosen, there + may be two or more pairs with the same minimum distance. This + implementation may choose a different minimum than the MATLAB + version. + + Parameters + ---------- + y : ndarray + A condensed distance matrix. A condensed distance matrix + is a flat array containing the upper triangular of the distance matrix. + This is the form that ``pdist`` returns. Alternatively, a collection of + :math:`m` observation vectors in :math:`n` dimensions may be passed as + an :math:`m` by :math:`n` array. All elements of the condensed distance + matrix must be finite, i.e., no NaNs or infs. + method : str, optional + The linkage algorithm to use. See the ``Linkage Methods`` section below + for full descriptions. + metric : str or function, optional + The distance metric to use in the case that y is a collection of + observation vectors; ignored otherwise. See the ``pdist`` + function for a list of valid distance metrics. A custom distance + function can also be used. + optimal_ordering : bool, optional + If True, the linkage matrix will be reordered so that the distance + between successive leaves is minimal. This results in a more intuitive + tree structure when the data are visualized. defaults to False, because + this algorithm can be slow, particularly on large datasets [2]_. See + also the `optimal_leaf_ordering` function. + + .. versionadded:: 1.0.0 + + Returns + ------- + Z : ndarray + The hierarchical clustering encoded as a linkage matrix. + + Notes + ----- + 1. For method 'single', an optimized algorithm based on minimum spanning + tree is implemented. It has time complexity :math:`O(n^2)`. + For methods 'complete', 'average', 'weighted' and 'ward', an algorithm + called nearest-neighbors chain is implemented. It also has time + complexity :math:`O(n^2)`. + For other methods, a naive algorithm is implemented with :math:`O(n^3)` + time complexity. + All algorithms use :math:`O(n^2)` memory. + Refer to [1]_ for details about the algorithms. + 2. Methods 'centroid', 'median', and 'ward' are correctly defined only if + Euclidean pairwise metric is used. If `y` is passed as precomputed + pairwise distances, then it is the user's responsibility to assure that + these distances are in fact Euclidean, otherwise the produced result + will be incorrect. 
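+    3. A small worked example of the ``Z`` encoding described above (an
+       illustrative addition):
+
+       >>> from scipy.cluster.hierarchy import linkage
+       >>> linkage([1.0, 3.0, 2.0], method='single')
+       array([[0., 1., 1., 2.],
+              [2., 3., 2., 3.]])
+
+       Here, observations 0 and 1 (at distance 1) are merged first into
+       cluster 3; then cluster 3 and observation 2 are merged at distance 2,
+       giving a cluster that contains all 3 original observations.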
+ + See Also + -------- + scipy.spatial.distance.pdist : pairwise distance metrics + + References + ---------- + .. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering + algorithms", :arXiv:`1109.2378v1`. + .. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal + leaf ordering for hierarchical clustering", 2001. Bioinformatics + :doi:`10.1093/bioinformatics/17.suppl_1.S22` + + Examples + -------- + >>> from scipy.cluster.hierarchy import dendrogram, linkage + >>> from matplotlib import pyplot as plt + >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] + + >>> Z = linkage(X, 'ward') + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + + >>> Z = linkage(X, 'single') + >>> fig = plt.figure(figsize=(25, 10)) + >>> dn = dendrogram(Z) + >>> plt.show() + """ + xp = array_namespace(y) + y = _asarray(y, order='C', dtype=xp.float64, xp=xp) + + if method not in _LINKAGE_METHODS: + raise ValueError(f"Invalid method: {method}") + + if method in _EUCLIDEAN_METHODS and metric != 'euclidean' and y.ndim == 2: + msg = f"`method={method}` requires the distance metric to be Euclidean" + raise ValueError(msg) + + if y.ndim == 1: + distance.is_valid_y(y, throw=True, name='y') + elif y.ndim == 2: + if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and + xp.all(y >= 0) and np.allclose(y, y.T)): + warnings.warn('The symmetric non-negative hollow observation ' + 'matrix looks suspiciously like an uncondensed ' + 'distance matrix', + ClusterWarning, stacklevel=2) + y = distance.pdist(y, metric) + y = xp.asarray(y) + else: + raise ValueError("`y` must be 1 or 2 dimensional.") + + if not xp.all(xp.isfinite(y)): + raise ValueError("The condensed distance matrix must contain only " + "finite values.") + + n = int(distance.num_obs_y(y)) + method_code = _LINKAGE_METHODS[method] + + y = np.asarray(y) + if method == 'single': + result = _hierarchy.mst_single_linkage(y, n) + elif method in ['complete', 'average', 'weighted', 'ward']: + result = _hierarchy.nn_chain(y, n, method_code) + else: + result = _hierarchy.fast_linkage(y, n, method_code) + result = xp.asarray(result) + + if optimal_ordering: + y = xp.asarray(y) + return optimal_leaf_ordering(result, y) + else: + return result + + +class ClusterNode: + """ + A tree node class for representing a cluster. + + Leaf nodes correspond to original observations, while non-leaf nodes + correspond to non-singleton clusters. + + The `to_tree` function converts a matrix returned by the linkage + function into an easy-to-use tree representation. + + All parameter names are also attributes. + + Parameters + ---------- + id : int + The node id. + left : ClusterNode instance, optional + The left child tree node. + right : ClusterNode instance, optional + The right child tree node. + dist : float, optional + Distance for this cluster in the linkage matrix. + count : int, optional + The number of samples in this cluster. + + See Also + -------- + to_tree : for converting a linkage matrix ``Z`` into a tree object. + + """ + + def __init__(self, id, left=None, right=None, dist=0, count=1): + if id < 0: + raise ValueError('The id must be non-negative.') + if dist < 0: + raise ValueError('The distance must be non-negative.') + if (left is None and right is not None) or \ + (left is not None and right is None): + raise ValueError('Only full or proper binary trees are permitted.' 
+                             ' This node has one child.')
+        if count < 1:
+            raise ValueError('A cluster must contain at least one original '
+                             'observation.')
+        self.id = id
+        self.left = left
+        self.right = right
+        self.dist = dist
+        if self.left is None:
+            self.count = count
+        else:
+            self.count = left.count + right.count
+
+    def __lt__(self, node):
+        if not isinstance(node, ClusterNode):
+            raise ValueError("Can't compare ClusterNode "
+                             f"to type {type(node)}")
+        return self.dist < node.dist
+
+    def __gt__(self, node):
+        if not isinstance(node, ClusterNode):
+            raise ValueError("Can't compare ClusterNode "
+                             f"to type {type(node)}")
+        return self.dist > node.dist
+
+    def __eq__(self, node):
+        if not isinstance(node, ClusterNode):
+            raise ValueError("Can't compare ClusterNode "
+                             f"to type {type(node)}")
+        return self.dist == node.dist
+
+    def get_id(self):
+        """
+        The identifier of the target node.
+
+        For ``0 <= i < n``, `i` corresponds to original observation i.
+        For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
+        at iteration ``i-n``.
+
+        Returns
+        -------
+        id : int
+            The identifier of the target node.
+
+        """
+        return self.id
+
+    def get_count(self):
+        """
+        The number of leaf nodes (original observations) belonging to
+        the cluster node nd. If the target node is a leaf, 1 is
+        returned.
+
+        Returns
+        -------
+        get_count : int
+            The number of leaf nodes below the target node.
+
+        """
+        return self.count
+
+    def get_left(self):
+        """
+        Return a reference to the left child tree object.
+
+        Returns
+        -------
+        left : ClusterNode
+            The left child of the target node. If the node is a leaf,
+            None is returned.
+
+        """
+        return self.left
+
+    def get_right(self):
+        """
+        Return a reference to the right child tree object.
+
+        Returns
+        -------
+        right : ClusterNode
+            The right child of the target node. If the node is a leaf,
+            None is returned.
+
+        """
+        return self.right
+
+    def is_leaf(self):
+        """
+        Return True if the target node is a leaf.
+
+        Returns
+        -------
+        leafness : bool
+            True if the target node is a leaf node.
+
+        """
+        return self.left is None
+
+    def pre_order(self, func=(lambda x: x.id)):
+        """
+        Perform pre-order traversal without recursive function calls.
+
+        When a leaf node is first encountered, ``func`` is called with
+        the leaf node as its argument, and its result is appended to
+        the list.
+
+        For example, the statement::
+
+            ids = root.pre_order(lambda x: x.id)
+
+        returns a list of the node ids corresponding to the leaf nodes
+        of the tree as they appear from left to right.
+
+        Parameters
+        ----------
+        func : function
+            Applied to each leaf ClusterNode object in the pre-order traversal.
+            Given the ``i``-th leaf node in the pre-order traversal ``n[i]``,
+            the result of ``func(n[i])`` is stored in ``L[i]``. If not
+            provided, the index of the original observation to which the node
+            corresponds is used.
+
+        Returns
+        -------
+        L : list
+            The pre-order traversal.
+
+        """
+        # Do a preorder traversal, caching the result. To avoid having to do
+        # recursion, we'll store the previous index we've visited in a vector.
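+        # Illustrative walk-through (added for clarity): with n = 3 leaves and
+        # Z = [[0, 2, 0.5, 2], [1, 3, 1.0, 3]], to_tree(Z) builds node 3 with
+        # children (0, 2) and root 4 with children (1, 3); pre_order() then
+        # returns [1, 0, 2], exhausting each left subtree before its right.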
+ n = self.count + + curNode = [None] * (2 * n) + lvisited = set() + rvisited = set() + curNode[0] = self + k = 0 + preorder = [] + while k >= 0: + nd = curNode[k] + ndid = nd.id + if nd.is_leaf(): + preorder.append(func(nd)) + k = k - 1 + else: + if ndid not in lvisited: + curNode[k + 1] = nd.left + lvisited.add(ndid) + k = k + 1 + elif ndid not in rvisited: + curNode[k + 1] = nd.right + rvisited.add(ndid) + k = k + 1 + # If we've visited the left and right of this non-leaf + # node already, go up in the tree. + else: + k = k - 1 + + return preorder + + +_cnode_bare = ClusterNode(0) +_cnode_type = type(ClusterNode) + + +def _order_cluster_tree(Z): + """ + Return clustering nodes in bottom-up order by distance. + + Parameters + ---------- + Z : scipy.cluster.linkage array + The linkage matrix. + + Returns + ------- + nodes : list + A list of ClusterNode objects. + """ + q = deque() + tree = to_tree(Z) + q.append(tree) + nodes = [] + + while q: + node = q.popleft() + if not node.is_leaf(): + bisect.insort_left(nodes, node) + q.append(node.get_right()) + q.append(node.get_left()) + return nodes + + +def cut_tree(Z, n_clusters=None, height=None): + """ + Given a linkage matrix Z, return the cut tree. + + Parameters + ---------- + Z : scipy.cluster.linkage array + The linkage matrix. + n_clusters : array_like, optional + Number of clusters in the tree at the cut point. + height : array_like, optional + The height at which to cut the tree. Only possible for ultrametric + trees. + + Returns + ------- + cutree : array + An array indicating group membership at each agglomeration step. I.e., + for a full cut tree, in the first column each data point is in its own + cluster. At the next step, two nodes are merged. Finally, all + singleton and non-singleton clusters are in one group. If `n_clusters` + or `height` are given, the columns correspond to the columns of + `n_clusters` or `height`. 
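+
+    Notes
+    -----
+    A minimal worked case (illustrative): for three observations whose
+    single-linkage tree merges observations 0 and 1 first, a two-cluster cut
+    keeps 0 and 1 together and isolates 2:
+
+    >>> import numpy as np
+    >>> from scipy.cluster.hierarchy import cut_tree
+    >>> Z = np.array([[0., 1., 1., 2.], [2., 3., 2., 3.]])
+    >>> cut_tree(Z, n_clusters=2).ravel()
+    array([0, 0, 1])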
+ + Examples + -------- + >>> from scipy import cluster + >>> import numpy as np + >>> from numpy.random import default_rng + >>> rng = default_rng() + >>> X = rng.random((50, 4)) + >>> Z = cluster.hierarchy.ward(X) + >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10]) + >>> cutree[:10] + array([[0, 0], + [1, 1], + [2, 2], + [3, 3], + [3, 4], + [2, 2], + [0, 0], + [1, 5], + [3, 6], + [4, 7]]) # random + + """ + xp = array_namespace(Z) + nobs = num_obs_linkage(Z) + nodes = _order_cluster_tree(Z) + + if height is not None and n_clusters is not None: + raise ValueError("At least one of either height or n_clusters " + "must be None") + elif height is None and n_clusters is None: # return the full cut tree + cols_idx = xp.arange(nobs) + elif height is not None: + height = xp.asarray(height) + heights = xp.asarray([x.dist for x in nodes]) + cols_idx = xp.searchsorted(heights, height) + else: + n_clusters = xp.asarray(n_clusters) + cols_idx = nobs - xp.searchsorted(xp.arange(nobs), n_clusters) + + try: + n_cols = len(cols_idx) + except TypeError: # scalar + n_cols = 1 + cols_idx = xp.asarray([cols_idx]) + + groups = xp.zeros((n_cols, nobs), dtype=xp.int64) + last_group = xp.arange(nobs) + if 0 in cols_idx: + groups[0] = last_group + + for i, node in enumerate(nodes): + idx = node.pre_order() + this_group = copy(last_group, xp=xp) + # TODO ARRAY_API complex indexing not supported + this_group[idx] = xp.min(last_group[idx]) + this_group[this_group > xp.max(last_group[idx])] -= 1 + if i + 1 in cols_idx: + groups[np.nonzero(i + 1 == cols_idx)[0]] = this_group + last_group = this_group + + return groups.T + + +def to_tree(Z, rd=False): + """ + Convert a linkage matrix into an easy-to-use tree object. + + The reference to the root `ClusterNode` object is returned (by default). + + Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``, + and ``count`` attribute. The left and right attributes point to + ClusterNode objects that were combined to generate the cluster. + If both are None then the `ClusterNode` object is a leaf node, its count + must be 1, and its distance is meaningless but set to 0. + + *Note: This function is provided for the convenience of the library + user. ClusterNodes are not used as input to any of the functions in this + library.* + + Parameters + ---------- + Z : ndarray + The linkage matrix in proper form (see the `linkage` + function documentation). + rd : bool, optional + When False (default), a reference to the root `ClusterNode` object is + returned. Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a + reference to the root node while ``d`` is a list of `ClusterNode` + objects - one per original entry in the linkage matrix plus entries + for all clustering steps. If a cluster id is + less than the number of samples ``n`` in the data that the linkage + matrix describes, then it corresponds to a singleton cluster (leaf + node). + See `linkage` for more information on the assignment of cluster ids + to clusters. + + Returns + ------- + tree : ClusterNode or tuple (ClusterNode, list of ClusterNode) + If ``rd`` is False, a `ClusterNode`. + If ``rd`` is True, a list of length ``2*n - 1``, with ``n`` the number + of samples. See the description of `rd` above for more details. 
+
+    See Also
+    --------
+    linkage, is_valid_linkage, ClusterNode
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster import hierarchy
+    >>> rng = np.random.default_rng()
+    >>> x = rng.random((5, 2))
+    >>> Z = hierarchy.linkage(x)
+    >>> hierarchy.to_tree(Z)
+    <scipy.cluster.hierarchy.ClusterNode object at ...>
+    >>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True)
+    >>> rootnode
+    <scipy.cluster.hierarchy.ClusterNode object at ...>
+    >>> len(nodelist)
+    9
+
+    """
+    xp = array_namespace(Z)
+    Z = _asarray(Z, order='c', xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    # Number of original objects is equal to the number of rows plus 1.
+    n = Z.shape[0] + 1
+
+    # Create a list full of None's to store the node objects
+    d = [None] * (n * 2 - 1)
+
+    # Create the nodes corresponding to the n original objects.
+    for i in range(0, n):
+        d[i] = ClusterNode(i)
+
+    nd = None
+
+    for i in range(Z.shape[0]):
+        row = Z[i, :]
+
+        fi = int_floor(row[0], xp)
+        fj = int_floor(row[1], xp)
+        if fi > i + n:
+            raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
+                              'is used before it is formed. See row %d, '
+                              'column 0') % fi)
+        if fj > i + n:
+            raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
+                              'is used before it is formed. See row %d, '
+                              'column 1') % fj)
+
+        nd = ClusterNode(i + n, d[fi], d[fj], row[2])
+        #                ^ id   ^ left ^ right ^ dist
+        if row[3] != nd.count:
+            raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
+                              'incorrect.') % i)
+        d[n + i] = nd
+
+    if rd:
+        return (nd, d)
+    else:
+        return nd
+
+
+def optimal_leaf_ordering(Z, y, metric='euclidean'):
+    """
+    Given a linkage matrix Z and distance, reorder the cut tree.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a linkage matrix. See
+        `linkage` for more information on the return structure and
+        algorithm.
+    y : ndarray
+        The condensed distance matrix from which Z was generated.
+        Alternatively, a collection of m observation vectors in n
+        dimensions may be passed as an m by n array.
+    metric : str or function, optional
+        The distance metric to use in the case that y is a collection of
+        observation vectors; ignored otherwise. See the ``pdist``
+        function for a list of valid distance metrics. A custom distance
+        function can also be used.
+
+    Returns
+    -------
+    Z_ordered : ndarray
+        A copy of the linkage matrix Z, reordered to minimize the distance
+        between adjacent leaves.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster import hierarchy
+    >>> rng = np.random.default_rng()
+    >>> X = rng.standard_normal((10, 10))
+    >>> Z = hierarchy.ward(X)
+    >>> hierarchy.leaves_list(Z)
+    array([0, 3, 1, 9, 2, 5, 7, 4, 6, 8], dtype=int32)
+    >>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X))
+    array([3, 0, 2, 5, 7, 4, 8, 6, 9, 1], dtype=int32)
+
+    """
+    xp = array_namespace(Z, y)
+    Z = _asarray(Z, order='C', xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    y = _asarray(y, order='C', dtype=xp.float64, xp=xp)
+
+    if y.ndim == 1:
+        distance.is_valid_y(y, throw=True, name='y')
+    elif y.ndim == 2:
+        if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and
+                np.all(y >= 0) and np.allclose(y, y.T)):
+            warnings.warn('The symmetric non-negative hollow observation '
+                          'matrix looks suspiciously like an uncondensed '
+                          'distance matrix',
+                          ClusterWarning, stacklevel=2)
+        y = distance.pdist(y, metric)
+        y = xp.asarray(y)
+    else:
+        raise ValueError("`y` must be 1 or 2 dimensional.")
+
+    if not xp.all(xp.isfinite(y)):
+        raise ValueError("The condensed distance matrix must contain only "
+                         "finite values.")
+
+    Z = np.asarray(Z)
+    y = np.asarray(y)
+    return xp.asarray(_optimal_leaf_ordering.optimal_leaf_ordering(Z, y))
+
+
+def cophenet(Z, Y=None):
+    """
+    Calculate the cophenetic distances between each observation in
+    the hierarchical clustering defined by the linkage ``Z``.
+
+    Suppose ``p`` and ``q`` are original observations in
+    disjoint clusters ``s`` and ``t``, respectively, and
+    ``s`` and ``t`` are joined by a direct parent cluster
+    ``u``. The cophenetic distance between observations
+    ``p`` and ``q`` is simply the distance between
+    clusters ``s`` and ``t``.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as an array
+        (see `linkage` function).
+    Y : ndarray, optional
+        The condensed distance matrix from which `Z` was generated.
+        If provided, the cophenetic correlation coefficient ``c`` of the
+        clustering is computed and returned as well.
+
+    Returns
+    -------
+    c : ndarray
+        The cophenetic correlation coefficient (returned only if ``Y`` is
+        passed).
+    d : ndarray
+        The cophenetic distance matrix in condensed form. The
+        :math:`ij` th entry is the cophenetic distance between
+        original observations :math:`i` and :math:`j`.
+
+    See Also
+    --------
+    linkage :
+        for a description of what a linkage matrix is.
+    scipy.spatial.distance.squareform :
+        transforming condensed matrices into square ones.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import single, cophenet
+    >>> from scipy.spatial.distance import pdist, squareform
+
+    Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance
+    between two points of ``X`` is the distance between the largest two
+    distinct clusters that separately contain each of the points:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    ``X`` corresponds to this dataset ::
+
+        x x    x x
+        x        x
+
+        x        x
+        x x    x x
+
+    >>> Z = single(pdist(X))
+    >>> Z
+    array([[ 0.,  1.,  1.,  2.],
+           [ 2., 12.,  1.,  3.],
+           [ 3.,  4.,  1.,  2.],
+           [ 5., 14.,  1.,  3.],
+           [ 6.,  7.,  1.,  2.],
+           [ 8., 16.,  1.,  3.],
+           [ 9., 10.,  1.,  2.],
+           [11., 18.,  1.,  3.],
+           [13., 15.,  2.,  6.],
+           [17., 20.,  2.,  9.],
+           [19., 21.,  2., 12.]])
+    >>> cophenet(Z)
+    array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2.,
+           2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2.,
+           2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
+           1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.])
+
+    The output of the `scipy.cluster.hierarchy.cophenet` method is
+    represented in condensed form. We can use
+    `scipy.spatial.distance.squareform` to see the output as a
+    regular matrix (where each element ``ij`` denotes the cophenetic distance
+    between each ``i``, ``j`` pair of points in ``X``):
+
+    >>> squareform(cophenet(Z))
+    array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
+           [1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
+           [1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
+           [2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.],
+           [2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.],
+           [2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.],
+           [2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.],
+           [2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.],
+           [2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.],
+           [2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.],
+           [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.],
+           [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]])
+
+    In this example, the cophenetic distance between points of ``X`` that are
+    very close (i.e., in the same corner) is 1. For other pairs of points, it
+    is 2, because the points are located in clusters at different
+    corners - thus, the distance between these clusters is larger.
+
+    """
+    xp = array_namespace(Z, Y)
+    # Ensure float64 C-contiguous array. Cython code doesn't deal with striding.
+    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+    n = Z.shape[0] + 1
+    zz = np.zeros((n * (n-1)) // 2, dtype=np.float64)
+
+    Z = np.asarray(Z)
+    _hierarchy.cophenetic_distances(Z, zz, int(n))
+    zz = xp.asarray(zz)
+    if Y is None:
+        return zz
+
+    Y = _asarray(Y, order='C', xp=xp)
+    distance.is_valid_y(Y, throw=True, name='Y')
+
+    z = xp.mean(zz)
+    y = xp.mean(Y)
+    Yy = Y - y
+    Zz = zz - z
+    numerator = (Yy * Zz)
+    denomA = Yy**2
+    denomB = Zz**2
+    c = xp.sum(numerator) / xp.sqrt(xp.sum(denomA) * xp.sum(denomB))
+    return (c, zz)
+
+
+def inconsistent(Z, d=2):
+    r"""
+    Calculate inconsistency statistics on a linkage matrix.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical
+        clustering). See `linkage` documentation for more information on its
+        form.
+    d : int, optional
+        The number of links up to `d` levels below each non-singleton cluster.
+
+    Returns
+    -------
+    R : ndarray
+        A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link
+        statistics for the non-singleton cluster ``i``. The link statistics are
+        computed over the link heights for links :math:`d` levels below the
+        cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
+        deviation of the link heights, respectively; ``R[i,2]`` is the number
+        of links included in the calculation; and ``R[i,3]`` is the
+        inconsistency coefficient,
+
+        .. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}}{\mathtt{R[i,1]}}
+
+    Notes
+    -----
+    This function behaves similarly to the MATLAB(TM) ``inconsistent``
+    function.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import inconsistent, linkage
+    >>> from matplotlib import pyplot as plt
+    >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]
+    >>> Z = linkage(X, 'ward')
+    >>> print(Z)
+    [[ 5.          6.          0.          2.        ]
+     [ 2.          7.          0.          2.        ]
+     [ 0.          4.          1.          2.        ]
+     [ 1.          8.          1.15470054  3.        ]
+     [ 9.         10.          2.12132034  4.        ]
+     [ 3.         12.          4.11096096  5.        ]
+     [11.         13.         14.07183949  8.        ]]
+    >>> inconsistent(Z)
+    array([[ 0.        ,  0.        ,  1.        ,  0.        ],
+           [ 0.        ,  0.        ,  1.        ,  0.        ],
+           [ 1.        ,  0.        ,  1.        ,  0.        ],
+           [ 0.57735027,  0.81649658,  2.        ,  0.70710678],
+           [ 1.04044011,  1.06123822,  3.        ,  1.01850858],
+           [ 3.11614065,  1.40688837,  2.        ,  0.70710678],
+           [ 6.44583366,  6.76770586,  3.        ,  1.12682288]])
+
+    """
+    xp = array_namespace(Z)
+    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    if (not d == np.floor(d)) or d < 0:
+        raise ValueError('The second argument d must be a nonnegative '
+                         'integer value.')
+
+    n = Z.shape[0] + 1
+    R = np.zeros((n - 1, 4), dtype=np.float64)
+
+    Z = np.asarray(Z)
+    _hierarchy.inconsistent(Z, R, int(n), int(d))
+    R = xp.asarray(R)
+    return R
+
+
+def from_mlab_linkage(Z):
+    """
+    Convert a linkage matrix generated by MATLAB(TM) to a new
+    linkage matrix compatible with this module.
+
+    The conversion does two things:
+
+     * the indices are converted from ``1..N`` to ``0..(N-1)`` form,
+       and
+
+     * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the
+       number of original observations (leaves) in the non-singleton
+       cluster ``i``.
+
+    This function is useful when loading in linkages from legacy data
+    files generated by MATLAB.
+
+    Parameters
+    ----------
+    Z : ndarray
+        A linkage matrix generated by MATLAB(TM).
+
+    Returns
+    -------
+    ZS : ndarray
+        A linkage matrix compatible with ``scipy.cluster.hierarchy``.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    to_mlab_linkage : transform from SciPy to MATLAB format.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster.hierarchy import ward, from_mlab_linkage
+
+    Given a linkage matrix in MATLAB format ``mZ``, we can use
+    `scipy.cluster.hierarchy.from_mlab_linkage` to import
+    it into SciPy format:
+
+    >>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1],
+    ...                [10, 11, 1], [3, 13, 1.29099445],
+    ...                [6, 14, 1.29099445],
+    ...                [9, 15, 1.29099445],
+    ...                [12, 16, 1.29099445],
+    ...                [17, 18, 5.77350269],
+    ...                [19, 20, 5.77350269],
+    ...                [21, 22, 8.16496581]])
+
+    >>> Z = from_mlab_linkage(mZ)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+
+    As expected, the linkage matrix ``Z`` returned includes an
+    additional column counting the number of original samples in
+    each cluster. Also, all cluster indices are reduced by 1
+    (MATLAB format uses 1-indexing, whereas SciPy uses 0-indexing).
+
+    """
+    xp = array_namespace(Z)
+    Z = _asarray(Z, dtype=xp.float64, order='C', xp=xp)
+    Zs = Z.shape
+
+    # If it's empty, return it.
+ if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): + return copy(Z, xp=xp) + + if len(Zs) != 2: + raise ValueError("The linkage array must be rectangular.") + + # If it contains no rows, return it. + if Zs[0] == 0: + return copy(Z, xp=xp) + + Zpart = copy(Z, xp=xp) + if xp.min(Zpart[:, 0:2]) != 1.0 and xp.max(Zpart[:, 0:2]) != 2 * Zs[0]: + raise ValueError('The format of the indices is not 1..N') + + Zpart[:, 0:2] -= 1.0 + CS = np.zeros((Zs[0],), dtype=np.float64) + Zpart = np.asarray(Zpart) + _hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1) + res = np.hstack([Zpart, CS.reshape(Zs[0], 1)]) + return xp.asarray(res) + + +def to_mlab_linkage(Z): + """ + Convert a linkage matrix to a MATLAB(TM) compatible one. + + Converts a linkage matrix ``Z`` generated by the linkage function + of this module to a MATLAB(TM) compatible one. The return linkage + matrix has the last column removed and the cluster indices are + converted to ``1..N`` indexing. + + Parameters + ---------- + Z : ndarray + A linkage matrix generated by ``scipy.cluster.hierarchy``. + + Returns + ------- + to_mlab_linkage : ndarray + A linkage matrix compatible with MATLAB(TM)'s hierarchical + clustering functions. + + The return linkage matrix has the last column removed + and the cluster indices are converted to ``1..N`` indexing. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + from_mlab_linkage : transform from Matlab to SciPy format. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, to_mlab_linkage + >>> from scipy.spatial.distance import pdist + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + + After a linkage matrix ``Z`` has been created, we can use + `scipy.cluster.hierarchy.to_mlab_linkage` to convert it + into MATLAB format: + + >>> mZ = to_mlab_linkage(Z) + >>> mZ + array([[ 1. , 2. , 1. ], + [ 4. , 5. , 1. ], + [ 7. , 8. , 1. ], + [ 10. , 11. , 1. ], + [ 3. , 13. , 1.29099445], + [ 6. , 14. , 1.29099445], + [ 9. , 15. , 1.29099445], + [ 12. , 16. , 1.29099445], + [ 17. , 18. , 5.77350269], + [ 19. , 20. , 5.77350269], + [ 21. , 22. , 8.16496581]]) + + The new linkage matrix ``mZ`` uses 1-indexing for all the + clusters (instead of 0-indexing). Also, the last column of + the original linkage matrix has been dropped. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + Zs = Z.shape + if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): + return copy(Z, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + ZP = copy(Z[:, 0:3], xp=xp) + ZP[:, 0:2] += 1.0 + + return ZP + + +def is_monotonic(Z): + """ + Return True if the linkage passed is monotonic. + + The linkage is monotonic if for every cluster :math:`s` and :math:`t` + joined, the distance between them is no less than the distance + between any previously joined clusters. + + Parameters + ---------- + Z : ndarray + The linkage matrix to check for monotonicity. + + Returns + ------- + b : bool + A boolean indicating whether the linkage is monotonic. 
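+
+    Raises
+    ------
+    ValueError
+        If ``Z`` is not a valid linkage matrix (validity is checked with
+        `is_valid_linkage` before monotonicity is tested).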
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, ward, is_monotonic
+    >>> from scipy.spatial.distance import pdist
+
+    By definition, some hierarchical clustering algorithms - such as
+    `scipy.cluster.hierarchy.ward` - produce monotonic assignments of
+    samples to clusters; however, this is not always true for other
+    hierarchical methods - e.g. `scipy.cluster.hierarchy.median`.
+
+    Given a linkage matrix ``Z`` (as the result of a hierarchical clustering
+    method) we can test programmatically whether it has the monotonicity
+    property or not, using `scipy.cluster.hierarchy.is_monotonic`:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+    >>> is_monotonic(Z)
+    True
+
+    >>> Z = median(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> is_monotonic(Z)
+    False
+
+    Note that this method is equivalent to just verifying that the distances
+    in the third column of the linkage matrix appear in a monotonically
+    increasing order.
+
+    """
+    xp = array_namespace(Z)
+    Z = _asarray(Z, order='c', xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    # We expect each merge distance to be no smaller than the previous one,
+    # i.e., Z[:, 2] must be non-decreasing.
+    return xp.all(Z[1:, 2] >= Z[:-1, 2])
+
+
+def is_valid_im(R, warning=False, throw=False, name=None):
+    """Return True if the inconsistency matrix passed is valid.
+
+    It must be a :math:`n` by 4 array of doubles. The standard
+    deviations ``R[:,1]`` must be nonnegative. The link counts
+    ``R[:,2]`` must be positive and no greater than :math:`n-1`.
+
+    Parameters
+    ----------
+    R : ndarray
+        The inconsistency matrix to check for validity.
+    warning : bool, optional
+        When True, issues a Python warning if the inconsistency
+        matrix passed is invalid.
+    throw : bool, optional
+        When True, throws a Python exception if the inconsistency
+        matrix passed is invalid.
+    name : str, optional
+        This string refers to the variable name of the invalid
+        inconsistency matrix.
+
+    Returns
+    -------
+    b : bool
+        True if the inconsistency matrix is valid.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    inconsistent : for the creation of an inconsistency matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im
+    >>> from scipy.spatial.distance import pdist
+
+    Given a data set ``X``, we can apply a clustering method to obtain a
+    linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
+    also be used to obtain the inconsistency matrix ``R`` associated to
+    this clustering process:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> R = inconsistent(Z)
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+    >>> R
+    array([[1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.        , 0.        , 1.        , 0.        ],
+           [1.14549722, 0.20576415, 2.        , 0.70710678],
+           [1.14549722, 0.20576415, 2.        , 0.70710678],
+           [1.14549722, 0.20576415, 2.        , 0.70710678],
+           [1.14549722, 0.20576415, 2.        , 0.70710678],
+           [2.78516386, 2.58797734, 3.        , 1.15470054],
+           [2.78516386, 2.58797734, 3.        , 1.15470054],
+           [6.57065706, 1.38071187, 3.        , 1.15470054]])
+
+    Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that
+    ``R`` is correct:
+
+    >>> is_valid_im(R)
+    True
+
+    However, if ``R`` is wrongly constructed (e.g., one of the standard
+    deviations is set to a negative value), then the check will fail:
+
+    >>> R[-1,1] = R[-1,1] * -1
+    >>> is_valid_im(R)
+    False
+
+    """
+    xp = array_namespace(R)
+    R = _asarray(R, order='c', xp=xp)
+    valid = True
+    name_str = "%r " % name if name else ''
+    try:
+        if R.dtype != xp.float64:
+            raise TypeError('Inconsistency matrix %smust contain doubles '
+                            '(double).' % name_str)
+        if len(R.shape) != 2:
+            raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
+                             'be two-dimensional).' % name_str)
+        if R.shape[1] != 4:
+            raise ValueError('Inconsistency matrix %smust have 4 columns.' %
+                             name_str)
+        if R.shape[0] < 1:
+            raise ValueError('Inconsistency matrix %smust have at least one '
+                             'row.' % name_str)
+        if xp.any(R[:, 0] < 0):
+            raise ValueError('Inconsistency matrix %scontains negative link '
+                             'height means.' % name_str)
+        if xp.any(R[:, 1] < 0):
+            raise ValueError('Inconsistency matrix %scontains negative link '
+                             'height standard deviations.' % name_str)
+        if xp.any(R[:, 2] < 0):
+            raise ValueError('Inconsistency matrix %scontains negative link '
+                             'counts.' % name_str)
+    except Exception as e:
+        if throw:
+            raise
+        if warning:
+            _warning(str(e))
+        valid = False
+
+    return valid
+
+
+def is_valid_linkage(Z, warning=False, throw=False, name=None):
+    """
+    Check the validity of a linkage matrix.
+
+    A linkage matrix is valid if it is a 2-D array (type double)
+    with :math:`n` rows and 4 columns. The first two columns must contain
+    indices between 0 and :math:`2n-1`. For a given row ``i``, the following
+    two expressions have to hold:
+
+    .. math::
+
+        0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
+
+        0 \\leq \\mathtt{Z[i,1]} \\leq i+n-1
+
+    I.e., a cluster cannot join another cluster unless the cluster being joined
+    has been generated.
+
+    Parameters
+    ----------
+    Z : array_like
+        Linkage matrix.
+    warning : bool, optional
+        When True, issues a Python warning if the linkage
+        matrix passed is invalid.
+    throw : bool, optional
+        When True, throws a Python exception if the linkage
+        matrix passed is invalid.
+    name : str, optional
+        This string refers to the variable name of the invalid
+        linkage matrix.
+
+    Returns
+    -------
+    b : bool
+        True if the linkage matrix is valid.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
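+
+    Notes
+    -----
+    This function validates only the structure of ``Z``: its shape, dtype,
+    index ranges, counts, and that no cluster is referenced before it is
+    formed or used more than once. It does not check that the merge
+    distances in ``Z[:, 2]`` are monotonic; use `is_monotonic` for that.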
+ + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, is_valid_linkage + >>> from scipy.spatial.distance import pdist + + All linkage matrices generated by the clustering methods in this module + will be valid (i.e., they will have the appropriate dimensions and the two + required expressions will hold for all the rows). + + We can check this using `scipy.cluster.hierarchy.is_valid_linkage`: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 2. , 12. , 1.29099445, 3. ], + [ 5. , 13. , 1.29099445, 3. ], + [ 8. , 14. , 1.29099445, 3. ], + [11. , 15. , 1.29099445, 3. ], + [16. , 17. , 5.77350269, 6. ], + [18. , 19. , 5.77350269, 6. ], + [20. , 21. , 8.16496581, 12. ]]) + >>> is_valid_linkage(Z) + True + + However, if we create a linkage matrix in a wrong way - or if we modify + a valid one in a way that any of the required expressions don't hold + anymore, then the check will fail: + + >>> Z[3][1] = 20 # the cluster number 20 is not defined at this point + >>> is_valid_linkage(Z) + False + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + valid = True + name_str = "%r " % name if name else '' + try: + if Z.dtype != xp.float64: + raise TypeError('Linkage matrix %smust contain doubles.' % name_str) + if len(Z.shape) != 2: + raise ValueError('Linkage matrix %smust have shape=2 (i.e. be ' + 'two-dimensional).' % name_str) + if Z.shape[1] != 4: + raise ValueError('Linkage matrix %smust have 4 columns.' % name_str) + if Z.shape[0] == 0: + raise ValueError('Linkage must be computed on at least two ' + 'observations.') + n = Z.shape[0] + if n > 1: + if (xp.any(Z[:, 0] < 0) or xp.any(Z[:, 1] < 0)): + raise ValueError('Linkage %scontains negative indices.' % + name_str) + if xp.any(Z[:, 2] < 0): + raise ValueError('Linkage %scontains negative distances.' % + name_str) + if xp.any(Z[:, 3] < 0): + raise ValueError('Linkage %scontains negative counts.' % + name_str) + if _check_hierarchy_uses_cluster_before_formed(Z): + raise ValueError('Linkage %suses non-singleton cluster before ' + 'it is formed.' % name_str) + if _check_hierarchy_uses_cluster_more_than_once(Z): + raise ValueError('Linkage %suses the same cluster more than once.' + % name_str) + except Exception as e: + if throw: + raise + if warning: + _warning(str(e)) + valid = False + + return valid + + +def _check_hierarchy_uses_cluster_before_formed(Z): + n = Z.shape[0] + 1 + for i in range(0, n - 1): + if Z[i, 0] >= n + i or Z[i, 1] >= n + i: + return True + return False + + +def _check_hierarchy_uses_cluster_more_than_once(Z): + n = Z.shape[0] + 1 + chosen = set() + for i in range(0, n - 1): + used_more_than_once = ( + (float(Z[i, 0]) in chosen) + or (float(Z[i, 1]) in chosen) + or Z[i, 0] == Z[i, 1] + ) + if used_more_than_once: + return True + chosen.add(float(Z[i, 0])) + chosen.add(float(Z[i, 1])) + return False + + +def _check_hierarchy_not_all_clusters_used(Z): + n = Z.shape[0] + 1 + chosen = set() + for i in range(0, n - 1): + chosen.add(int(Z[i, 0])) + chosen.add(int(Z[i, 1])) + must_chosen = set(range(0, 2 * n - 2)) + return len(must_chosen.difference(chosen)) > 0 + + +def num_obs_linkage(Z): + """ + Return the number of original observations of the linkage matrix passed. + + Parameters + ---------- + Z : ndarray + The linkage matrix on which to perform the operation. 
+ + Returns + ------- + n : int + The number of original observations in the linkage. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, num_obs_linkage + >>> from scipy.spatial.distance import pdist + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = ward(pdist(X)) + + ``Z`` is a linkage matrix obtained after using the Ward clustering method + with ``X``, a dataset with 12 data points. + + >>> num_obs_linkage(Z) + 12 + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='c', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + return (Z.shape[0] + 1) + + +def correspond(Z, Y): + """ + Check for correspondence between linkage and condensed distance matrices. + + They must have the same number of original observations for + the check to succeed. + + This function is useful as a sanity check in algorithms that make + extensive use of linkage and distance matrices that must + correspond to the same set of original observations. + + Parameters + ---------- + Z : array_like + The linkage matrix to check for correspondence. + Y : array_like + The condensed distance matrix to check for correspondence. + + Returns + ------- + b : bool + A boolean indicating whether the linkage matrix and distance + matrix could possibly correspond to one another. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + + Examples + -------- + >>> from scipy.cluster.hierarchy import ward, correspond + >>> from scipy.spatial.distance import pdist + + This method can be used to check if a given linkage matrix ``Z`` has been + obtained from the application of a cluster method over a dataset ``X``: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + >>> X_condensed = pdist(X) + >>> Z = ward(X_condensed) + + Here, we can compare ``Z`` and ``X`` (in condensed form): + + >>> correspond(Z, X_condensed) + True + + """ + is_valid_linkage(Z, throw=True) + distance.is_valid_y(Y, throw=True) + xp = array_namespace(Z, Y) + Z = _asarray(Z, order='c', xp=xp) + Y = _asarray(Y, order='c', xp=xp) + return distance.num_obs_y(Y) == num_obs_linkage(Z) + + +def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None): + """ + Form flat clusters from the hierarchical clustering defined by + the given linkage matrix. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded with the matrix returned + by the `linkage` function. + t : scalar + For criteria 'inconsistent', 'distance' or 'monocrit', + this is the threshold to apply when forming flat clusters. + For 'maxclust' or 'maxclust_monocrit' criteria, + this would be max number of clusters requested. + criterion : str, optional + The criterion to use in forming flat clusters. This can + be any of the following values: + + ``inconsistent`` : + If a cluster node and all its + descendants have an inconsistent value less than or equal + to `t`, then all its leaf descendants belong to the + same flat cluster. When no non-singleton cluster meets + this criterion, every node is assigned to its own + cluster. (Default) + + ``distance`` : + Forms flat clusters so that the original + observations in each flat cluster have no greater a + cophenetic distance than `t`. 
+
+          ``maxclust`` :
+              Finds a minimum threshold ``r`` so that
+              the cophenetic distance between any two original
+              observations in the same flat cluster is no more than
+              ``r`` and no more than `t` flat clusters are formed.
+
+          ``monocrit`` :
+              Forms a flat cluster from a cluster node ``c``
+              with index ``i`` when ``monocrit[j] <= t`` for all cluster
+              indices ``j`` below and including ``i``.
+
+              For example, to threshold on the maximum mean distance
+              as computed in the inconsistency matrix R with a
+              threshold of 0.8 do::
+
+                  MR = maxRstat(Z, R, 3)
+                  fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
+
+          ``maxclust_monocrit`` :
+              Forms a flat cluster from a
+              non-singleton cluster node ``c`` when ``monocrit[i] <=
+              r`` for all cluster indices ``i`` below and including
+              ``c``. ``r`` is minimized such that no more than ``t``
+              flat clusters are formed. monocrit must be
+              monotonic. For example, to minimize the threshold t on
+              maximum inconsistency values so that no more than 3 flat
+              clusters are formed, do::
+
+                  MI = maxinconsts(Z, R)
+                  fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
+
+    depth : int, optional
+        The maximum depth to perform the inconsistency calculation.
+        It has no meaning for the other criteria. Default is 2.
+    R : ndarray, optional
+        The inconsistency matrix to use for the ``'inconsistent'``
+        criterion. This matrix is computed if not provided.
+    monocrit : ndarray, optional
+        An array of length n-1. ``monocrit[i]`` is the
+        statistic upon which non-singleton i is thresholded. The
+        monocrit vector must be monotonic, i.e., given a node c with
+        index i, for all node indices j corresponding to nodes
+        below c, ``monocrit[i] >= monocrit[j]``.
+
+    Returns
+    -------
+    fcluster : ndarray
+        An array of length ``n``. ``T[i]`` is the flat cluster number to
+        which original observation ``i`` belongs.
+
+    See Also
+    --------
+    linkage : for information about how hierarchical clustering methods work.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, fcluster
+    >>> from scipy.spatial.distance import pdist
+
+    All cluster linkage methods - e.g., `scipy.cluster.hierarchy.ward` -
+    generate a linkage matrix ``Z`` as their output:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.        ]])
+
+    This matrix represents a dendrogram, where the first and second elements
+    are the two clusters merged at each step, the third element is the
+    distance between these clusters, and the fourth element is the size of
+    the new cluster - the number of original data points included.
+
+    `scipy.cluster.hierarchy.fcluster` can be used to flatten the
+    dendrogram, obtaining as a result an assignment of the original data
+    points to single clusters.
+
+    This assignment depends mostly on a distance threshold ``t`` - the maximum
+    inter-cluster distance allowed:
+
+    >>> fcluster(Z, t=0.9, criterion='distance')
+    array([ 1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12], dtype=int32)
+
+    >>> fcluster(Z, t=1.1, criterion='distance')
+    array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)
+
+    >>> fcluster(Z, t=3, criterion='distance')
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+
+    >>> fcluster(Z, t=9, criterion='distance')
+    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
+
+    In the first case, the threshold ``t`` is too small to allow any two
+    samples in the data to form a cluster, so 12 different clusters are
+    returned.
+
+    In the second case, the threshold is large enough to merge the four
+    closest pairs of points, so only 8 clusters are returned.
+
+    In the third case, with a much higher threshold, the three points in
+    each corner of the dataset are merged into one cluster, so 4 clusters
+    are returned.
+
+    Lastly, the threshold of the fourth case is large enough to allow for
+    all data points to be merged together - so a single cluster is returned.
+
+    """
+    xp = array_namespace(Z)
+    Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+
+    n = Z.shape[0] + 1
+    T = np.zeros((n,), dtype='i')
+
+    if monocrit is not None:
+        monocrit = np.asarray(monocrit, order='C', dtype=np.float64)
+
+    Z = np.asarray(Z)
+    monocrit = np.asarray(monocrit)
+    if criterion == 'inconsistent':
+        if R is None:
+            R = inconsistent(Z, depth)
+        else:
+            R = _asarray(R, order='C', dtype=xp.float64, xp=xp)
+            is_valid_im(R, throw=True, name='R')
+        # The C code does not support strided arrays, so pass a
+        # contiguous ndarray instead.
+        R = np.asarray(R)
+        _hierarchy.cluster_in(Z, R, T, float(t), int(n))
+    elif criterion == 'distance':
+        _hierarchy.cluster_dist(Z, T, float(t), int(n))
+    elif criterion == 'maxclust':
+        _hierarchy.cluster_maxclust_dist(Z, T, int(n), t)
+    elif criterion == 'monocrit':
+        _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
+    elif criterion == 'maxclust_monocrit':
+        _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
+    else:
+        raise ValueError('Invalid cluster formation criterion: %s'
+                         % str(criterion))
+    return xp.asarray(T)
+
+
+def fclusterdata(X, t, criterion='inconsistent',
+                 metric='euclidean', depth=2, method='single', R=None):
+    """
+    Cluster observation data using a given metric.
+
+    Clusters the original observations in the n-by-m data matrix X
+    (n observations in m dimensions), computing pairwise distances between
+    the observations with the chosen distance metric (Euclidean by default),
+    performing hierarchical clustering with the chosen linkage method
+    (single by default), and forming flat clusters using the inconsistency
+    method with `t` as the cut-off threshold.
+
+    A 1-D array ``T`` of length ``n`` is returned. ``T[i]`` is
+    the index of the flat cluster to which the original observation ``i``
+    belongs.
+
+    Parameters
+    ----------
+    X : (N, M) ndarray
+        N by M data matrix with N observations in M dimensions.
+    t : scalar
+        For criteria 'inconsistent', 'distance' or 'monocrit',
+        this is the threshold to apply when forming flat clusters.
+        For 'maxclust' or 'maxclust_monocrit' criteria,
+        this is the maximum number of clusters requested.
+    criterion : str, optional
+        Specifies the criterion for forming flat clusters. Valid
+        values are 'inconsistent' (default), 'distance', or 'maxclust'
+        cluster formation algorithms. See `fcluster` for descriptions.
+    metric : str or function, optional
+        The distance metric for calculating pairwise distances. See
+        ``distance.pdist`` for descriptions of the available metrics, and
+        `linkage` to verify compatibility with the linkage method.
+    depth : int, optional
+        The maximum depth for the inconsistency calculation. See
+        `inconsistent` for more information.
+    method : str, optional
+        The linkage method to use (single, complete, average,
+        weighted, median, centroid, ward). See `linkage` for more
+        information. Default is "single".
+    R : ndarray, optional
+        The inconsistency matrix. It will be computed if it is not
+        passed.
+
+    Returns
+    -------
+    fclusterdata : ndarray
+        A vector of length n. T[i] is the flat cluster number to
+        which original observation i belongs.
+
+    See Also
+    --------
+    scipy.spatial.distance.pdist : pairwise distance metrics
+
+    Notes
+    -----
+    This function is similar to the MATLAB function ``clusterdata``.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import fclusterdata
+
+    This is a convenience method that abstracts all the steps to perform in a
+    typical SciPy hierarchical clustering workflow.
+
+    * Transform the input data into a condensed matrix with
+      `scipy.spatial.distance.pdist`.
+
+    * Apply a clustering method.
+
+    * Obtain flat clusters at a user-defined distance threshold ``t`` using
+      `scipy.cluster.hierarchy.fcluster`.
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> fclusterdata(X, t=1)
+    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+
+    The output here (for the dataset ``X``, distance threshold ``t``, and the
+    default settings) is four clusters with three data points each.
+
+    """
+    xp = array_namespace(X)
+    X = _asarray(X, order='C', dtype=xp.float64, xp=xp)
+
+    if X.ndim != 2:
+        raise TypeError('The observation matrix X must be an n by m '
+                        'array.')
+
+    Y = distance.pdist(X, metric=metric)
+    Y = xp.asarray(Y)
+    Z = linkage(Y, method=method)
+    if R is None:
+        R = inconsistent(Z, d=depth)
+    else:
+        R = _asarray(R, order='c', xp=xp)
+    T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
+    return T
+
+
+def leaves_list(Z):
+    """
+    Return a list of leaf node ids.
+
+    The return corresponds to the observation vector index as it appears
+    in the tree from left to right. Z is a linkage matrix.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. `Z` is
+        a linkage matrix. See `linkage` for more information.
+
+    Returns
+    -------
+    leaves_list : ndarray
+        The list of leaf node ids.
+
+    See Also
+    --------
+    dendrogram : for information about dendrogram structure.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list
+    >>> from scipy.spatial.distance import pdist
+    >>> from matplotlib import pyplot as plt
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+
+    The linkage matrix ``Z`` represents a dendrogram, that is, a tree that
+    encodes the structure of the clustering performed.
+    `scipy.cluster.hierarchy.leaves_list` shows the mapping between
+    indices in the ``X`` dataset and leaves in the dendrogram:
+
+    >>> leaves_list(Z)
+    array([ 2,  0,  1,  5,  3,  4,  8,  6,  7, 11,  9, 10], dtype=int32)
+
+    >>> fig = plt.figure(figsize=(25, 10))
+    >>> dn = dendrogram(Z)
+    >>> plt.show()
+
+    """
+    xp = array_namespace(Z)
+    Z = _asarray(Z, order='C', xp=xp)
+    is_valid_linkage(Z, throw=True, name='Z')
+    n = Z.shape[0] + 1
+    ML = np.zeros((n,), dtype='i')
+    Z = np.asarray(Z)
+    _hierarchy.prelist(Z, ML, n)
+    return xp.asarray(ML)
+
+
+# Maps number of leaves to text size.
+#
+# p <= 20, size="12"
+# 20 < p <= 30, size="10"
+# 30 < p <= 50, size="8"
+# 50 < p <= 85, size="6"
+# 85 < p, size="5"
+
+_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
+_drotation = {20: 0, 40: 45, np.inf: 90}
+_dtextsortedkeys = list(_dtextsizes.keys())
+_dtextsortedkeys.sort()
+_drotationsortedkeys = list(_drotation.keys())
+_drotationsortedkeys.sort()
+
+
+def _remove_dups(L):
+    """
+    Remove duplicates AND preserve the original order of the elements.
+
+    The set class is not guaranteed to do this.
+    """
+    seen_before = set()
+    L2 = []
+    for i in L:
+        if i not in seen_before:
+            seen_before.add(i)
+            L2.append(i)
+    return L2
+
+
+def _get_tick_text_size(p):
+    for k in _dtextsortedkeys:
+        if p <= k:
+            return _dtextsizes[k]
+
+
+def _get_tick_rotation(p):
+    for k in _drotationsortedkeys:
+        if p <= k:
+            return _drotation[k]
+
+
+def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
+                     no_labels, color_list, leaf_font_size=None,
+                     leaf_rotation=None, contraction_marks=None,
+                     ax=None, above_threshold_color='C0'):
+    # Import matplotlib here so that it's not imported unless dendrograms
+    # are plotted. Raise an informative error if importing fails.
+    try:
+        # if an axis is provided, don't use pylab at all
+        if ax is None:
+            import matplotlib.pylab
+        import matplotlib.patches
+        import matplotlib.collections
+    except ImportError as e:
+        raise ImportError("You must install the matplotlib library to plot "
+                          "the dendrogram. Use no_plot=True to calculate the "
+                          "dendrogram without plotting.") from e
+
+    if ax is None:
+        ax = matplotlib.pylab.gca()
+        # if we're using pylab, we want to trigger a draw at the end
+        trigger_redraw = True
+    else:
+        trigger_redraw = False
+
+    # Independent variable plot width
+    ivw = len(ivl) * 10
+    # Dependent variable plot height
+    dvw = mh + mh * 0.05
+
+    iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)
+    if orientation in ('top', 'bottom'):
+        if orientation == 'top':
+            ax.set_ylim([0, dvw])
+            ax.set_xlim([0, ivw])
+        else:
+            ax.set_ylim([dvw, 0])
+            ax.set_xlim([0, ivw])
+
+        xlines = icoords
+        ylines = dcoords
+        if no_labels:
+            ax.set_xticks([])
+            ax.set_xticklabels([])
+        else:
+            ax.set_xticks(iv_ticks)
+
+            if orientation == 'top':
+                ax.xaxis.set_ticks_position('bottom')
+            else:
+                ax.xaxis.set_ticks_position('top')
+
+            # Make the tick marks invisible because they cover up the links
+            for line in ax.get_xticklines():
+                line.set_visible(False)
+
+            leaf_rot = (float(_get_tick_rotation(len(ivl)))
+                        if (leaf_rotation is None) else leaf_rotation)
+            leaf_font = (float(_get_tick_text_size(len(ivl)))
+                         if (leaf_font_size is None) else leaf_font_size)
+            ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)
+
+    elif orientation in ('left', 'right'):
+        if orientation == 'left':
+            ax.set_xlim([dvw, 0])
+            ax.set_ylim([0, ivw])
+        else:
+            ax.set_xlim([0, dvw])
+            ax.set_ylim([0, ivw])
+
+        xlines = dcoords
+        ylines = icoords
+        if no_labels:
+            ax.set_yticks([])
+            ax.set_yticklabels([])
+        else:
+            ax.set_yticks(iv_ticks)
+
+            if orientation == 'left':
+                ax.yaxis.set_ticks_position('right')
+            else:
+                ax.yaxis.set_ticks_position('left')
+
+            # Make the tick marks invisible because they cover up the links
+            for line in ax.get_yticklines():
+                line.set_visible(False)
+
+            leaf_font = (float(_get_tick_text_size(len(ivl)))
+                         if (leaf_font_size is None) else leaf_font_size)
+
+            if leaf_rotation is not None:
+                ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
+            else:
+                ax.set_yticklabels(ivl, size=leaf_font)
+
+    # Let's use collections instead. This way there is a separate legend item
+    # for each tree grouping, rather than one for each line segment.
+    colors_used = _remove_dups(color_list)
+    color_to_lines = {}
+    for color in colors_used:
+        color_to_lines[color] = []
+    for (xline, yline, color) in zip(xlines, ylines, color_list):
+        color_to_lines[color].append(list(zip(xline, yline)))
+
+    colors_to_collections = {}
+    # Construct the collections.
+    for color in colors_used:
+        coll = matplotlib.collections.LineCollection(color_to_lines[color],
+                                                     colors=(color,))
+        colors_to_collections[color] = coll
+
+    # Add all the groupings below the color threshold.
+    for color in colors_used:
+        if color != above_threshold_color:
+            ax.add_collection(colors_to_collections[color])
+    # If there's a grouping of links above the color threshold, it goes last.
+    if above_threshold_color in colors_to_collections:
+        ax.add_collection(colors_to_collections[above_threshold_color])
+
+    if contraction_marks is not None:
+        Ellipse = matplotlib.patches.Ellipse
+        for (x, y) in contraction_marks:
+            if orientation in ('left', 'right'):
+                e = Ellipse((y, x), width=dvw / 100, height=1.0)
+            else:
+                e = Ellipse((x, y), width=1.0, height=dvw / 100)
+            ax.add_artist(e)
+            e.set_clip_box(ax.bbox)
+            e.set_alpha(0.5)
+            e.set_facecolor('k')
+
+    if trigger_redraw:
+        matplotlib.pylab.draw_if_interactive()
+
+
+# C0 is used for above threshold color
+_link_line_colors_default = ('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9')
+_link_line_colors = list(_link_line_colors_default)
+
+
+def set_link_color_palette(palette):
+    """
+    Set list of matplotlib color codes for use by dendrogram.
+
+    Note that this palette is global (i.e., setting it once changes the colors
+    for all subsequent calls to `dendrogram`) and that it affects only the
+    colors below ``color_threshold``.
+
+    Note that `dendrogram` also accepts a custom coloring function through its
+    ``link_color_func`` keyword, which is more flexible and non-global.
+
+    Parameters
+    ----------
+    palette : list of str or None
+        A list of matplotlib color codes. The order of the color codes is the
+        order in which the colors are cycled through when color thresholding in
+        the dendrogram.
+
+        If ``None``, resets the palette to its default (which are matplotlib
+        default colors C1 to C9).
+
+    Returns
+    -------
+    None
+
+    See Also
+    --------
+    dendrogram
+
+    Notes
+    -----
+    Ability to reset the palette with ``None`` added in SciPy 0.17.0.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster import hierarchy
+    >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
+    ...                    400., 754., 564., 138., 219., 869., 669.])
+    >>> Z = hierarchy.linkage(ytdist, 'single')
+    >>> dn = hierarchy.dendrogram(Z, no_plot=True)
+    >>> dn['color_list']
+    ['C1', 'C0', 'C0', 'C0', 'C0']
+    >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
+    >>> dn = hierarchy.dendrogram(Z, no_plot=True, above_threshold_color='b')
+    >>> dn['color_list']
+    ['c', 'b', 'b', 'b', 'b']
+    >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
+    ...                           above_threshold_color='k')
+    >>> dn['color_list']
+    ['c', 'm', 'm', 'k', 'k']
+
+    Now, reset the color palette to its default:
+
+    >>> hierarchy.set_link_color_palette(None)
+
+    """
+    if palette is None:
+        # reset to its default
+        palette = _link_line_colors_default
+    elif not isinstance(palette, (list, tuple)):
+        raise TypeError("palette must be a list or tuple")
+
+    if not all(isinstance(p, str) for p in palette):
+        raise TypeError("all palette list elements must be color strings")
+
+    global _link_line_colors
+    _link_line_colors = palette
+
+
+def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
+               get_leaves=True, orientation='top', labels=None,
+               count_sort=False, distance_sort=False, show_leaf_counts=True,
+               no_plot=False, no_labels=False, leaf_font_size=None,
+               leaf_rotation=None, leaf_label_func=None,
+               show_contracted=False, link_color_func=None, ax=None,
+               above_threshold_color='C0'):
+    """
+    Plot the hierarchical clustering as a dendrogram.
+
+    The dendrogram illustrates how each cluster is
+    composed by drawing a U-shaped link between a non-singleton
+    cluster and its children. The top of the U-link indicates a
+    cluster merge. The two legs of the U-link indicate which clusters
+    were merged. The length of the two legs of the U-link represents
+    the distance between the child clusters. It is also the
+    cophenetic distance between original observations in the two
+    children clusters.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The linkage matrix encoding the hierarchical clustering to
+        render as a dendrogram. See the ``linkage`` function for more
+        information on the format of ``Z``.
+    p : int, optional
+        The ``p`` parameter for ``truncate_mode``.
+    truncate_mode : str, optional
+        The dendrogram can be hard to read when the original
+        observation matrix from which the linkage is derived is
+        large. Truncation is used to condense the dendrogram. There
+        are several modes:
+
+        ``None``
+          No truncation is performed (default).
+          Note: ``'none'`` is an alias for ``None`` that's kept for
+          backward compatibility.
+
+        ``'lastp'``
+          The last ``p`` non-singleton clusters formed in the linkage are the
+          only non-leaf nodes in the linkage; they correspond to rows
+          ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
+          contracted into leaf nodes.
+
+        ``'level'``
+          No more than ``p`` levels of the dendrogram tree are displayed.
+          A "level" includes all nodes with ``p`` merges from the final merge.
+
+          Note: ``'mtica'`` is an alias for ``'level'`` that's kept for
+          backward compatibility.
+
+    color_threshold : double, optional
+        For brevity, let :math:`t` be the ``color_threshold``.
+        Colors all the descendent links below a cluster node
+        :math:`k` the same color if :math:`k` is the first node below
+        the cut threshold :math:`t`. All links connecting nodes with
+        distances greater than or equal to the threshold are colored
+        with the default matplotlib color ``'C0'``. If :math:`t` is less
+        than or equal to zero, all nodes are colored ``'C0'``.
+        If ``color_threshold`` is None or 'default',
+        corresponding with MATLAB(TM) behavior, the threshold is set to
+        ``0.7*max(Z[:,2])``.
+
+    get_leaves : bool, optional
+        Includes a list ``R['leaves']=H`` in the result
+        dictionary. For each :math:`i`, ``H[i] == j``, cluster node
+        ``j`` appears in position ``i`` in the left-to-right traversal
+        of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
+    orientation : str, optional
+        The direction to plot the dendrogram, which can be any
+        of the following strings:
+
+        ``'top'``
+          Plots the root at the top, and plots descendent links going
+          downwards (default).
+
+        ``'bottom'``
+          Plots the root at the bottom, and plots descendent links going
+          upwards.
+
+        ``'left'``
+          Plots the root at the left, and plots descendent links going right.
+
+        ``'right'``
+          Plots the root at the right, and plots descendent links going left.
+
+    labels : ndarray, optional
+        By default, ``labels`` is None so the index of the original observation
+        is used to label the leaf nodes. Otherwise, this is an :math:`n`-sized
+        sequence, with ``n == Z.shape[0] + 1``. The ``labels[i]`` value is the
+        text to put under the :math:`i` th leaf node only if it corresponds to
+        an original observation and not a non-singleton cluster.
+    count_sort : str or bool, optional
+        For each node n, the order (visually, from left-to-right) n's
+        two descendent links are plotted is determined by this
+        parameter, which can be any of the following values:
+
+        ``False``
+          Nothing is done.
+
+        ``'ascending'`` or ``True``
+          The child with the minimum number of original objects in its cluster
+          is plotted first.
+
+        ``'descending'``
+          The child with the maximum number of original objects in its cluster
+          is plotted first.
+
+        Note that ``distance_sort`` and ``count_sort`` cannot both be True.
+    distance_sort : str or bool, optional
+        For each node n, the order (visually, from left-to-right) n's
+        two descendent links are plotted is determined by this
+        parameter, which can be any of the following values:
+
+        ``False``
+          Nothing is done.
+
+        ``'ascending'`` or ``True``
+          The child with the minimum distance between its direct descendents is
+          plotted first.
+
+        ``'descending'``
+          The child with the maximum distance between its direct descendents is
+          plotted first.
+
+        Note that ``distance_sort`` and ``count_sort`` cannot both be True.
+    show_leaf_counts : bool, optional
+        When True, leaf nodes representing :math:`k>1` original
+        observations are labeled with the number of observations they
+        contain in parentheses.
+    no_plot : bool, optional
+        When True, the final rendering is not performed. This is
+        useful if only the data structures computed for the rendering
+        are needed or if matplotlib is not available.
+    no_labels : bool, optional
+        When True, no labels appear next to the leaf nodes in the
+        rendering of the dendrogram.
+    leaf_rotation : double, optional
+        Specifies the angle (in degrees) to rotate the leaf
+        labels. When unspecified, the rotation is based on the number of
+        nodes in the dendrogram (default is 0).
+    leaf_font_size : int, optional
+        Specifies the font size (in points) of the leaf labels. When
+        unspecified, the size is based on the number of nodes in the
+        dendrogram.
+    leaf_label_func : lambda or function, optional
+        When ``leaf_label_func`` is a callable function, it is called for
+        each leaf with cluster index :math:`k < 2n-1`, and is expected to
+        return a string with the label for that leaf.
+
+        Indices :math:`k < n` correspond to original observations
+        while indices :math:`k \\geq n` correspond to non-singleton
+        clusters.
+
+        For example, to label singletons with their node id and
+        non-singletons with their id, count, and inconsistency
+        coefficient, simply do::
+
+            # First define the leaf label function.
+            def llf(id):
+                if id < n:
+                    return str(id)
+                else:
+                    return '[%d %d %1.2f]' % (id, count, R[n-id,3])
+
+            # The text for the leaf nodes is going to be big so force
+            # a rotation of 90 degrees.
+            dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
+
+            # leaf_label_func can also be used together with ``truncate_mode``,
+            # in which case you will get your leaves labeled after truncation:
+            dendrogram(Z, leaf_label_func=llf, leaf_rotation=90,
+                       truncate_mode='level', p=2)
+
+    show_contracted : bool, optional
+        When True the heights of non-singleton nodes contracted
+        into a leaf node are plotted as crosses along the link
+        connecting that leaf node. This is only useful when
+        truncation is used (see ``truncate_mode`` parameter).
+    link_color_func : callable, optional
+        If given, ``link_color_func`` is called with each non-singleton id
+        corresponding to each U-shaped link it will paint. The function is
+        expected to return the color to paint the link, encoded as a matplotlib
+        color string code. For example::
+
+            dendrogram(Z, link_color_func=lambda k: colors[k])
+
+        colors the direct links below each untruncated non-singleton node
+        ``k`` using ``colors[k]``.
+    ax : matplotlib Axes instance, optional
+        If None and `no_plot` is not True, the dendrogram will be plotted
+        on the current axes. Otherwise if `no_plot` is not True the
+        dendrogram will be plotted on the given ``Axes`` instance. This can be
+        useful if the dendrogram is part of a more complex figure.
+    above_threshold_color : str, optional
+        This matplotlib color string sets the color of the links above the
+        color_threshold. The default is ``'C0'``.
+
+    Returns
+    -------
+    R : dict
+        A dictionary of data structures computed to render the
+        dendrogram. It has the following keys:
+
+        ``'color_list'``
+          A list of color names. The k'th element represents the color of the
+          k'th link.
+
+        ``'icoord'`` and ``'dcoord'``
+          Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
+          where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
+          where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
+          ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
+
+        ``'ivl'``
+          A list of labels corresponding to the leaf nodes.
+
+        ``'leaves'``
+          For each i, ``H[i] == j``, cluster node ``j`` appears in position
+          ``i`` in the left-to-right traversal of the leaves, where
+          :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
+          ``i``-th leaf node corresponds to an original observation.
+          Otherwise, it corresponds to a non-singleton cluster.
+
+        ``'leaves_color_list'``
+          A list of color names. The k'th element represents the color of the
+          k'th leaf.
+
+    See Also
+    --------
+    linkage, set_link_color_palette
+
+    Notes
+    -----
+    It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise
+    crossings appear in the dendrogram.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster import hierarchy
+    >>> import matplotlib.pyplot as plt
+
+    A very basic example:
+
+    >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
+    ...                    400., 754., 564., 138., 219., 869., 669.])
+    >>> Z = hierarchy.linkage(ytdist, 'single')
+    >>> plt.figure()
+    >>> dn = hierarchy.dendrogram(Z)
+
+    Now, plot in given axes, improve the color scheme and use both vertical and
+    horizontal orientations:
+
+    >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
+    >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
+    >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
+    ...                            orientation='top')
+    >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1],
+    ...                            above_threshold_color='#bcbddc',
+    ...                            orientation='right')
+    >>> hierarchy.set_link_color_palette(None)  # reset to default after use
+    >>> plt.show()
+
+    """
+    # This feature was thought about but never implemented (still useful?):
+    #
+    #         ... = dendrogram(..., leaves_order=None)
+    #
+    #         Plots the leaves in the order specified by a vector of
+    #         original observation indices. If the vector contains duplicates
+    #         or results in a crossing, an exception will be thrown. Passing
+    #         None orders leaf nodes based on the order they appear in the
+    #         pre-order traversal.
+    xp = array_namespace(Z)
+    Z = _asarray(Z, order='c', xp=xp)
+
+    if orientation not in ["top", "left", "bottom", "right"]:
+        raise ValueError("orientation must be one of 'top', 'left', "
+                         "'bottom', or 'right'")
+
+    if labels is not None:
+        try:
+            len_labels = len(labels)
+        except (TypeError, AttributeError):
+            len_labels = labels.shape[0]
+        if Z.shape[0] + 1 != len_labels:
+            raise ValueError("Dimensions of Z and labels must be consistent.")
+
+    is_valid_linkage(Z, throw=True, name='Z')
+    Zs = Z.shape
+    n = Zs[0] + 1
+    if isinstance(p, (int, float)):
+        p = int(p)
+    else:
+        raise TypeError('The second argument must be a number')
+
+    if truncate_mode not in ('lastp', 'mtica', 'level', 'none', None):
+        # 'mtica' is kept working for backwards compat.
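+        # ('none' is likewise accepted above as an alias for None.)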
+ raise ValueError('Invalid truncation mode.') + + if truncate_mode == 'lastp': + if p > n or p == 0: + p = n + + if truncate_mode == 'mtica': + # 'mtica' is an alias + truncate_mode = 'level' + + if truncate_mode == 'level': + if p <= 0: + p = np.inf + + if get_leaves: + lvs = [] + else: + lvs = None + + icoord_list = [] + dcoord_list = [] + color_list = [] + current_color = [0] + currently_below_threshold = [False] + ivl = [] # list of leaves + + if color_threshold is None or (isinstance(color_threshold, str) and + color_threshold == 'default'): + color_threshold = max(Z[:, 2]) * 0.7 + + R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl, + 'leaves': lvs, 'color_list': color_list} + + # Empty list will be filled in _dendrogram_calculate_info + contraction_marks = [] if show_contracted else None + + _dendrogram_calculate_info( + Z=Z, p=p, + truncate_mode=truncate_mode, + color_threshold=color_threshold, + get_leaves=get_leaves, + orientation=orientation, + labels=labels, + count_sort=count_sort, + distance_sort=distance_sort, + show_leaf_counts=show_leaf_counts, + i=2*n - 2, + iv=0.0, + ivl=ivl, + n=n, + icoord_list=icoord_list, + dcoord_list=dcoord_list, + lvs=lvs, + current_color=current_color, + color_list=color_list, + currently_below_threshold=currently_below_threshold, + leaf_label_func=leaf_label_func, + contraction_marks=contraction_marks, + link_color_func=link_color_func, + above_threshold_color=above_threshold_color) + + if not no_plot: + mh = max(Z[:, 2]) + _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation, + no_labels, color_list, + leaf_font_size=leaf_font_size, + leaf_rotation=leaf_rotation, + contraction_marks=contraction_marks, + ax=ax, + above_threshold_color=above_threshold_color) + + R["leaves_color_list"] = _get_leaves_color_list(R) + + return R + + +def _get_leaves_color_list(R): + leaves_color_list = [None] * len(R['leaves']) + for link_x, link_y, link_color in zip(R['icoord'], + R['dcoord'], + R['color_list']): + for (xi, yi) in zip(link_x, link_y): + if yi == 0.0 and (xi % 5 == 0 and xi % 2 == 1): + # if yi is 0.0 and xi is divisible by 5 and odd, + # the point is a leaf + # xi of leaves are 5, 15, 25, 35, ... (see `iv_ticks`) + # index of leaves are 0, 1, 2, 3, ... as below + leaf_index = (int(xi) - 5) // 10 + # each leaf has a same color of its link. + leaves_color_list[leaf_index] = link_color + return leaves_color_list + + +def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, + i, labels): + # If the leaf id structure is not None and is a list then the caller + # to dendrogram has indicated that cluster id's corresponding to the + # leaf nodes should be recorded. + + if lvs is not None: + lvs.append(int(i)) + + # If leaf node labels are to be displayed... + if ivl is not None: + # If a leaf_label_func has been provided, the label comes from the + # string returned from the leaf_label_func, which is a function + # passed to dendrogram. + if leaf_label_func: + ivl.append(leaf_label_func(int(i))) + else: + # Otherwise, if the dendrogram caller has passed a labels list + # for the leaf nodes, use it. 
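+            # ``labels`` has length ``n`` and a singleton leaf has ``i < n``,
+            # so the negative index ``i - n`` selects the same element as
+            # ``labels[i]``.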
+            if labels is not None:
+                ivl.append(labels[int(i - n)])
+            else:
+                # Otherwise, use the id as the label for the leaf.
+                ivl.append(str(int(i)))
+
+
+def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
+                                   i, labels, show_leaf_counts):
+    # If the leaf id structure is not None and is a list then the caller
+    # to dendrogram has indicated that cluster id's corresponding to the
+    # leaf nodes should be recorded.
+
+    if lvs is not None:
+        lvs.append(int(i))
+    if ivl is not None:
+        if leaf_label_func:
+            ivl.append(leaf_label_func(int(i)))
+        else:
+            if show_leaf_counts:
+                ivl.append("(" + str(np.asarray(Z[i - n, 3], dtype=np.int64)) + ")")
+            else:
+                ivl.append("")
+
+
+def _append_contraction_marks(Z, iv, i, n, contraction_marks, xp):
+    _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp),
+                                  n, contraction_marks, xp)
+    _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp),
+                                  n, contraction_marks, xp)
+
+
+def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks, xp):
+    if i >= n:
+        contraction_marks.append((iv, Z[i - n, 2]))
+        _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp),
+                                      n, contraction_marks, xp)
+        _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp),
+                                      n, contraction_marks, xp)
+
+
+def _dendrogram_calculate_info(Z, p, truncate_mode,
+                               color_threshold=np.inf, get_leaves=True,
+                               orientation='top', labels=None,
+                               count_sort=False, distance_sort=False,
+                               show_leaf_counts=False, i=-1, iv=0.0,
+                               ivl=[], n=0, icoord_list=[], dcoord_list=[],
+                               lvs=None, mhr=False,
+                               current_color=[], color_list=[],
+                               currently_below_threshold=[],
+                               leaf_label_func=None, level=0,
+                               contraction_marks=None,
+                               link_color_func=None,
+                               above_threshold_color='C0'):
+    """
+    Calculate the endpoints of the links as well as the labels for the
+    dendrogram rooted at the node with index i. iv is the independent
+    variable value to plot the left-most leaf node below the root node i
+    (if orientation='top', this would be the left-most x value where the
+    plotting of this root node i and its descendents should begin).
+
+    ivl is a list to store the labels of the leaf nodes. The leaf_label_func
+    is called whenever ivl != None, labels == None, and
+    leaf_label_func != None. When ivl != None and labels != None, the
+    labels list is used only for labeling the leaf nodes. When
+    ivl == None, no labels are generated for leaf nodes.
+
+    When get_leaves==True, a list of leaves is built as they are visited
+    in the dendrogram.
+
+    Returns a tuple whose first element is the independent variable
+    coordinate that corresponds to the midpoint of the cluster to the left
+    of cluster i if i is non-singleton, otherwise the independent
+    coordinate of the leaf node if i is a leaf node.
+
+    Returns
+    -------
+    A tuple (left, w, h, md), where:
+
+      * left is the independent variable coordinate of the center of
+        the U of the subtree
+
+      * w is the amount of space used for the subtree (in independent
+        variable units)
+
+      * h is the height of the subtree in dependent variable units
+
+      * md is the ``max(Z[*,2])`` for all nodes ``*`` below and including
+        the target node.
+
+    """
+    xp = array_namespace(Z)
+    if n == 0:
+        raise ValueError("Invalid singleton cluster count n.")
+
+    if i == -1:
+        raise ValueError("Invalid root cluster index i.")
+
+    if truncate_mode == 'lastp':
+        # If the node is a leaf node but corresponds to a non-singleton
+        # cluster, its label is either the empty string or the number of
+        # original observations belonging to cluster i.
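+        # Editor's note (worked example, not part of the library source):
+        # ids n..2n-2 denote merged clusters. With n = 10 and p = 3 we get
+        # 2*n - p = 17, so internal nodes 10..16 are collapsed into single
+        # leaves below, and only nodes 17 and 18 (the root) can still be
+        # expanded -- the truncated plot ends up with exactly p = 3 leaves.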
+ if 2*n - p > i >= n: + d = Z[i - n, 2] + _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels, + show_leaf_counts) + if contraction_marks is not None: + _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp) + return (iv + 5.0, 10.0, 0.0, d) + elif i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + elif truncate_mode == 'level': + if i > n and level > p: + d = Z[i - n, 2] + _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels, + show_leaf_counts) + if contraction_marks is not None: + _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp) + return (iv + 5.0, 10.0, 0.0, d) + elif i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + + # Otherwise, only truncate if we have a leaf node. + # + # Only place leaves if they correspond to original observations. + if i < n: + _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, + leaf_label_func, i, labels) + return (iv + 5.0, 10.0, 0.0, 0.0) + + # !!! Otherwise, we don't have a leaf node, so work on plotting a + # non-leaf node. + # Actual indices of a and b + aa = int_floor(Z[i - n, 0], xp) + ab = int_floor(Z[i - n, 1], xp) + if aa >= n: + # The number of singletons below cluster a + na = Z[aa - n, 3] + # The distance between a's two direct children. + da = Z[aa - n, 2] + else: + na = 1 + da = 0.0 + if ab >= n: + nb = Z[ab - n, 3] + db = Z[ab - n, 2] + else: + nb = 1 + db = 0.0 + + if count_sort == 'ascending' or count_sort is True: + # If a has a count greater than b, it and its descendents should + # be drawn to the right. Otherwise, to the left. + if na > nb: + # The cluster index to draw to the left (ua) will be ab + # and the one to draw to the right (ub) will be aa + ua = ab + ub = aa + else: + ua = aa + ub = ab + elif count_sort == 'descending': + # If a has a count less than or equal to b, it and its + # descendents should be drawn to the left. Otherwise, to + # the right. + if na > nb: + ua = aa + ub = ab + else: + ua = ab + ub = aa + elif distance_sort == 'ascending' or distance_sort is True: + # If a has a distance greater than b, it and its descendents should + # be drawn to the right. Otherwise, to the left. + if da > db: + ua = ab + ub = aa + else: + ua = aa + ub = ab + elif distance_sort == 'descending': + # If a has a distance less than or equal to b, it and its + # descendents should be drawn to the left. Otherwise, to + # the right. + if da > db: + ua = aa + ub = ab + else: + ua = ab + ub = aa + else: + ua = aa + ub = ab + + # Updated iv variable and the amount of space used. 
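+    # Editor's note (illustrative, not part of the library source): every
+    # leaf occupies 10 independent-variable units and is centred 5 units
+    # in, so two singleton children rooted at iv = 0 sit at x = 5 and
+    # x = 15, and their link is returned with midpoint (5 + 15) / 2 = 10.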
+    (uiva, uwa, uah, uamd) = \
+        _dendrogram_calculate_info(
+            Z=Z, p=p,
+            truncate_mode=truncate_mode,
+            color_threshold=color_threshold,
+            get_leaves=get_leaves,
+            orientation=orientation,
+            labels=labels,
+            count_sort=count_sort,
+            distance_sort=distance_sort,
+            show_leaf_counts=show_leaf_counts,
+            i=ua, iv=iv, ivl=ivl, n=n,
+            icoord_list=icoord_list,
+            dcoord_list=dcoord_list, lvs=lvs,
+            current_color=current_color,
+            color_list=color_list,
+            currently_below_threshold=currently_below_threshold,
+            leaf_label_func=leaf_label_func,
+            level=level + 1, contraction_marks=contraction_marks,
+            link_color_func=link_color_func,
+            above_threshold_color=above_threshold_color)
+
+    h = Z[i - n, 2]
+    if h >= color_threshold or color_threshold <= 0:
+        c = above_threshold_color
+
+        if currently_below_threshold[0]:
+            current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
+        currently_below_threshold[0] = False
+    else:
+        currently_below_threshold[0] = True
+        c = _link_line_colors[current_color[0]]
+
+    (uivb, uwb, ubh, ubmd) = \
+        _dendrogram_calculate_info(
+            Z=Z, p=p,
+            truncate_mode=truncate_mode,
+            color_threshold=color_threshold,
+            get_leaves=get_leaves,
+            orientation=orientation,
+            labels=labels,
+            count_sort=count_sort,
+            distance_sort=distance_sort,
+            show_leaf_counts=show_leaf_counts,
+            i=ub, iv=iv + uwa, ivl=ivl, n=n,
+            icoord_list=icoord_list,
+            dcoord_list=dcoord_list, lvs=lvs,
+            current_color=current_color,
+            color_list=color_list,
+            currently_below_threshold=currently_below_threshold,
+            leaf_label_func=leaf_label_func,
+            level=level + 1, contraction_marks=contraction_marks,
+            link_color_func=link_color_func,
+            above_threshold_color=above_threshold_color)
+
+    max_dist = max(uamd, ubmd, h)
+
+    icoord_list.append([uiva, uiva, uivb, uivb])
+    dcoord_list.append([uah, h, h, ubh])
+    if link_color_func is not None:
+        v = link_color_func(int(i))
+        if not isinstance(v, str):
+            raise TypeError("link_color_func must return a matplotlib "
+                            "color string!")
+        color_list.append(v)
+    else:
+        color_list.append(c)
+
+    return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
+
+
+def is_isomorphic(T1, T2):
+    """
+    Determine if two different cluster assignments are equivalent.
+
+    Parameters
+    ----------
+    T1 : array_like
+        An assignment of singleton cluster ids to flat cluster ids.
+    T2 : array_like
+        An assignment of singleton cluster ids to flat cluster ids.
+
+    Returns
+    -------
+    b : bool
+        Whether the flat cluster assignments `T1` and `T2` are
+        equivalent.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    fcluster : for the creation of flat cluster assignments.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import fcluster, is_isomorphic
+    >>> from scipy.cluster.hierarchy import single, complete
+    >>> from scipy.spatial.distance import pdist
+
+    Two flat cluster assignments can be isomorphic if they represent the same
+    cluster assignment, with different labels.
+
+    For example, we can use the `scipy.cluster.hierarchy.single` method
+    and flatten the output to four clusters:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = single(pdist(X))
+    >>> T = fcluster(Z, 1, criterion='distance')
+    >>> T
+    array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
+
+    We can then do the same using the
+    `scipy.cluster.hierarchy.complete` method:
+
+    >>> Z = complete(pdist(X))
+    >>> T_ = fcluster(Z, 1.5, criterion='distance')
+    >>> T_
+    array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
+
+    As we can see, in both cases we obtain four clusters and all the data
+    points are distributed in the same way - the only thing that changes
+    is the flat cluster labelling (3 => 1, 4 => 2, 2 => 3 and 1 => 4), so
+    both cluster assignments are isomorphic:
+
+    >>> is_isomorphic(T, T_)
+    True
+
+    """
+    T1 = np.asarray(T1, order='c')
+    T2 = np.asarray(T2, order='c')
+
+    T1S = T1.shape
+    T2S = T2.shape
+
+    if len(T1S) != 1:
+        raise ValueError('T1 must be one-dimensional.')
+    if len(T2S) != 1:
+        raise ValueError('T2 must be one-dimensional.')
+    if T1S[0] != T2S[0]:
+        raise ValueError('T1 and T2 must have the same number of elements.')
+    n = T1S[0]
+    d1 = {}
+    d2 = {}
+    for i in range(0, n):
+        if T1[i] in d1:
+            if T2[i] not in d2:
+                return False
+            if d1[T1[i]] != T2[i] or d2[T2[i]] != T1[i]:
+                return False
+        elif T2[i] in d2:
+            return False
+        else:
+            d1[T1[i]] = T2[i]
+            d2[T2[i]] = T1[i]
+    return True
+
+
+def maxdists(Z):
+    """
+    Return the maximum distance found below each non-singleton cluster.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        ``linkage`` for more information.
+
+    Returns
+    -------
+    maxdists : ndarray
+        A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
+        the maximum distance between any cluster (including
+        singletons) below and including the node with index i. More
+        specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
+        set of all node indices below and including node i.
+
+    See Also
+    --------
+    linkage : for a description of what a linkage matrix is.
+    is_monotonic : for testing for monotonicity of a linkage matrix.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import median, maxdists
+    >>> from scipy.spatial.distance import pdist
+
+    Given a linkage matrix ``Z``, `scipy.cluster.hierarchy.maxdists`
+    computes, for each new cluster generated (i.e., for each row of the
+    linkage matrix), the maximum distance between any two child clusters.
+
+    Due to the nature of hierarchical clustering, in many cases this is going
+    to be just the distance between the two child clusters that were merged
+    to form the current one - that is, ``Z[:,2]``.
+
+    However, for non-monotonic cluster assignments such as
+    `scipy.cluster.hierarchy.median` clustering this is not always the
+    case: There may be cluster formations where the distance between the two
+    clusters merged is smaller than the distance between their children.
+
+    We can see this in an example:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = median(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.11803399,  3.        ],
+           [ 5.        , 13.        ,  1.11803399,  3.        ],
+           [ 8.        , 15.        ,  1.11803399,  3.        ],
+           [11.        , 14.        ,  1.11803399,  3.        ],
+           [18.        , 19.        ,  3.        ,  6.        ],
+           [16.        , 17.        ,  3.5       ,  6.        ],
+           [20.        , 21.        ,  3.25      , 12.        ]])
+    >>> maxdists(Z)
+    array([1.        , 1.        , 1.        , 1.        , 1.11803399,
+           1.11803399, 1.11803399, 1.11803399, 3.
, 3.5 , + 3.5 ]) + + Note that while the distance between the two clusters merged when creating the + last cluster is 3.25, there are two children (clusters 16 and 17) whose distance + is larger (3.5). Thus, `scipy.cluster.hierarchy.maxdists` returns 3.5 in + this case. + + """ + xp = array_namespace(Z) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + n = Z.shape[0] + 1 + MD = np.zeros((n - 1,)) + Z = np.asarray(Z) + _hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n)) + MD = xp.asarray(MD) + return MD + + +def maxinconsts(Z, R): + """ + Return the maximum inconsistency coefficient for each + non-singleton cluster and its children. + + Parameters + ---------- + Z : ndarray + The hierarchical clustering encoded as a matrix. See + `linkage` for more information. + R : ndarray + The inconsistency matrix. + + Returns + ------- + MI : ndarray + A monotonic ``(n-1)``-sized numpy array of doubles. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + inconsistent : for the creation of a inconsistency matrix. + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, inconsistent, maxinconsts + >>> from scipy.spatial.distance import pdist + + Given a data set ``X``, we can apply a clustering method to obtain a + linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can + be also used to obtain the inconsistency matrix ``R`` associated to + this clustering process: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = median(pdist(X)) + >>> R = inconsistent(Z) + >>> Z + array([[ 0. , 1. , 1. , 2. ], + [ 3. , 4. , 1. , 2. ], + [ 9. , 10. , 1. , 2. ], + [ 6. , 7. , 1. , 2. ], + [ 2. , 12. , 1.11803399, 3. ], + [ 5. , 13. , 1.11803399, 3. ], + [ 8. , 15. , 1.11803399, 3. ], + [11. , 14. , 1.11803399, 3. ], + [18. , 19. , 3. , 6. ], + [16. , 17. , 3.5 , 6. ], + [20. , 21. , 3.25 , 12. ]]) + >>> R + array([[1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.74535599, 1.08655358, 3. , 1.15470054], + [1.91202266, 1.37522872, 3. , 1.15470054], + [3.25 , 0.25 , 3. , 0. ]]) + + Here, `scipy.cluster.hierarchy.maxinconsts` can be used to compute + the maximum value of the inconsistency statistic (the last column of + ``R``) for each non-singleton cluster and its children: + + >>> maxinconsts(Z, R) + array([0. , 0. , 0. , 0. , 0.70710678, + 0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054, + 1.15470054]) + + """ + xp = array_namespace(Z, R) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + R = _asarray(R, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + is_valid_im(R, throw=True, name='R') + + n = Z.shape[0] + 1 + if Z.shape[0] != R.shape[0]: + raise ValueError("The inconsistency matrix and linkage matrix each " + "have a different number of rows.") + MI = np.zeros((n - 1,)) + Z = np.asarray(Z) + R = np.asarray(R) + _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3) + MI = xp.asarray(MI) + return MI + + +def maxRstat(Z, R, i): + """ + Return the maximum statistic for each non-singleton cluster and its + children. + + Parameters + ---------- + Z : array_like + The hierarchical clustering encoded as a matrix. 
See `linkage` for more + information. + R : array_like + The inconsistency matrix. + i : int + The column of `R` to use as the statistic. + + Returns + ------- + MR : ndarray + Calculates the maximum statistic for the i'th column of the + inconsistency matrix `R` for each non-singleton cluster + node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where + ``Q(j)`` the set of all node ids corresponding to nodes below + and including ``j``. + + See Also + -------- + linkage : for a description of what a linkage matrix is. + inconsistent : for the creation of a inconsistency matrix. + + Examples + -------- + >>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat + >>> from scipy.spatial.distance import pdist + + Given a data set ``X``, we can apply a clustering method to obtain a + linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can + be also used to obtain the inconsistency matrix ``R`` associated to + this clustering process: + + >>> X = [[0, 0], [0, 1], [1, 0], + ... [0, 4], [0, 3], [1, 4], + ... [4, 0], [3, 0], [4, 1], + ... [4, 4], [3, 4], [4, 3]] + + >>> Z = median(pdist(X)) + >>> R = inconsistent(Z) + >>> R + array([[1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1. , 0. , 1. , 0. ], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.05901699, 0.08346263, 2. , 0.70710678], + [1.74535599, 1.08655358, 3. , 1.15470054], + [1.91202266, 1.37522872, 3. , 1.15470054], + [3.25 , 0.25 , 3. , 0. ]]) + + `scipy.cluster.hierarchy.maxRstat` can be used to compute + the maximum value of each column of ``R``, for each non-singleton + cluster and its children: + + >>> maxRstat(Z, R, 0) + array([1. , 1. , 1. , 1. , 1.05901699, + 1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266, + 3.25 ]) + >>> maxRstat(Z, R, 1) + array([0. , 0. , 0. , 0. , 0.08346263, + 0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872, + 1.37522872]) + >>> maxRstat(Z, R, 3) + array([0. , 0. , 0. , 0. , 0.70710678, + 0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054, + 1.15470054]) + + """ + xp = array_namespace(Z, R) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + R = _asarray(R, order='C', dtype=xp.float64, xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + is_valid_im(R, throw=True, name='R') + + if not isinstance(i, int): + raise TypeError('The third argument must be an integer.') + + if i < 0 or i > 3: + raise ValueError('i must be an integer between 0 and 3 inclusive.') + + if Z.shape[0] != R.shape[0]: + raise ValueError("The inconsistency matrix and linkage matrix each " + "have a different number of rows.") + + n = Z.shape[0] + 1 + MR = np.zeros((n - 1,)) + Z = np.asarray(Z) + R = np.asarray(R) + _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i) + MR = xp.asarray(MR) + return MR + + +def leaders(Z, T): + """ + Return the root nodes in a hierarchical clustering. + + Returns the root nodes in a hierarchical clustering corresponding + to a cut defined by a flat cluster assignment vector ``T``. See + the ``fcluster`` function for more information on the format of ``T``. 
+
+    For each flat cluster :math:`j` of the :math:`k` flat clusters
+    represented in the n-sized flat cluster assignment vector ``T``,
+    this function finds the lowest cluster node :math:`i` in the linkage
+    tree ``Z``, such that:
+
+      * leaf descendants belong only to flat cluster j
+        (i.e., ``T[p]==j`` for all :math:`p` in :math:`S(i)`, where
+        :math:`S(i)` is the set of leaf ids of the leaf nodes descended
+        from cluster node :math:`i`)
+
+      * there does not exist a leaf that is not a descendant of
+        :math:`i` that also belongs to cluster :math:`j`
+        (i.e., ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
+        this condition is violated, ``T`` is not a valid cluster
+        assignment vector, and an exception will be thrown.
+
+    Parameters
+    ----------
+    Z : ndarray
+        The hierarchical clustering encoded as a matrix. See
+        `linkage` for more information.
+    T : ndarray
+        The flat cluster assignment vector.
+
+    Returns
+    -------
+    L : ndarray
+        The leader linkage node id's stored as a k-element 1-D array,
+        where ``k`` is the number of flat clusters found in ``T``.
+
+        ``L[j]=i`` is the linkage cluster node id that is the
+        leader of the flat cluster with id ``M[j]``. If ``i < n``, ``i``
+        corresponds to an original observation, otherwise it
+        corresponds to a non-singleton cluster.
+    M : ndarray
+        The flat cluster ids, stored as a k-element 1-D array, where
+        ``k`` is the number of flat clusters found in ``T``. This allows the
+        set of flat cluster ids to be any arbitrary set of ``k`` integers.
+
+        For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
+        id 8's leader is linkage node 2.
+
+    See Also
+    --------
+    fcluster : for the creation of flat cluster assignments.
+
+    Examples
+    --------
+    >>> from scipy.cluster.hierarchy import ward, fcluster, leaders
+    >>> from scipy.spatial.distance import pdist
+
+    Given a linkage matrix ``Z`` - obtained after applying a clustering
+    method to a dataset ``X`` - and a flat cluster assignment array ``T``:
+
+    >>> X = [[0, 0], [0, 1], [1, 0],
+    ...      [0, 4], [0, 3], [1, 4],
+    ...      [4, 0], [3, 0], [4, 1],
+    ...      [4, 4], [3, 4], [4, 3]]
+
+    >>> Z = ward(pdist(X))
+    >>> Z
+    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
+           [ 3.        ,  4.        ,  1.        ,  2.        ],
+           [ 6.        ,  7.        ,  1.        ,  2.        ],
+           [ 9.        , 10.        ,  1.        ,  2.        ],
+           [ 2.        , 12.        ,  1.29099445,  3.        ],
+           [ 5.        , 13.        ,  1.29099445,  3.        ],
+           [ 8.        , 14.        ,  1.29099445,  3.        ],
+           [11.        , 15.        ,  1.29099445,  3.        ],
+           [16.        , 17.        ,  5.77350269,  6.        ],
+           [18.        , 19.        ,  5.77350269,  6.        ],
+           [20.        , 21.        ,  8.16496581, 12.
]]) + + >>> T = fcluster(Z, 3, criterion='distance') + >>> T + array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32) + + `scipy.cluster.hierarchy.leaders` returns the indices of the nodes + in the dendrogram that are the leaders of each flat cluster: + + >>> L, M = leaders(Z, T) + >>> L + array([16, 17, 18, 19], dtype=int32) + + (remember that indices 0-11 point to the 12 data points in ``X``, + whereas indices 12-22 point to the 11 rows of ``Z``) + + `scipy.cluster.hierarchy.leaders` also returns the indices of + the flat clusters in ``T``: + + >>> M + array([1, 2, 3, 4], dtype=int32) + + """ + xp = array_namespace(Z, T) + Z = _asarray(Z, order='C', dtype=xp.float64, xp=xp) + T = _asarray(T, order='C', xp=xp) + is_valid_linkage(Z, throw=True, name='Z') + + if T.dtype != xp.int32: + raise TypeError('T must be a 1-D array of dtype int32.') + + if T.shape[0] != Z.shape[0] + 1: + raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.') + + n_clusters = int(xp.unique_values(T).shape[0]) + n_obs = int(Z.shape[0] + 1) + L = np.zeros(n_clusters, dtype=np.int32) + M = np.zeros(n_clusters, dtype=np.int32) + Z = np.asarray(Z) + T = np.asarray(T, dtype=np.int32) + s = _hierarchy.leaders(Z, T, L, M, n_clusters, n_obs) + if s >= 0: + raise ValueError(('T is not a valid assignment vector. Error found ' + 'when examining linkage node %d (< 2n-1).') % s) + L, M = xp.asarray(L), xp.asarray(M) + return (L, M) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f3f3ceac4a8c9a7045d7aa201071206ee77ee93 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83c256efb5e6401c2d1e4db22be89a2f6accdf47 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18e60cc7c0e89c2897ad2e6c6bc1cbf4cba05fc5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..7d874ca5eb7141a44559307d1c28dd412171396f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py @@ -0,0 +1,145 @@ +from numpy import array + + +Q_X = 
array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02], + [7.50205180e-01, 4.60299830e-01, 8.98696460e-01], + [6.65461230e-01, 6.94011420e-01, 9.10465700e-01], + [9.64047590e-01, 1.43082200e-03, 7.39874220e-01], + [1.08159060e-01, 5.53028790e-01, 6.63804780e-02], + [9.31359130e-01, 8.25424910e-01, 9.52315440e-01], + [6.78086960e-01, 3.41903970e-01, 5.61481950e-01], + [9.82730940e-01, 7.04605210e-01, 8.70978630e-02], + [6.14691610e-01, 4.69989230e-02, 6.02406450e-01], + [5.80161260e-01, 9.17354970e-01, 5.88163850e-01], + [1.38246310e+00, 1.96358160e+00, 1.94437880e+00], + [2.10675860e+00, 1.67148730e+00, 1.34854480e+00], + [1.39880070e+00, 1.66142050e+00, 1.32224550e+00], + [1.71410460e+00, 1.49176380e+00, 1.45432170e+00], + [1.54102340e+00, 1.84374950e+00, 1.64658950e+00], + [2.08512480e+00, 1.84524350e+00, 2.17340850e+00], + [1.30748740e+00, 1.53801650e+00, 2.16007740e+00], + [1.41447700e+00, 1.99329070e+00, 1.99107420e+00], + [1.61943490e+00, 1.47703280e+00, 1.89788160e+00], + [1.59880600e+00, 1.54988980e+00, 1.57563350e+00], + [3.37247380e+00, 2.69635310e+00, 3.39981700e+00], + [3.13705120e+00, 3.36528090e+00, 3.06089070e+00], + [3.29413250e+00, 3.19619500e+00, 2.90700170e+00], + [2.65510510e+00, 3.06785900e+00, 2.97198540e+00], + [3.30941040e+00, 2.59283970e+00, 2.57714110e+00], + [2.59557220e+00, 3.33477370e+00, 3.08793190e+00], + [2.58206180e+00, 3.41615670e+00, 3.26441990e+00], + [2.71127000e+00, 2.77032450e+00, 2.63466500e+00], + [2.79617850e+00, 3.25473720e+00, 3.41801560e+00], + [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]]) + +ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754., + 564., 138., 219., 869., 669.]) + +linkage_ytdist_single = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]]) + +linkage_ytdist_complete = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [1., 6., 400., 3.], + [0., 7., 412., 3.], + [8., 9., 996., 6.]]) + +linkage_ytdist_average = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 333.5, 3.], + [1., 6., 347.5, 3.], + [8., 9., 680.77777778, 6.]]) + +linkage_ytdist_weighted = array([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 333.5, 3.], + [1., 6., 347.5, 3.], + [8., 9., 670.125, 6.]]) + +# the optimal leaf ordering of linkage_ytdist_single +linkage_ytdist_single_olo = array([[5., 2., 138., 2.], + [4., 3., 219., 2.], + [7., 0., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]]) + +X = array([[1.43054825, -7.5693489], + [6.95887839, 6.82293382], + [2.87137846, -9.68248579], + [7.87974764, -6.05485803], + [8.24018364, -6.09495602], + [7.39020262, 8.54004355]]) + +linkage_X_centroid = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 6.43614494, 4.], + [7., 9., 15.17363237, 6.]]) + +linkage_X_median = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 6.43614494, 4.], + [7., 9., 15.17363237, 6.]]) + +linkage_X_ward = array([[3., 4., 0.36265956, 2.], + [1., 5., 1.77045373, 2.], + [0., 2., 2.55760419, 2.], + [6., 8., 9.10208346, 4.], + [7., 9., 24.7784379, 6.]]) + +# the optimal leaf ordering of linkage_X_ward +linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.], + [5., 1., 1.77045373, 2.], + [2., 0., 2.55760419, 2.], + [6., 8., 9.10208346, 4.], + [7., 9., 24.7784379, 6.]]) + +inconsistent_ytdist = { + 1: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [255., 0., 1., 0.], + [268., 0., 1., 0.], + [295., 0., 1., 0.]]), + 2: array([[138., 0., 1., 
0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [261.5, 9.19238816, 2., 0.70710678], + [233.66666667, 83.9424406, 3., 0.7306594]]), + 3: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [247.33333333, 25.38372182, 3., 0.81417007], + [239., 69.36377537, 4., 0.80733783]]), + 4: array([[138., 0., 1., 0.], + [219., 0., 1., 0.], + [237., 25.45584412, 2., 0.70710678], + [247.33333333, 25.38372182, 3., 0.81417007], + [235., 60.73302232, 5., 0.98793042]])} + +fcluster_inconsistent = { + 0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} + +fcluster_distance = { + 0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3, + 1, 1, 1, 2, 1, 1, 1, 1, 1]), + 1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} + +fcluster_maxclust = { + 8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4, + 1, 1, 1, 3, 1, 1, 1, 1, 2]), + 4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1]), + 1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1])} diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py new file mode 100644 index 0000000000000000000000000000000000000000..a73512d35eef168f625a1942a87d248e73a71aa2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py @@ -0,0 +1,202 @@ +import pytest +from pytest import raises as assert_raises +import numpy as np +from scipy.cluster.hierarchy import DisjointSet +import string + + +def generate_random_token(): + k = len(string.ascii_letters) + tokens = list(np.arange(k, dtype=int)) + tokens += list(np.arange(k, dtype=float)) + tokens += list(string.ascii_letters) + tokens += [None for i in range(k)] + tokens = np.array(tokens, dtype=object) + rng = np.random.RandomState(seed=0) + + while 1: + size = rng.randint(1, 3) + element = rng.choice(tokens, size) + if size == 1: + yield element[0] + else: + yield tuple(element) + + +def get_elements(n): + # dict is deterministic without difficulty of comparing numpy ints + elements = {} + for element in generate_random_token(): + if element not in elements: + elements[element] = len(elements) + if len(elements) >= n: + break + return list(elements.keys()) + + +def test_init(): + n = 10 + elements = get_elements(n) + dis = DisjointSet(elements) + assert dis.n_subsets == n + assert list(dis) == elements + + +def test_len(): + n = 10 + elements = get_elements(n) + dis = DisjointSet(elements) + assert len(dis) == n + + dis.add("dummy") + assert len(dis) == n + 1 + + +@pytest.mark.parametrize("n", [10, 100]) +def test_contains(n): + elements = get_elements(n) + dis = DisjointSet(elements) + for x in elements: + assert x in dis + + assert "dummy" not in dis + + +@pytest.mark.parametrize("n", [10, 100]) +def test_add(n): + elements = get_elements(n) + dis1 = DisjointSet(elements) + + dis2 = DisjointSet() + for i, x in enumerate(elements): + dis2.add(x) 
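+        # Editor's note (context, not part of the test): DisjointSet.add
+        # inserts x as its own singleton subset, so a structure can also be
+        # built incrementally, e.g.
+        #     ds = DisjointSet(); ds.add('a'); ds.add('b'); ds.merge('a', 'b')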
+ assert len(dis2) == i + 1 + + # test idempotency by adding element again + dis2.add(x) + assert len(dis2) == i + 1 + + assert list(dis1) == list(dis2) + + +def test_element_not_present(): + elements = get_elements(n=10) + dis = DisjointSet(elements) + + with assert_raises(KeyError): + dis["dummy"] + + with assert_raises(KeyError): + dis.merge(elements[0], "dummy") + + with assert_raises(KeyError): + dis.connected(elements[0], "dummy") + + +@pytest.mark.parametrize("direction", ["forwards", "backwards"]) +@pytest.mark.parametrize("n", [10, 100]) +def test_linear_union_sequence(n, direction): + elements = get_elements(n) + dis = DisjointSet(elements) + assert elements == list(dis) + + indices = list(range(n - 1)) + if direction == "backwards": + indices = indices[::-1] + + for it, i in enumerate(indices): + assert not dis.connected(elements[i], elements[i + 1]) + assert dis.merge(elements[i], elements[i + 1]) + assert dis.connected(elements[i], elements[i + 1]) + assert dis.n_subsets == n - 1 - it + + roots = [dis[i] for i in elements] + if direction == "forwards": + assert all(elements[0] == r for r in roots) + else: + assert all(elements[-2] == r for r in roots) + assert not dis.merge(elements[0], elements[-1]) + + +@pytest.mark.parametrize("n", [10, 100]) +def test_self_unions(n): + elements = get_elements(n) + dis = DisjointSet(elements) + + for x in elements: + assert dis.connected(x, x) + assert not dis.merge(x, x) + assert dis.connected(x, x) + assert dis.n_subsets == len(elements) + + assert elements == list(dis) + roots = [dis[x] for x in elements] + assert elements == roots + + +@pytest.mark.parametrize("order", ["ab", "ba"]) +@pytest.mark.parametrize("n", [10, 100]) +def test_equal_size_ordering(n, order): + elements = get_elements(n) + dis = DisjointSet(elements) + + rng = np.random.RandomState(seed=0) + indices = np.arange(n) + rng.shuffle(indices) + + for i in range(0, len(indices), 2): + a, b = elements[indices[i]], elements[indices[i + 1]] + if order == "ab": + assert dis.merge(a, b) + else: + assert dis.merge(b, a) + + expected = elements[min(indices[i], indices[i + 1])] + assert dis[a] == expected + assert dis[b] == expected + + +@pytest.mark.parametrize("kmax", [5, 10]) +def test_binary_tree(kmax): + n = 2**kmax + elements = get_elements(n) + dis = DisjointSet(elements) + rng = np.random.RandomState(seed=0) + + for k in 2**np.arange(kmax): + for i in range(0, n, 2 * k): + r1, r2 = rng.randint(0, k, size=2) + a, b = elements[i + r1], elements[i + k + r2] + assert not dis.connected(a, b) + assert dis.merge(a, b) + assert dis.connected(a, b) + + assert elements == list(dis) + roots = [dis[i] for i in elements] + expected_indices = np.arange(n) - np.arange(n) % (2 * k) + expected = [elements[i] for i in expected_indices] + assert roots == expected + + +@pytest.mark.parametrize("n", [10, 100]) +def test_subsets(n): + elements = get_elements(n) + dis = DisjointSet(elements) + + rng = np.random.RandomState(seed=0) + for i, j in rng.randint(0, n, (n, 2)): + x = elements[i] + y = elements[j] + + expected = {element for element in dis if {dis[element]} == {dis[x]}} + assert dis.subset_size(x) == len(dis.subset(x)) + assert expected == dis.subset(x) + + expected = {dis[element]: set() for element in dis} + for element in dis: + expected[dis[element]].add(element) + expected = list(expected.values()) + assert expected == dis.subsets() + + dis.merge(x, y) + assert dis.subset(x) == dis.subset(y) diff --git 
a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py new file mode 100644 index 0000000000000000000000000000000000000000..c474bee6f4b943a1dd9047374f5791a728ccd8be --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py @@ -0,0 +1,1225 @@ +# +# Author: Damian Eads +# Date: April 17, 2008 +# +# Copyright (C) 2008 Damian Eads +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import numpy as np +from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns +import pytest +from pytest import raises as assert_raises + +import scipy.cluster.hierarchy +from scipy.cluster.hierarchy import ( + ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage, + num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster, + is_isomorphic, single, leaders, + correspond, is_monotonic, maxdists, maxinconsts, maxRstat, + is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram, + set_link_color_palette, cut_tree, optimal_leaf_ordering, + _order_cluster_tree, _hierarchy, _LINKAGE_METHODS) +from scipy.spatial.distance import pdist +from scipy.cluster._hierarchy import Heap +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import xp_assert_close, xp_assert_equal + +from . import hierarchy_test_data + + +# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so +# check if it's available +try: + import matplotlib + # and set the backend to be Agg (no gui) + matplotlib.use('Agg') + # before importing pyplot + import matplotlib.pyplot as plt + have_matplotlib = True +except Exception: + have_matplotlib = False + + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")] +skip_if_array_api = pytest.mark.skip_if_array_api + + +class TestLinkage: + + @skip_if_array_api(cpu_only=True) + def test_linkage_non_finite_elements_in_distance_matrix(self, xp): + # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf). + # Exception expected. 
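+        # Editor's note (context, not part of the test): a condensed
+        # distance vector of length m*(m-1)/2 encodes the pairwise
+        # distances of m observations, so the 6-element vector below
+        # corresponds to m = 4 observations (as pdist would produce for a
+        # (4, k) array).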
+ y = xp.zeros((6,)) + y[0] = xp.nan + assert_raises(ValueError, linkage, y) + + @skip_if_array_api(cpu_only=True) + def test_linkage_empty_distance_matrix(self, xp): + # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected. + y = xp.zeros((0,)) + assert_raises(ValueError, linkage, y) + + @skip_if_array_api(cpu_only=True) + def test_linkage_tdist(self, xp): + for method in ['single', 'complete', 'average', 'weighted']: + self.check_linkage_tdist(method, xp) + + def check_linkage_tdist(self, method, xp): + # Tests linkage(Y, method) on the tdist data set. + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), method) + expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + @skip_if_array_api(cpu_only=True) + def test_linkage_X(self, xp): + for method in ['centroid', 'median', 'ward']: + self.check_linkage_q(method, xp) + + def check_linkage_q(self, method, xp): + # Tests linkage(Y, method) on the Q data set. + Z = linkage(xp.asarray(hierarchy_test_data.X), method) + expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + y = scipy.spatial.distance.pdist(hierarchy_test_data.X, + metric="euclidean") + Z = linkage(xp.asarray(y), method) + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + @skip_if_array_api(cpu_only=True) + def test_compare_with_trivial(self, xp): + rng = np.random.RandomState(0) + n = 20 + X = rng.rand(n, 2) + d = pdist(X) + + for method, code in _LINKAGE_METHODS.items(): + Z_trivial = _hierarchy.linkage(d, n, code) + Z = linkage(xp.asarray(d), method) + xp_assert_close(Z, xp.asarray(Z_trivial), rtol=1e-14, atol=1e-15) + + @skip_if_array_api(cpu_only=True) + def test_optimal_leaf_ordering(self, xp): + Z = linkage(xp.asarray(hierarchy_test_data.ytdist), optimal_ordering=True) + expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo') + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + +@skip_if_array_api(cpu_only=True) +class TestLinkageTies: + + _expectations = { + 'single': np.array([[0, 1, 1.41421356, 2], + [2, 3, 1.41421356, 3]]), + 'complete': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.82842712, 3]]), + 'average': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'weighted': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'centroid': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'median': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.12132034, 3]]), + 'ward': np.array([[0, 1, 1.41421356, 2], + [2, 3, 2.44948974, 3]]), + } + + def test_linkage_ties(self, xp): + for method in ['single', 'complete', 'average', 'weighted', + 'centroid', 'median', 'ward']: + self.check_linkage_ties(method, xp) + + def check_linkage_ties(self, method, xp): + X = xp.asarray([[-1, -1], [0, 0], [1, 1]]) + Z = linkage(X, method=method) + expectedZ = self._expectations[method] + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + +@skip_if_array_api(cpu_only=True) +class TestInconsistent: + + def test_inconsistent_tdist(self, xp): + for depth in hierarchy_test_data.inconsistent_ytdist: + self.check_inconsistent_tdist(depth, xp) + + def check_inconsistent_tdist(self, depth, xp): + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + xp_assert_close(inconsistent(Z, depth), + xp.asarray(hierarchy_test_data.inconsistent_ytdist[depth])) + + +@skip_if_array_api(cpu_only=True) +class TestCopheneticDistance: + + def test_linkage_cophenet_tdist_Z(self, xp): + # Tests 
cophenet(Z) on tdist data set. + expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, + 295, 138, 219, 295, 295]) + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + M = cophenet(Z) + xp_assert_close(M, xp.asarray(expectedM, dtype=xp.float64), atol=1e-10) + + def test_linkage_cophenet_tdist_Z_Y(self, xp): + # Tests cophenet(Z, Y) on tdist data set. + Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single) + (c, M) = cophenet(Z, xp.asarray(hierarchy_test_data.ytdist)) + expectedM = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, + 295, 138, 219, 295, 295], dtype=xp.float64) + expectedc = xp.asarray(0.639931296433393415057366837573, dtype=xp.float64)[()] + xp_assert_close(c, expectedc, atol=1e-10) + xp_assert_close(M, expectedM, atol=1e-10) + + +class TestMLabLinkageConversion: + + def test_mlab_linkage_conversion_empty(self, xp): + # Tests from/to_mlab_linkage on empty linkage array. + X = xp.asarray([], dtype=xp.float64) + xp_assert_equal(from_mlab_linkage(X), X) + xp_assert_equal(to_mlab_linkage(X), X) + + @skip_if_array_api(cpu_only=True) + def test_mlab_linkage_conversion_single_row(self, xp): + # Tests from/to_mlab_linkage on linkage array with single row. + Z = xp.asarray([[0., 1., 3., 2.]]) + Zm = xp.asarray([[1, 2, 3]]) + xp_assert_close(from_mlab_linkage(Zm), xp.asarray(Z, dtype=xp.float64), + rtol=1e-15) + xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64), + rtol=1e-15) + + @skip_if_array_api(cpu_only=True) + def test_mlab_linkage_conversion_multiple_rows(self, xp): + # Tests from/to_mlab_linkage on linkage array with multiple rows. + Zm = xp.asarray([[3, 6, 138], [4, 5, 219], + [1, 8, 255], [2, 9, 268], [7, 10, 295]]) + Z = xp.asarray([[2., 5., 138., 2.], + [3., 4., 219., 2.], + [0., 7., 255., 3.], + [1., 8., 268., 4.], + [6., 9., 295., 6.]], + dtype=xp.float64) + xp_assert_close(from_mlab_linkage(Zm), Z, rtol=1e-15) + xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64), + rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +class TestFcluster: + + def test_fclusterdata(self, xp): + for t in hierarchy_test_data.fcluster_inconsistent: + self.check_fclusterdata(t, 'inconsistent', xp) + for t in hierarchy_test_data.fcluster_distance: + self.check_fclusterdata(t, 'distance', xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fclusterdata(t, 'maxclust', xp) + + def check_fclusterdata(self, t, criterion, xp): + # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set + expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]) + X = xp.asarray(hierarchy_test_data.Q_X) + T = fclusterdata(X, criterion=criterion, t=t) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster(self, xp): + for t in hierarchy_test_data.fcluster_inconsistent: + self.check_fcluster(t, 'inconsistent', xp) + for t in hierarchy_test_data.fcluster_distance: + self.check_fcluster(t, 'distance', xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fcluster(t, 'maxclust', xp) + + def check_fcluster(self, t, criterion, xp): + # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set. 
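+        # Editor's note (summary of the criteria, not part of the test):
+        #   criterion='distance'     cuts the tree at cophenetic distance t
+        #   criterion='maxclust'     asks for at most t flat clusters
+        #   criterion='inconsistent' thresholds the inconsistency statistic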
+ expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, criterion=criterion, t=t) + assert_(is_isomorphic(T, expectedT)) + + def test_fcluster_monocrit(self, xp): + for t in hierarchy_test_data.fcluster_distance: + self.check_fcluster_monocrit(t, xp) + for t in hierarchy_test_data.fcluster_maxclust: + self.check_fcluster_maxclust_monocrit(t, xp) + + def check_fcluster_monocrit(self, t, xp): + expectedT = xp.asarray(hierarchy_test_data.fcluster_distance[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z)) + assert_(is_isomorphic(T, expectedT)) + + def check_fcluster_maxclust_monocrit(self, t, xp): + expectedT = xp.asarray(hierarchy_test_data.fcluster_maxclust[t]) + Z = single(xp.asarray(hierarchy_test_data.Q_X)) + T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z)) + assert_(is_isomorphic(T, expectedT)) + + +@skip_if_array_api(cpu_only=True) +class TestLeaders: + + def test_leaders_single(self, xp): + # Tests leaders using a flat clustering generated by single linkage. + X = hierarchy_test_data.Q_X + Y = pdist(X) + Y = xp.asarray(Y) + Z = linkage(Y) + T = fcluster(Z, criterion='maxclust', t=3) + Lright = (xp.asarray([53, 55, 56]), xp.asarray([2, 3, 1])) + T = xp.asarray(T, dtype=xp.int32) + L = leaders(Z, T) + assert_allclose(np.concatenate(L), np.concatenate(Lright), rtol=1e-15) + + +@skip_if_array_api(np_only=True, + reasons=['`is_isomorphic` only supports NumPy backend']) +class TestIsIsomorphic: + + @skip_if_array_api(np_only=True, + reasons=['array-likes only supported for NumPy backend']) + def test_array_like(self, xp): + assert is_isomorphic([1, 1, 1], [2, 2, 2]) + assert is_isomorphic([], []) + + def test_is_isomorphic_1(self, xp): + # Tests is_isomorphic on test case #1 (one flat cluster, different labellings) + a = xp.asarray([1, 1, 1]) + b = xp.asarray([2, 2, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_2(self, xp): + # Tests is_isomorphic on test case #2 (two flat clusters, different labelings) + a = xp.asarray([1, 7, 1]) + b = xp.asarray([2, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_3(self, xp): + # Tests is_isomorphic on test case #3 (no flat clusters) + a = xp.asarray([]) + b = xp.asarray([]) + assert is_isomorphic(a, b) + + def test_is_isomorphic_4A(self, xp): + # Tests is_isomorphic on test case #4A + # (3 flat clusters, different labelings, isomorphic) + a = xp.asarray([1, 2, 3]) + b = xp.asarray([1, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_4B(self, xp): + # Tests is_isomorphic on test case #4B + # (3 flat clusters, different labelings, nonisomorphic) + a = xp.asarray([1, 2, 3, 3]) + b = xp.asarray([1, 3, 2, 3]) + assert is_isomorphic(a, b) is False + assert is_isomorphic(b, a) is False + + def test_is_isomorphic_4C(self, xp): + # Tests is_isomorphic on test case #4C + # (3 flat clusters, different labelings, isomorphic) + a = xp.asarray([7, 2, 3]) + b = xp.asarray([6, 3, 2]) + assert is_isomorphic(a, b) + assert is_isomorphic(b, a) + + def test_is_isomorphic_5(self, xp): + # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random + # clusters, random permutation of the labeling). 
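+        # Editor's note (illustrative, not part of the test): isomorphism
+        # here means a consistent relabelling exists, e.g. [0, 1, 1, 2] and
+        # [2, 0, 0, 1] are isomorphic via 0 -> 2, 1 -> 0, 2 -> 1, whereas
+        # [0, 1, 1, 2] and [2, 0, 1, 1] are not.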
+ for nc in [2, 3, 5]: + self.help_is_isomorphic_randperm(1000, nc, xp=xp) + + def test_is_isomorphic_6(self, xp): + # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random + # clusters, random permutation of the labeling, slightly + # nonisomorphic.) + for nc in [2, 3, 5]: + self.help_is_isomorphic_randperm(1000, nc, True, 5, xp=xp) + + def test_is_isomorphic_7(self, xp): + # Regression test for gh-6271 + a = xp.asarray([1, 2, 3]) + b = xp.asarray([1, 1, 1]) + assert not is_isomorphic(a, b) + + def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0, + *, xp): + for k in range(3): + a = (np.random.rand(nobs) * nclusters).astype(int) + b = np.zeros(a.size, dtype=int) + P = np.random.permutation(nclusters) + for i in range(0, a.shape[0]): + b[i] = P[a[i]] + if noniso: + Q = np.random.permutation(nobs) + b[Q[0:nerrors]] += 1 + b[Q[0:nerrors]] %= nclusters + a = xp.asarray(a) + b = xp.asarray(b) + assert is_isomorphic(a, b) == (not noniso) + assert is_isomorphic(b, a) == (not noniso) + + +@skip_if_array_api(cpu_only=True) +class TestIsValidLinkage: + + def test_is_valid_linkage_various_size(self, xp): + for nrow, ncol, valid in [(2, 5, False), (2, 3, False), + (1, 4, True), (2, 4, True)]: + self.check_is_valid_linkage_various_size(nrow, ncol, valid, xp) + + def check_is_valid_linkage_various_size(self, nrow, ncol, valid, xp): + # Tests is_valid_linkage(Z) with linkage matrices of various sizes + Z = xp.asarray([[0, 1, 3.0, 2, 5], + [3, 2, 4.0, 3, 3]], dtype=xp.float64) + Z = Z[:nrow, :ncol] + assert_(is_valid_linkage(Z) == valid) + if not valid: + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_int_type(self, xp): + # Tests is_valid_linkage(Z) with integer type. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.int64) + assert_(is_valid_linkage(Z) is False) + assert_raises(TypeError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_empty(self, xp): + # Tests is_valid_linkage(Z) with empty linkage. + Z = xp.zeros((0, 4), dtype=xp.float64) + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(is_valid_linkage(Z) is True) + + def test_is_valid_linkage_4_and_up_neg_index_left(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative indices (left). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,0] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_index_right(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative indices (right). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,1] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_dist(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative distances. 
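+        # Editor's note (what the loop exercises, not part of the test):
+        # for each size i, a random condensed distance vector of length
+        # i*(i-1)//2 is clustered, one height in column 2 of Z is corrupted
+        # to -0.5, and is_valid_linkage must then reject Z, since a valid
+        # linkage requires non-negative merge distances.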
+ for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,2] = -0.5 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + def test_is_valid_linkage_4_and_up_neg_counts(self, xp): + # Tests is_valid_linkage(Z) on linkage on observation sets between + # sizes 4 and 15 (step size 3) with negative counts. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + Z[i//2,3] = -2 + assert_(is_valid_linkage(Z) is False) + assert_raises(ValueError, is_valid_linkage, Z, throw=True) + + +@skip_if_array_api(cpu_only=True) +class TestIsValidInconsistent: + + def test_is_valid_im_int_type(self, xp): + # Tests is_valid_im(R) with integer type. + R = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.int64) + assert_(is_valid_im(R) is False) + assert_raises(TypeError, is_valid_im, R, throw=True) + + def test_is_valid_im_various_size(self, xp): + for nrow, ncol, valid in [(2, 5, False), (2, 3, False), + (1, 4, True), (2, 4, True)]: + self.check_is_valid_im_various_size(nrow, ncol, valid, xp) + + def check_is_valid_im_various_size(self, nrow, ncol, valid, xp): + # Tests is_valid_im(R) with linkage matrices of various sizes + R = xp.asarray([[0, 1, 3.0, 2, 5], + [3, 2, 4.0, 3, 3]], dtype=xp.float64) + R = R[:nrow, :ncol] + assert_(is_valid_im(R) == valid) + if not valid: + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_empty(self, xp): + # Tests is_valid_im(R) with empty inconsistency matrix. + R = xp.zeros((0, 4), dtype=xp.float64) + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + assert_(is_valid_im(R) is True) + + def test_is_valid_im_4_and_up_neg_index_left(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link height means. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,0] = -2.0 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up_neg_index_right(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link height standard deviations. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,1] = -2.0 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + def test_is_valid_im_4_and_up_neg_dist(self, xp): + # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 + # (step size 3) with negative link counts. + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + R = inconsistent(Z) + R[i//2,2] = -0.5 + assert_(is_valid_im(R) is False) + assert_raises(ValueError, is_valid_im, R, throw=True) + + +class TestNumObsLinkage: + + @skip_if_array_api(cpu_only=True) + def test_num_obs_linkage_empty(self, xp): + # Tests num_obs_linkage(Z) with empty linkage. 
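+        # Editor's note (context, not part of the test): a valid (n-1)-by-4
+        # linkage implies n = Z.shape[0] + 1 observations, so an empty
+        # linkage has no well-defined observation count and must raise.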
+ Z = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, num_obs_linkage, Z) + + def test_num_obs_linkage_1x4(self, xp): + # Tests num_obs_linkage(Z) on linkage over 2 observations. + Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64) + assert_equal(num_obs_linkage(Z), 2) + + def test_num_obs_linkage_2x4(self, xp): + # Tests num_obs_linkage(Z) on linkage over 3 observations. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.float64) + assert_equal(num_obs_linkage(Z), 3) + + @skip_if_array_api(cpu_only=True) + def test_num_obs_linkage_4_and_up(self, xp): + # Tests num_obs_linkage(Z) on linkage on observation sets between sizes + # 4 and 15 (step size 3). + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_equal(num_obs_linkage(Z), i) + + +@skip_if_array_api(cpu_only=True) +class TestLeavesList: + + def test_leaves_list_1x4(self, xp): + # Tests leaves_list(Z) on a 1x4 linkage. + Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64) + to_tree(Z) + assert_allclose(leaves_list(Z), [0, 1], rtol=1e-15) + + def test_leaves_list_2x4(self, xp): + # Tests leaves_list(Z) on a 2x4 linkage. + Z = xp.asarray([[0, 1, 3.0, 2], + [3, 2, 4.0, 3]], dtype=xp.float64) + to_tree(Z) + assert_allclose(leaves_list(Z), [0, 1, 2], rtol=1e-15) + + def test_leaves_list_Q(self, xp): + for method in ['single', 'complete', 'average', 'weighted', 'centroid', + 'median', 'ward']: + self.check_leaves_list_Q(method, xp) + + def check_leaves_list_Q(self, method, xp): + # Tests leaves_list(Z) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + node = to_tree(Z) + assert_allclose(node.pre_order(), leaves_list(Z), rtol=1e-15) + + def test_Q_subtree_pre_order(self, xp): + # Tests that pre_order() works when called on sub-trees. + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, 'single') + node = to_tree(Z) + assert_allclose(node.pre_order(), (node.get_left().pre_order() + + node.get_right().pre_order()), + rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +class TestCorrespond: + + def test_correspond_empty(self, xp): + # Tests correspond(Z, y) with empty linkage and condensed distance matrix. + y = xp.zeros((0,), dtype=xp.float64) + Z = xp.zeros((0,4), dtype=xp.float64) + assert_raises(ValueError, correspond, Z, y) + + def test_correspond_2_and_up(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. + for i in range(2, 4): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(correspond(Z, y)) + for i in range(4, 15, 3): + y = np.random.rand(i*(i-1)//2) + y = xp.asarray(y) + Z = linkage(y) + assert_(correspond(Z, y)) + + def test_correspond_4_and_up(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. Correspondence should be false. + for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) + + list(zip(list(range(3, 5)), list(range(2, 4))))): + y = np.random.rand(i*(i-1)//2) + y2 = np.random.rand(j*(j-1)//2) + y = xp.asarray(y) + y2 = xp.asarray(y2) + Z = linkage(y) + Z2 = linkage(y2) + assert not correspond(Z, y2) + assert not correspond(Z2, y) + + def test_correspond_4_and_up_2(self, xp): + # Tests correspond(Z, y) on linkage and CDMs over observation sets of + # different sizes. Correspondence should be false. 
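+        # Editor's note (context, not part of the test): correspond(Z, y)
+        # checks that both inputs imply the same observation count, i.e.
+        # Z.shape[0] + 1 == m with len(y) == m*(m-1)//2, so every (i, j)
+        # pairing with i != j below must fail to correspond.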
+        for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
+                       list(zip(list(range(16, 21)), list(range(2, 7))))):
+            y = np.random.rand(i*(i-1)//2)
+            y2 = np.random.rand(j*(j-1)//2)
+            y = xp.asarray(y)
+            y2 = xp.asarray(y2)
+            Z = linkage(y)
+            Z2 = linkage(y2)
+            assert not correspond(Z, y2)
+            assert not correspond(Z2, y)
+
+    def test_num_obs_linkage_multi_matrix(self, xp):
+        # Tests num_obs_linkage with observation matrices of multiple sizes.
+        for n in range(2, 10):
+            X = np.random.rand(n, 4)
+            Y = pdist(X)
+            Y = xp.asarray(Y)
+            Z = linkage(Y)
+            assert_equal(num_obs_linkage(Z), n)
+
+
+@skip_if_array_api(cpu_only=True)
+class TestIsMonotonic:
+
+    def test_is_monotonic_empty(self, xp):
+        # Tests is_monotonic(Z) on an empty linkage.
+        Z = xp.zeros((0, 4), dtype=xp.float64)
+        assert_raises(ValueError, is_monotonic, Z)
+
+    def test_is_monotonic_1x4(self, xp):
+        # Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
+        Z = xp.asarray([[0, 1, 0.3, 2]], dtype=xp.float64)
+        assert is_monotonic(Z)
+
+    def test_is_monotonic_2x4_T(self, xp):
+        # Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
+        Z = xp.asarray([[0, 1, 0.3, 2],
+                        [2, 3, 0.4, 3]], dtype=xp.float64)
+        assert is_monotonic(Z)
+
+    def test_is_monotonic_2x4_F(self, xp):
+        # Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
+        Z = xp.asarray([[0, 1, 0.4, 2],
+                        [2, 3, 0.3, 3]], dtype=xp.float64)
+        assert not is_monotonic(Z)
+
+    def test_is_monotonic_3x4_T(self, xp):
+        # Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
+        Z = xp.asarray([[0, 1, 0.3, 2],
+                        [2, 3, 0.4, 2],
+                        [4, 5, 0.6, 4]], dtype=xp.float64)
+        assert is_monotonic(Z)
+
+    def test_is_monotonic_3x4_F1(self, xp):
+        # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
+        Z = xp.asarray([[0, 1, 0.3, 2],
+                        [2, 3, 0.2, 2],
+                        [4, 5, 0.6, 4]], dtype=xp.float64)
+        assert not is_monotonic(Z)
+
+    def test_is_monotonic_3x4_F2(self, xp):
+        # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
+        Z = xp.asarray([[0, 1, 0.8, 2],
+                        [2, 3, 0.4, 2],
+                        [4, 5, 0.6, 4]], dtype=xp.float64)
+        assert not is_monotonic(Z)
+
+    def test_is_monotonic_3x4_F3(self, xp):
+        # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False.
+        Z = xp.asarray([[0, 1, 0.3, 2],
+                        [2, 3, 0.4, 2],
+                        [4, 5, 0.2, 4]], dtype=xp.float64)
+        assert not is_monotonic(Z)
+
+    def test_is_monotonic_tdist_linkage1(self, xp):
+        # Tests is_monotonic(Z) on clustering generated by single linkage on
+        # tdist data set. Expecting True.
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+        assert is_monotonic(Z)
+
+    def test_is_monotonic_tdist_linkage2(self, xp):
+        # Tests is_monotonic(Z) on clustering generated by single linkage on
+        # tdist data set. Perturbing. Expecting False.
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+        Z[2,2] = 0.0
+        assert not is_monotonic(Z)
+
+    def test_is_monotonic_Q_linkage(self, xp):
+        # Tests is_monotonic(Z) on clustering generated by single linkage on
+        # Q data set. Expecting True.
+        X = xp.asarray(hierarchy_test_data.Q_X)
+        Z = linkage(X, 'single')
+        assert is_monotonic(Z)
+
+
+@skip_if_array_api(cpu_only=True)
+class TestMaxDists:
+
+    def test_maxdists_empty_linkage(self, xp):
+        # Tests maxdists(Z) on empty linkage. Expecting exception.
+        Z = xp.zeros((0, 4), dtype=xp.float64)
+        assert_raises(ValueError, maxdists, Z)
+
+    def test_maxdists_one_cluster_linkage(self, xp):
+        # Tests maxdists(Z) on linkage with one cluster.
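+        # maxdists(Z)[i] is the maximum merge distance occurring in the
+        # subtree rooted at non-singleton cluster n + i; the
+        # calculate_maximum_distances helper defined near the bottom of this
+        # file recomputes that quantity independently for comparison.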
+ Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + MD = maxdists(Z) + expectedMD = calculate_maximum_distances(Z, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + def test_maxdists_Q_linkage(self, xp): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + self.check_maxdists_Q_linkage(method, xp) + + def check_maxdists_Q_linkage(self, method, xp): + # Tests maxdists(Z) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + MD = maxdists(Z) + expectedMD = calculate_maximum_distances(Z, xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + +class TestMaxInconsts: + + @skip_if_array_api(cpu_only=True) + def test_maxinconsts_empty_linkage(self, xp): + # Tests maxinconsts(Z, R) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + R = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxinconsts, Z, R) + + def test_maxinconsts_difrow_linkage(self, xp): + # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with + # different numbers of clusters. Expecting exception. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = np.random.rand(2, 4) + R = xp.asarray(R) + assert_raises(ValueError, maxinconsts, Z, R) + + @skip_if_array_api(cpu_only=True) + def test_maxinconsts_one_cluster_linkage(self, xp): + # Tests maxinconsts(Z, R) on linkage with one cluster. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) + MD = maxinconsts(Z, R) + expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + @skip_if_array_api(cpu_only=True) + def test_maxinconsts_Q_linkage(self, xp): + for method in ['single', 'complete', 'ward', 'centroid', 'median']: + self.check_maxinconsts_Q_linkage(method, xp) + + def check_maxinconsts_Q_linkage(self, method, xp): + # Tests maxinconsts(Z, R) on the Q data set + X = xp.asarray(hierarchy_test_data.Q_X) + Z = linkage(X, method) + R = inconsistent(Z) + MD = maxinconsts(Z, R) + expectedMD = calculate_maximum_inconsistencies(Z, R, xp=xp) + xp_assert_close(MD, expectedMD, atol=1e-15) + + +class TestMaxRStat: + + def test_maxRstat_invalid_index(self, xp): + for i in [3.3, -1, 4]: + self.check_maxRstat_invalid_index(i, xp) + + def check_maxRstat_invalid_index(self, i, xp): + # Tests maxRstat(Z, R, i). Expecting exception. + Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64) + R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64) + if isinstance(i, int): + assert_raises(ValueError, maxRstat, Z, R, i) + else: + assert_raises(TypeError, maxRstat, Z, R, i) + + @skip_if_array_api(cpu_only=True) + def test_maxRstat_empty_linkage(self, xp): + for i in range(4): + self.check_maxRstat_empty_linkage(i, xp) + + def check_maxRstat_empty_linkage(self, i, xp): + # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception. + Z = xp.zeros((0, 4), dtype=xp.float64) + R = xp.zeros((0, 4), dtype=xp.float64) + assert_raises(ValueError, maxRstat, Z, R, i) + + def test_maxRstat_difrow_linkage(self, xp): + for i in range(4): + self.check_maxRstat_difrow_linkage(i, xp) + + def check_maxRstat_difrow_linkage(self, i, xp): + # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with + # different numbers of clusters. Expecting exception. 
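+        # Z encodes a single merge (one row) while R has two rows; the row
+        # counts disagree, so maxRstat must reject the pair no matter which
+        # statistic column i is requested.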
+        Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
+        R = np.random.rand(2, 4)
+        R = xp.asarray(R)
+        assert_raises(ValueError, maxRstat, Z, R, i)
+
+    @skip_if_array_api(cpu_only=True)
+    def test_maxRstat_one_cluster_linkage(self, xp):
+        for i in range(4):
+            self.check_maxRstat_one_cluster_linkage(i, xp)
+
+    def check_maxRstat_one_cluster_linkage(self, i, xp):
+        # Tests maxRstat(Z, R, i) on linkage with one cluster.
+        Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
+        R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
+        MD = maxRstat(Z, R, i)
+        expectedMD = calculate_maximum_inconsistencies(Z, R, i, xp)
+        xp_assert_close(MD, expectedMD, atol=1e-15)
+
+    @skip_if_array_api(cpu_only=True)
+    def test_maxRstat_Q_linkage(self, xp):
+        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
+            for i in range(4):
+                self.check_maxRstat_Q_linkage(method, i, xp)
+
+    def check_maxRstat_Q_linkage(self, method, i, xp):
+        # Tests maxRstat(Z, R, i) on the Q data set.
+        X = xp.asarray(hierarchy_test_data.Q_X)
+        Z = linkage(X, method)
+        R = inconsistent(Z)
+        MD = maxRstat(Z, R, i)
+        expectedMD = calculate_maximum_inconsistencies(Z, R, i, xp)
+        xp_assert_close(MD, expectedMD, atol=1e-15)
+
+
+@skip_if_array_api(cpu_only=True)
+class TestDendrogram:
+
+    def test_dendrogram_single_linkage_tdist(self, xp):
+        # Tests dendrogram calculation on single linkage of the tdist data set.
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+        R = dendrogram(Z, no_plot=True)
+        leaves = R["leaves"]
+        assert_equal(leaves, [2, 5, 1, 0, 3, 4])
+
+    def test_valid_orientation(self, xp):
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+        assert_raises(ValueError, dendrogram, Z, orientation="foo")
+
+    def test_labels_as_array_or_list(self, xp):
+        # test for gh-12418
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+        labels = xp.asarray([1, 3, 2, 6, 4, 5])
+        result1 = dendrogram(Z, labels=labels, no_plot=True)
+        result2 = dendrogram(Z, labels=list(labels), no_plot=True)
+        assert result1 == result2
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_valid_label_size(self, xp):
+        link = xp.asarray([
+            [0, 1, 1.0, 4],
+            [2, 3, 1.0, 5],
+            [4, 5, 2.0, 6],
+        ])
+        plt.figure()
+        with pytest.raises(ValueError) as exc_info:
+            dendrogram(link, labels=list(range(100)))
+        assert "Dimensions of Z and labels must be consistent." \
+               in str(exc_info.value)
+
+        with pytest.raises(
+                ValueError,
+                match="Dimensions of Z and labels must be consistent."):
+            dendrogram(link, labels=[])
+
+        plt.close()
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_dendrogram_plot(self, xp):
+        for orientation in ['top', 'bottom', 'left', 'right']:
+            self.check_dendrogram_plot(orientation, xp)
+
+    def check_dendrogram_plot(self, orientation, xp):
+        # Tests dendrogram plotting.
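+        # dendrogram() returns a dict of plot data: 'icoord'/'dcoord' hold
+        # the x/y coordinates of the four corners of each inverted-U link,
+        # 'ivl' the leaf labels, and 'color_list' one color per link. The
+        # literal values in `expected` below come from this fixed tdist
+        # linkage.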
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+        expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
+                    'dcoord': [[0.0, 138.0, 138.0, 0.0],
+                               [0.0, 219.0, 219.0, 0.0],
+                               [0.0, 255.0, 255.0, 219.0],
+                               [0.0, 268.0, 268.0, 255.0],
+                               [138.0, 295.0, 295.0, 268.0]],
+                    'icoord': [[5.0, 5.0, 15.0, 15.0],
+                               [45.0, 45.0, 55.0, 55.0],
+                               [35.0, 35.0, 50.0, 50.0],
+                               [25.0, 25.0, 42.5, 42.5],
+                               [10.0, 10.0, 33.75, 33.75]],
+                    'ivl': ['2', '5', '1', '0', '3', '4'],
+                    'leaves': [2, 5, 1, 0, 3, 4],
+                    'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
+                    }
+
+        fig = plt.figure()
+        ax = fig.add_subplot(221)
+
+        # test that dendrogram accepts ax keyword
+        R1 = dendrogram(Z, ax=ax, orientation=orientation)
+        R1['dcoord'] = np.asarray(R1['dcoord'])
+        assert_equal(R1, expected)
+
+        # test that dendrogram accepts and handles the leaf_font_size and
+        # leaf_rotation keywords
+        dendrogram(Z, ax=ax, orientation=orientation,
+                   leaf_font_size=20, leaf_rotation=90)
+        testlabel = (
+            ax.get_xticklabels()[0]
+            if orientation in ['top', 'bottom']
+            else ax.get_yticklabels()[0]
+        )
+        assert_equal(testlabel.get_rotation(), 90)
+        assert_equal(testlabel.get_size(), 20)
+        dendrogram(Z, ax=ax, orientation=orientation,
+                   leaf_rotation=90)
+        testlabel = (
+            ax.get_xticklabels()[0]
+            if orientation in ['top', 'bottom']
+            else ax.get_yticklabels()[0]
+        )
+        assert_equal(testlabel.get_rotation(), 90)
+        dendrogram(Z, ax=ax, orientation=orientation,
+                   leaf_font_size=20)
+        testlabel = (
+            ax.get_xticklabels()[0]
+            if orientation in ['top', 'bottom']
+            else ax.get_yticklabels()[0]
+        )
+        assert_equal(testlabel.get_size(), 20)
+        plt.close()
+
+        # test plotting to gca (will import pylab)
+        R2 = dendrogram(Z, orientation=orientation)
+        plt.close()
+        R2['dcoord'] = np.asarray(R2['dcoord'])
+        assert_equal(R2, expected)
+
+    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
+    def test_dendrogram_truncate_mode(self, xp):
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+
+        R = dendrogram(Z, 2, 'lastp', show_contracted=True)
+        plt.close()
+        R['dcoord'] = np.asarray(R['dcoord'])
+        assert_equal(R, {'color_list': ['C0'],
+                         'dcoord': [[0.0, 295.0, 295.0, 0.0]],
+                         'icoord': [[5.0, 5.0, 15.0, 15.0]],
+                         'ivl': ['(2)', '(4)'],
+                         'leaves': [6, 9],
+                         'leaves_color_list': ['C0', 'C0'],
+                         })
+
+        R = dendrogram(Z, 2, 'mtica', show_contracted=True)
+        plt.close()
+        R['dcoord'] = np.asarray(R['dcoord'])
+        assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
+                         'dcoord': [[0.0, 138.0, 138.0, 0.0],
+                                    [0.0, 255.0, 255.0, 0.0],
+                                    [0.0, 268.0, 268.0, 255.0],
+                                    [138.0, 295.0, 295.0, 268.0]],
+                         'icoord': [[5.0, 5.0, 15.0, 15.0],
+                                    [35.0, 35.0, 45.0, 45.0],
+                                    [25.0, 25.0, 40.0, 40.0],
+                                    [10.0, 10.0, 32.5, 32.5]],
+                         'ivl': ['2', '5', '1', '0', '(2)'],
+                         'leaves': [2, 5, 1, 0, 7],
+                         'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
+                         })
+
+    def test_dendrogram_colors(self, xp):
+        # Tests dendrogram plots with alternate colors.
+        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
+
+        set_link_color_palette(['c', 'm', 'y', 'k'])
+        R = dendrogram(Z, no_plot=True,
+                       above_threshold_color='g', color_threshold=250)
+        set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
+
+        color_list = R['color_list']
+        assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
+
+        # reset color palette (global list)
+        set_link_color_palette(None)
+
+    def test_dendrogram_leaf_colors_zero_dist(self, xp):
+        # tests that the colors of leaves are correct for a tree
+        # with two identical points
+        x = xp.asarray([[1, 0, 0],
+                        [0, 0,
1], + [0, 2, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 0]]) + z = linkage(x, "single") + d = dendrogram(z, no_plot=True) + exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2'] + colors = d["leaves_color_list"] + assert_equal(colors, exp_colors) + + def test_dendrogram_leaf_colors(self, xp): + # tests that the colors are correct for a tree + # with two near points ((0, 0, 1.1) and (0, 0, 1)) + x = xp.asarray([[1, 0, 0], + [0, 0, 1.1], + [0, 2, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 0]]) + z = linkage(x, "single") + d = dendrogram(z, no_plot=True) + exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2'] + colors = d["leaves_color_list"] + assert_equal(colors, exp_colors) + + +def calculate_maximum_distances(Z, xp): + # Used for testing correctness of maxdists. + n = Z.shape[0] + 1 + B = xp.zeros((n-1,), dtype=Z.dtype) + q = xp.zeros((3,)) + for i in range(0, n - 1): + q[:] = 0.0 + left = Z[i, 0] + right = Z[i, 1] + if left >= n: + q[0] = B[xp.asarray(left, dtype=xp.int64) - n] + if right >= n: + q[1] = B[xp.asarray(right, dtype=xp.int64) - n] + q[2] = Z[i, 2] + B[i] = xp.max(q) + return B + + +def calculate_maximum_inconsistencies(Z, R, k=3, xp=np): + # Used for testing correctness of maxinconsts. + n = Z.shape[0] + 1 + dtype = xp.result_type(Z, R) + B = xp.zeros((n-1,), dtype=dtype) + q = xp.zeros((3,)) + for i in range(0, n - 1): + q[:] = 0.0 + left = Z[i, 0] + right = Z[i, 1] + if left >= n: + q[0] = B[xp.asarray(left, dtype=xp.int64) - n] + if right >= n: + q[1] = B[xp.asarray(right, dtype=xp.int64) - n] + q[2] = R[i, k] + B[i] = xp.max(q) + return B + + +@skip_if_array_api(cpu_only=True) +def test_unsupported_uncondensed_distance_matrix_linkage_warning(xp): + assert_warns(ClusterWarning, linkage, xp.asarray([[0, 1], [1, 0]])) + + +def test_euclidean_linkage_value_error(xp): + for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS: + assert_raises(ValueError, linkage, xp.asarray([[1, 1], [1, 1]]), + method=method, metric='cityblock') + + +@skip_if_array_api(cpu_only=True) +def test_2x2_linkage(xp): + Z1 = linkage(xp.asarray([1]), method='single', metric='euclidean') + Z2 = linkage(xp.asarray([[0, 1], [0, 0]]), method='single', metric='euclidean') + xp_assert_close(Z1, Z2, rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +def test_node_compare(xp): + np.random.seed(23) + nobs = 50 + X = np.random.randn(nobs, 4) + X = xp.asarray(X) + Z = scipy.cluster.hierarchy.ward(X) + tree = to_tree(Z) + assert_(tree > tree.get_left()) + assert_(tree.get_right() > tree.get_left()) + assert_(tree.get_right() == tree.get_right()) + assert_(tree.get_right() != tree.get_left()) + + +@skip_if_array_api(np_only=True, reasons=['`cut_tree` uses non-standard indexing']) +def test_cut_tree(xp): + np.random.seed(23) + nobs = 50 + X = np.random.randn(nobs, 4) + X = xp.asarray(X) + Z = scipy.cluster.hierarchy.ward(X) + cutree = cut_tree(Z) + + # cutree.dtype varies between int32 and int64 over platforms + xp_assert_close(cutree[:, 0], xp.arange(nobs), rtol=1e-15, check_dtype=False) + xp_assert_close(cutree[:, -1], xp.zeros(nobs), rtol=1e-15, check_dtype=False) + assert_equal(np.asarray(cutree).max(0), np.arange(nobs - 1, -1, -1)) + + xp_assert_close(cutree[:, [-5]], cut_tree(Z, n_clusters=5), rtol=1e-15) + xp_assert_close(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]), rtol=1e-15) + xp_assert_close(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]), rtol=1e-15) + + nodes = _order_cluster_tree(Z) + heights = xp.asarray([node.dist for node in nodes]) + + xp_assert_close(cutree[:, np.searchsorted(heights, [5])], + 
cut_tree(Z, height=5), rtol=1e-15) + xp_assert_close(cutree[:, np.searchsorted(heights, [5, 10])], + cut_tree(Z, height=[5, 10]), rtol=1e-15) + xp_assert_close(cutree[:, np.searchsorted(heights, [10, 5])], + cut_tree(Z, height=[10, 5]), rtol=1e-15) + + +@skip_if_array_api(cpu_only=True) +def test_optimal_leaf_ordering(xp): + # test with the distance vector y + Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.ytdist)), + xp.asarray(hierarchy_test_data.ytdist)) + expectedZ = hierarchy_test_data.linkage_ytdist_single_olo + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10) + + # test with the observation matrix X + Z = optimal_leaf_ordering(linkage(xp.asarray(hierarchy_test_data.X), 'ward'), + xp.asarray(hierarchy_test_data.X)) + expectedZ = hierarchy_test_data.linkage_X_ward_olo + xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06) + + +@skip_if_array_api(np_only=True, reasons=['`Heap` only supports NumPy backend']) +def test_Heap(xp): + values = xp.asarray([2, -1, 0, -1.5, 3]) + heap = Heap(values) + + pair = heap.get_min() + assert_equal(pair['key'], 3) + assert_equal(pair['value'], -1.5) + + heap.remove_min() + pair = heap.get_min() + assert_equal(pair['key'], 1) + assert_equal(pair['value'], -1) + + heap.change_value(1, 2.5) + pair = heap.get_min() + assert_equal(pair['key'], 2) + assert_equal(pair['value'], 0) + + heap.remove_min() + heap.remove_min() + + heap.change_value(1, 10) + pair = heap.get_min() + assert_equal(pair['key'], 4) + assert_equal(pair['value'], 3) + + heap.remove_min() + pair = heap.get_min() + assert_equal(pair['key'], 1) + assert_equal(pair['value'], 10) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py new file mode 100644 index 0000000000000000000000000000000000000000..10b1416b83b44a3f5f4c8e9af396fab21f6ae1f3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py @@ -0,0 +1,421 @@ +import warnings +import sys +from copy import deepcopy + +import numpy as np +from numpy.testing import ( + assert_array_equal, assert_equal, assert_, suppress_warnings +) +import pytest +from pytest import raises as assert_raises + +from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten, + ClusterError, _krandinit) +from scipy.cluster import _vq +from scipy.conftest import array_api_compatible +from scipy.sparse._sputils import matrix + +from scipy._lib._array_api import ( + SCIPY_ARRAY_API, copy, cov, xp_assert_close, xp_assert_equal +) + +pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_if_array_api")] +skip_if_array_api = pytest.mark.skip_if_array_api + +TESTDATA_2D = np.array([ + -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68, + -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45, + 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28, + -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07, + -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29, + -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25, + 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21, + -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67, + -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94, + -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33, + 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8, + -1.39, 0.66, 
-2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29, + 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75, + -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17, + 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44, + -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83, + 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28, + 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62, + -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35, + 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84, + -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75, + -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86, + -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83, + 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75, + -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03, + 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0, + 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99, + -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21, + 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75, + 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37, + -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0, + -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84, + 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69, + -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51, + -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71, + -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61, + 2.11]).reshape((200, 2)) + + +# Global data +X = np.array([[3.0, 3], [4, 3], [4, 2], + [9, 2], [5, 1], [6, 2], [9, 4], + [5, 2], [5, 4], [7, 4], [6, 5]]) + +CODET1 = np.array([[3.0000, 3.0000], + [6.2000, 4.0000], + [5.8000, 1.8000]]) + +CODET2 = np.array([[11.0/3, 8.0/3], + [6.7500, 4.2500], + [6.2500, 1.7500]]) + +LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1]) + + +class TestWhiten: + + def test_whiten(self, xp): + desired = xp.asarray([[5.08738849, 2.97091878], + [3.19909255, 0.69660580], + [4.51041982, 0.02640918], + [4.38567074, 0.95120889], + [2.32191480, 1.63195503]]) + + obs = xp.asarray([[0.98744510, 0.82766775], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + xp_assert_close(whiten(obs), desired, rtol=1e-5) + + def test_whiten_zero_std(self, xp): + desired = xp.asarray([[0., 1.0, 2.86666544], + [0., 1.0, 1.32460034], + [0., 1.0, 3.74382172]]) + + obs = xp.asarray([[0., 1., 0.74109533], + [0., 1., 0.34243798], + [0., 1., 0.96785929]]) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + xp_assert_close(whiten(obs), desired, rtol=1e-5) + + assert_equal(len(w), 1) + assert_(issubclass(w[-1].category, RuntimeWarning)) + + def test_whiten_not_finite(self, xp): + for bad_value in xp.nan, xp.inf, -xp.inf: + obs = xp.asarray([[0.98744510, bad_value], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + assert_raises(ValueError, whiten, obs) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_whiten_not_finite_matrix(self, xp): + for bad_value in np.nan, np.inf, -np.inf: + 
obs = matrix([[0.98744510, bad_value], + [0.62093317, 0.19406729], + [0.87545741, 0.00735733], + [0.85124403, 0.26499712], + [0.45067590, 0.45464607]]) + assert_raises(ValueError, whiten, obs) + + +class TestVq: + + @skip_if_array_api(cpu_only=True) + def test_py_vq(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + # label1.dtype varies between int32 and int64 over platforms + label1 = py_vq(xp.asarray(X), xp.asarray(initc))[0] + xp_assert_equal(label1, xp.asarray(LABEL1, dtype=xp.int64), + check_dtype=False) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_py_vq_matrix(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + # label1.dtype varies between int32 and int64 over platforms + label1 = py_vq(matrix(X), matrix(initc))[0] + assert_array_equal(label1, LABEL1) + + @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend']) + def test_vq(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + label1, _ = _vq.vq(xp.asarray(X), xp.asarray(initc)) + assert_array_equal(label1, LABEL1) + _, _ = vq(xp.asarray(X), xp.asarray(initc)) + + @pytest.mark.skipif(SCIPY_ARRAY_API, + reason='`np.matrix` unsupported in array API mode') + def test_vq_matrix(self, xp): + initc = np.concatenate([[X[0]], [X[1]], [X[2]]]) + label1, _ = _vq.vq(matrix(X), matrix(initc)) + assert_array_equal(label1, LABEL1) + _, _ = vq(matrix(X), matrix(initc)) + + @skip_if_array_api(cpu_only=True) + def test_vq_1d(self, xp): + # Test special rank 1 vq algo, python implementation. + data = X[:, 0] + initc = data[:3] + a, b = _vq.vq(data, initc) + data = xp.asarray(data) + initc = xp.asarray(initc) + ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis]) + # ta.dtype varies between int32 and int64 over platforms + xp_assert_equal(ta, xp.asarray(a, dtype=xp.int64), check_dtype=False) + xp_assert_equal(tb, xp.asarray(b)) + + @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend']) + def test__vq_sametype(self, xp): + a = xp.asarray([1.0, 2.0], dtype=xp.float64) + b = a.astype(xp.float32) + assert_raises(TypeError, _vq.vq, a, b) + + @skip_if_array_api(np_only=True, reasons=['`_vq` only supports NumPy backend']) + def test__vq_invalid_type(self, xp): + a = xp.asarray([1, 2], dtype=int) + assert_raises(TypeError, _vq.vq, a, a) + + @skip_if_array_api(cpu_only=True) + def test_vq_large_nfeat(self, xp): + X = np.random.rand(20, 20) + code_book = np.random.rand(3, 20) + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq( + xp.asarray(X), xp.asarray(code_book) + ) + xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5) + # codes1.dtype varies between int32 and int64 over platforms + xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False) + + X = X.astype(np.float32) + code_book = code_book.astype(np.float32) + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq( + xp.asarray(X), xp.asarray(code_book) + ) + xp_assert_close(dis1, xp.asarray(dis0, dtype=xp.float64), rtol=1e-5) + # codes1.dtype varies between int32 and int64 over platforms + xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False) + + @skip_if_array_api(cpu_only=True) + def test_vq_large_features(self, xp): + X = np.random.rand(10, 5) * 1000000 + code_book = np.random.rand(2, 5) * 1000000 + + codes0, dis0 = _vq.vq(X, code_book) + codes1, dis1 = py_vq( + xp.asarray(X), xp.asarray(code_book) + ) + xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5) + # codes1.dtype varies between int32 
and int64 over platforms
+        xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
+
+
+# Whole class skipped on GPU for now;
+# once pdist/cdist are hooked up for CuPy, more tests will work
+@skip_if_array_api(cpu_only=True)
+class TestKMean:
+
+    def test_large_features(self, xp):
+        # Generate a data set with large values, and run kmeans on it
+        # (regression test for gh-1077).
+        d = 300
+        n = 100
+
+        m1 = np.random.randn(d)
+        m2 = np.random.randn(d)
+        x = 10000 * np.random.randn(n, d) - 20000 * m1
+        y = 10000 * np.random.randn(n, d) + 20000 * m2
+
+        data = np.empty((x.shape[0] + y.shape[0], d), np.float64)
+        data[:x.shape[0]] = x
+        data[x.shape[0]:] = y
+
+        kmeans(xp.asarray(data), 2)
+
+    def test_kmeans_simple(self, xp):
+        np.random.seed(54321)
+        initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
+        code1 = kmeans(xp.asarray(X), xp.asarray(initc), iter=1)[0]
+        xp_assert_close(code1, xp.asarray(CODET2))
+
+    @pytest.mark.skipif(SCIPY_ARRAY_API,
+                        reason='`np.matrix` unsupported in array API mode')
+    def test_kmeans_simple_matrix(self, xp):
+        np.random.seed(54321)
+        initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
+        code1 = kmeans(matrix(X), matrix(initc), iter=1)[0]
+        xp_assert_close(code1, CODET2)
+
+    def test_kmeans_lost_cluster(self, xp):
+        # This will cause kmeans to have a cluster with no points.
+        data = xp.asarray(TESTDATA_2D)
+        initk = xp.asarray([[-1.8127404, -0.67128041],
+                            [2.04621601, 0.07401111],
+                            [-2.31149087, -0.05160469]])
+
+        kmeans(data, initk)
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning,
+                       "One of the clusters is empty. Re-run kmeans with a "
+                       "different initialization")
+            kmeans2(data, initk, missing='warn')
+
+        assert_raises(ClusterError, kmeans2, data, initk, missing='raise')
+
+    def test_kmeans2_simple(self, xp):
+        np.random.seed(12345678)
+        initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
+        arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix]
+        for tp in arrays:
+            code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
+            code2 = kmeans2(tp(X), tp(initc), iter=2)[0]
+
+            xp_assert_close(code1, xp.asarray(CODET1))
+            xp_assert_close(code2, xp.asarray(CODET2))
+
+    @pytest.mark.skipif(SCIPY_ARRAY_API,
+                        reason='`np.matrix` unsupported in array API mode')
+    def test_kmeans2_simple_matrix(self, xp):
+        np.random.seed(12345678)
+        initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
+        code1 = kmeans2(matrix(X), matrix(initc), iter=1)[0]
+        code2 = kmeans2(matrix(X), matrix(initc), iter=2)[0]
+
+        xp_assert_close(code1, CODET1)
+        xp_assert_close(code2, CODET2)
+
+    def test_kmeans2_rank1(self, xp):
+        data = xp.asarray(TESTDATA_2D)
+        data1 = data[:, 0]
+
+        initc = data1[:3]
+        code = copy(initc, xp=xp)
+        kmeans2(data1, code, iter=1)[0]
+        kmeans2(data1, code, iter=2)[0]
+
+    def test_kmeans2_rank1_2(self, xp):
+        data = xp.asarray(TESTDATA_2D)
+        data1 = data[:, 0]
+        kmeans2(data1, 2, iter=1)
+
+    def test_kmeans2_high_dim(self, xp):
+        # test kmeans2 when the number of dimensions exceeds the number
+        # of input points
+        data = xp.asarray(TESTDATA_2D)
+        data = xp.reshape(data, (20, 20))[:10, :]
+        kmeans2(data, 2)
+
+    def test_kmeans2_init(self, xp):
+        np.random.seed(12345)
+        data = xp.asarray(TESTDATA_2D)
+        k = 3
+
+        kmeans2(data, k, minit='points')
+        kmeans2(data[:, 1], k, minit='points')  # special case (1-D)
+
+        kmeans2(data, k, minit='++')
+        kmeans2(data[:, 1], k, minit='++')  # special case (1-D)
+
+        # minit='random' can give warnings, filter those
+        with suppress_warnings() as sup:
+            sup.filter(message="One of the clusters is
empty. Re-run.") + kmeans2(data, k, minit='random') + kmeans2(data[:, 1], k, minit='random') # special case (1-D) + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MemoryError in Wine.') + def test_krandinit(self, xp): + data = xp.asarray(TESTDATA_2D) + datas = [xp.reshape(data, (200, 2)), + xp.reshape(data, (20, 20))[:10, :]] + k = int(1e6) + for data in datas: + rng = np.random.default_rng(1234) + init = _krandinit(data, k, rng, xp) + orig_cov = cov(data.T) + init_cov = cov(init.T) + xp_assert_close(orig_cov, init_cov, atol=1e-2) + + def test_kmeans2_empty(self, xp): + # Regression test for gh-1032. + assert_raises(ValueError, kmeans2, xp.asarray([]), 2) + + def test_kmeans_0k(self, xp): + # Regression test for gh-1073: fail when k arg is 0. + assert_raises(ValueError, kmeans, xp.asarray(X), 0) + assert_raises(ValueError, kmeans2, xp.asarray(X), 0) + assert_raises(ValueError, kmeans2, xp.asarray(X), xp.asarray([])) + + def test_kmeans_large_thres(self, xp): + # Regression test for gh-1774 + x = xp.asarray([1, 2, 3, 4, 10], dtype=xp.float64) + res = kmeans(x, 1, thresh=1e16) + xp_assert_close(res[0], xp.asarray([4.], dtype=xp.float64)) + xp_assert_close(res[1], xp.asarray(2.3999999999999999, dtype=xp.float64)[()]) + + def test_kmeans2_kpp_low_dim(self, xp): + # Regression test for gh-11462 + prev_res = xp.asarray([[-1.95266667, 0.898], + [-3.153375, 3.3945]], dtype=xp.float64) + np.random.seed(42) + res, _ = kmeans2(xp.asarray(TESTDATA_2D), 2, minit='++') + xp_assert_close(res, prev_res) + + def test_kmeans2_kpp_high_dim(self, xp): + # Regression test for gh-11462 + n_dim = 100 + size = 10 + centers = np.vstack([5 * np.ones(n_dim), + -5 * np.ones(n_dim)]) + np.random.seed(42) + data = np.vstack([ + np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size), + np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size) + ]) + + data = xp.asarray(data) + res, _ = kmeans2(data, 2, minit='++') + xp_assert_equal(xp.sign(res), xp.sign(xp.asarray(centers))) + + def test_kmeans_diff_convergence(self, xp): + # Regression test for gh-8727 + obs = xp.asarray([-3, -1, 0, 1, 1, 8], dtype=xp.float64) + res = kmeans(obs, xp.asarray([-3., 0.99])) + xp_assert_close(res[0], xp.asarray([-0.4, 8.], dtype=xp.float64)) + xp_assert_close(res[1], xp.asarray(1.0666666666666667, dtype=xp.float64)[()]) + + def test_kmeans_and_kmeans2_random_seed(self, xp): + + seed_list = [ + 1234, np.random.RandomState(1234), np.random.default_rng(1234) + ] + + for seed in seed_list: + seed1 = deepcopy(seed) + seed2 = deepcopy(seed) + data = xp.asarray(TESTDATA_2D) + # test for kmeans + res1, _ = kmeans(data, 2, seed=seed1) + res2, _ = kmeans(data, 2, seed=seed2) + xp_assert_close(res1, res2, xp=xp) # should be same results + # test for kmeans2 + for minit in ["random", "points", "++"]: + res1, _ = kmeans2(data, 2, minit=minit, seed=seed1) + res2, _ = kmeans2(data, 2, minit=minit, seed=seed2) + xp_assert_close(res1, res2, xp=xp) # should be same results diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/cluster/vq.py b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/vq.py new file mode 100644 index 0000000000000000000000000000000000000000..f4b5179ce524f2dbb15939d004b5f7ff3cfa6847 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/cluster/vq.py @@ -0,0 +1,835 @@ +""" +K-means clustering and vector quantization (:mod:`scipy.cluster.vq`) +==================================================================== + +Provides routines for k-means clustering, generating code 
books +from k-means models and quantizing vectors by comparing them with +centroids in a code book. + +.. autosummary:: + :toctree: generated/ + + whiten -- Normalize a group of observations so each feature has unit variance + vq -- Calculate code book membership of a set of observation vectors + kmeans -- Perform k-means on a set of observation vectors forming k clusters + kmeans2 -- A different implementation of k-means with more methods + -- for initializing centroids + +Background information +---------------------- +The k-means algorithm takes as input the number of clusters to +generate, k, and a set of observation vectors to cluster. It +returns a set of centroids, one for each of the k clusters. An +observation vector is classified with the cluster number or +centroid index of the centroid closest to it. + +A vector v belongs to cluster i if it is closer to centroid i than +any other centroid. If v belongs to i, we say centroid i is the +dominating centroid of v. The k-means algorithm tries to +minimize distortion, which is defined as the sum of the squared distances +between each observation vector and its dominating centroid. +The minimization is achieved by iteratively reclassifying +the observations into clusters and recalculating the centroids until +a configuration is reached in which the centroids are stable. One can +also define a maximum number of iterations. + +Since vector quantization is a natural application for k-means, +information theory terminology is often used. The centroid index +or cluster index is also referred to as a "code" and the table +mapping codes to centroids and, vice versa, is often referred to as a +"code book". The result of k-means, a set of centroids, can be +used to quantize vectors. Quantization aims to find an encoding of +vectors that reduces the expected distortion. + +All routines expect obs to be an M by N array, where the rows are +the observation vectors. The codebook is a k by N array, where the +ith row is the centroid of code word i. The observation vectors +and centroids have the same feature dimension. + +As an example, suppose we wish to compress a 24-bit color image +(each pixel is represented by one byte for red, one for blue, and +one for green) before sending it over the web. By using a smaller +8-bit encoding, we can reduce the amount of data by two +thirds. Ideally, the colors for each of the 256 possible 8-bit +encoding values should be chosen to minimize distortion of the +color. Running k-means with k=256 generates a code book of 256 +codes, which fills up all possible 8-bit sequences. Instead of +sending a 3-byte value for each pixel, the 8-bit centroid index +(or code word) of the dominating centroid is transmitted. The code +book is also sent over the wire so each 8-bit code can be +translated back to a 24-bit pixel value representation. If the +image of interest was of an ocean, we would expect many 24-bit +blues to be represented by 8-bit codes. If it was an image of a +human face, more flesh-tone colors would be represented in the +code book. + +""" +import warnings +import numpy as np +from collections import deque +from scipy._lib._array_api import ( + _asarray, array_namespace, size, atleast_nd, copy, cov +) +from scipy._lib._util import check_random_state, rng_integers +from scipy.spatial.distance import cdist + +from . 
import _vq + +__docformat__ = 'restructuredtext' + +__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2'] + + +class ClusterError(Exception): + pass + + +def whiten(obs, check_finite=True): + """ + Normalize a group of observations on a per feature basis. + + Before running k-means, it is beneficial to rescale each feature + dimension of the observation set by its standard deviation (i.e. "whiten" + it - as in "white noise" where each frequency has equal power). + Each feature is divided by its standard deviation across all observations + to give it unit variance. + + Parameters + ---------- + obs : ndarray + Each row of the array is an observation. The + columns are the features seen during each observation. + + >>> # f0 f1 f2 + >>> obs = [[ 1., 1., 1.], #o0 + ... [ 2., 2., 2.], #o1 + ... [ 3., 3., 3.], #o2 + ... [ 4., 4., 4.]] #o3 + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + result : ndarray + Contains the values in `obs` scaled by the standard deviation + of each column. + + Examples + -------- + >>> import numpy as np + >>> from scipy.cluster.vq import whiten + >>> features = np.array([[1.9, 2.3, 1.7], + ... [1.5, 2.5, 2.2], + ... [0.8, 0.6, 1.7,]]) + >>> whiten(features) + array([[ 4.17944278, 2.69811351, 7.21248917], + [ 3.29956009, 2.93273208, 9.33380951], + [ 1.75976538, 0.7038557 , 7.21248917]]) + + """ + xp = array_namespace(obs) + obs = _asarray(obs, check_finite=check_finite, xp=xp) + std_dev = xp.std(obs, axis=0) + zero_std_mask = std_dev == 0 + if xp.any(zero_std_mask): + std_dev[zero_std_mask] = 1.0 + warnings.warn("Some columns have standard deviation zero. " + "The values of these columns will not change.", + RuntimeWarning, stacklevel=2) + return obs / std_dev + + +def vq(obs, code_book, check_finite=True): + """ + Assign codes from a code book to observations. + + Assigns a code from a code book to each observation. Each + observation vector in the 'M' by 'N' `obs` array is compared with the + centroids in the code book and assigned the code of the closest + centroid. + + The features in `obs` should have unit variance, which can be + achieved by passing them through the whiten function. The code + book can be created with the k-means algorithm or a different + encoding algorithm. + + Parameters + ---------- + obs : ndarray + Each row of the 'M' x 'N' array is an observation. The columns are + the "features" seen during each observation. The features must be + whitened first using the whiten function or something equivalent. + code_book : ndarray + The code book is usually generated using the k-means algorithm. + Each row of the array holds a different code, and the columns are + the features of the code. + + >>> # f0 f1 f2 f3 + >>> code_book = [ + ... [ 1., 2., 3., 4.], #c0 + ... [ 1., 2., 3., 4.], #c1 + ... [ 1., 2., 3., 4.]] #c2 + + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + + Returns + ------- + code : ndarray + A length M array holding the code book index for each observation. + dist : ndarray + The distortion (distance) between the observation and its nearest + code. 
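+
+    Notes
+    -----
+    Conceptually, for each row ``obs[i]`` this computes
+    ``code[i] = argmin_j ||obs[i] - code_book[j]||`` (Euclidean norm), and
+    ``dist[i]`` is that minimum distance. A small worked sketch of the same
+    assignment using only NumPy (illustrative; this is not the compiled
+    path `vq` actually dispatches to):
+
+    >>> import numpy as np
+    >>> obs = np.array([[1.9, 2.3, 1.7]])
+    >>> code_book = np.array([[1., 1., 1.], [2., 2., 2.]])
+    >>> d = np.linalg.norm(obs[:, None, :] - code_book[None, :, :], axis=2)
+    >>> int(np.argmin(d, axis=1)[0])
+    1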
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import vq
+    >>> code_book = np.array([[1., 1., 1.],
+    ...                       [2., 2., 2.]])
+    >>> features = np.array([[1.9, 2.3, 1.7],
+    ...                      [1.5, 2.5, 2.2],
+    ...                      [0.8, 0.6, 1.7]])
+    >>> vq(features, code_book)
+    (array([1, 1, 0], dtype=int32), array([0.43588989, 0.73484692, 0.83066239]))
+
+    """
+    xp = array_namespace(obs, code_book)
+    obs = _asarray(obs, xp=xp, check_finite=check_finite)
+    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
+    ct = xp.result_type(obs, code_book)
+
+    c_obs = xp.astype(obs, ct, copy=False)
+    c_code_book = xp.astype(code_book, ct, copy=False)
+
+    if xp.isdtype(ct, kind='real floating'):
+        c_obs = np.asarray(c_obs)
+        c_code_book = np.asarray(c_code_book)
+        result = _vq.vq(c_obs, c_code_book)
+        return xp.asarray(result[0]), xp.asarray(result[1])
+    return py_vq(obs, code_book, check_finite=False)
+
+
+def py_vq(obs, code_book, check_finite=True):
+    """ Python version of vq algorithm.
+
+    The algorithm computes the Euclidean distance between each
+    observation and every frame in the code_book.
+
+    Parameters
+    ----------
+    obs : ndarray
+        Expects a rank 2 array. Each row is one observation.
+    code_book : ndarray
+        Code book to use. Same format as obs. Should have the same number
+        of features (i.e., columns) as obs.
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    Returns
+    -------
+    code : ndarray
+        code[i] gives the label of the ith observation; its code is
+        code_book[code[i]].
+    min_dist : ndarray
+        min_dist[i] gives the distance between the ith observation and its
+        corresponding code.
+
+    Notes
+    -----
+    This function is slower than the C version but works for
+    all input types. If the inputs have the wrong types for the
+    C versions of the function, this one is called as a last resort.
+
+    It is about 20 times slower than the C version.
+
+    """
+    xp = array_namespace(obs, code_book)
+    obs = _asarray(obs, xp=xp, check_finite=check_finite)
+    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
+
+    if obs.ndim != code_book.ndim:
+        raise ValueError("Observation and code_book should have the same rank")
+
+    if obs.ndim == 1:
+        obs = obs[:, xp.newaxis]
+        code_book = code_book[:, xp.newaxis]
+
+    # Once `cdist` has array API support, this `xp.asarray` call can be removed
+    dist = xp.asarray(cdist(obs, code_book))
+    code = xp.argmin(dist, axis=1)
+    min_dist = xp.min(dist, axis=1)
+    return code, min_dist
+
+
+def _kmeans(obs, guess, thresh=1e-5, xp=None):
+    """ "raw" version of k-means.
+
+    Returns
+    -------
+    code_book
+        The lowest distortion codebook found.
+    avg_dist
+        The average distance an observation is from a code in the book.
+        Lower means the code_book matches the data better.
+
+    See Also
+    --------
+    kmeans : wrapper around k-means
+
+    Examples
+    --------
+    Note: not whitened in this example.
+
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import _kmeans
+    >>> features = np.array([[ 1.9,2.3],
+    ...                      [ 1.5,2.5],
+    ...                      [ 0.8,0.6],
+    ...                      [ 0.4,1.8],
+    ...                      [ 1.0,1.0]])
+    >>> book = np.array((features[0],features[2]))
+    >>> _kmeans(features,book)
+    (array([[ 1.7       ,  2.4       ],
+           [ 0.73333333,  1.13333333]]), 0.40563916697728591)
+
+    """
+    xp = np if xp is None else xp
+    code_book = guess
+    diff = xp.inf
+    prev_avg_dists = deque([diff], maxlen=2)
+    while diff > thresh:
+        # compute membership and distances between obs and code_book
+        obs_code, distort = vq(obs, code_book, check_finite=False)
+        prev_avg_dists.append(xp.mean(distort, axis=-1))
+        # recalc code_book as centroids of associated obs
+        obs = np.asarray(obs)
+        obs_code = np.asarray(obs_code)
+        code_book, has_members = _vq.update_cluster_means(obs, obs_code,
+                                                          code_book.shape[0])
+        obs = xp.asarray(obs)
+        obs_code = xp.asarray(obs_code)
+        code_book = xp.asarray(code_book)
+        has_members = xp.asarray(has_members)
+        code_book = code_book[has_members]
+        diff = xp.abs(prev_avg_dists[0] - prev_avg_dists[1])
+
+    return code_book, prev_avg_dists[1]
+
+
+def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True,
+           *, seed=None):
+    """
+    Performs k-means on a set of observation vectors forming k clusters.
+
+    The k-means algorithm adjusts the classification of the observations
+    into clusters and updates the cluster centroids until the position of
+    the centroids is stable over successive iterations. In this
+    implementation of the algorithm, the stability of the centroids is
+    determined by comparing the absolute value of the change in the average
+    Euclidean distance between the observations and their corresponding
+    centroids against a threshold. This yields
+    a code book mapping centroids to codes and vice versa.
+
+    Parameters
+    ----------
+    obs : ndarray
+        Each row of the M by N array is an observation vector. The
+        columns are the features seen during each observation.
+        The features must be whitened first with the `whiten` function.
+
+    k_or_guess : int or ndarray
+        The number of centroids to generate. A code is assigned to
+        each centroid, which is also the row index of the centroid
+        in the code_book matrix generated.
+
+        The initial k centroids are chosen by randomly selecting
+        observations from the observation matrix. Alternatively,
+        passing a k by N array specifies the initial k centroids.
+
+    iter : int, optional
+        The number of times to run k-means, returning the codebook
+        with the lowest distortion. This argument is ignored if
+        initial centroids are specified with an array for the
+        ``k_or_guess`` parameter. This parameter does not represent the
+        number of iterations of the k-means algorithm.
+
+    thresh : float, optional
+        Terminates the k-means algorithm if the change in
+        distortion since the last k-means iteration is less than
+        or equal to `thresh`.
+
+    check_finite : bool, optional
+        Whether to check that the input matrices contain only finite numbers.
+        Disabling may give a performance gain, but may result in problems
+        (crashes, non-termination) if the inputs do contain infinities or NaNs.
+        Default: True
+
+    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+        Seed for initializing the pseudo-random number generator.
+        If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
+        singleton is used.
+        If `seed` is an int, a new ``RandomState`` instance is used,
+        seeded with `seed`.
+        If `seed` is already a ``Generator`` or ``RandomState`` instance then
+        that instance is used.
+        The default is None.
+
+    Returns
+    -------
+    codebook : ndarray
+        A k by N array of k centroids. The ith centroid
+        codebook[i] is represented with the code i. The centroids
+        and codes generated represent the lowest distortion seen,
+        not necessarily the globally minimal distortion.
+        Note that the number of centroids is not necessarily the same as the
+        ``k_or_guess`` parameter, because centroids assigned to no observations
+        are removed during iterations.
+
+    distortion : float
+        The mean (non-squared) Euclidean distance between the observations
+        passed and the centroids generated. Note the difference to the standard
+        definition of distortion in the context of the k-means algorithm, which
+        is the sum of the squared distances.
+
+    See Also
+    --------
+    kmeans2 : a different implementation of k-means clustering
+        with more methods for generating initial centroids but without
+        using a distortion change threshold as a stopping criterion.
+
+    whiten : must be called prior to passing an observation matrix
+        to kmeans.
+
+    Notes
+    -----
+    For more functionalities or optimal performance, you can use
+    `sklearn.cluster.KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_.
+    `This <https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html>`_
+    is a benchmark result of several implementations.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.cluster.vq import vq, kmeans, whiten
+    >>> import matplotlib.pyplot as plt
+    >>> features = np.array([[ 1.9,2.3],
+    ...                      [ 1.5,2.5],
+    ...                      [ 0.8,0.6],
+    ...                      [ 0.4,1.8],
+    ...                      [ 0.1,0.1],
+    ...                      [ 0.2,1.8],
+    ...                      [ 2.0,0.5],
+    ...                      [ 0.3,1.5],
+    ...                      [ 1.0,1.0]])
+    >>> whitened = whiten(features)
+    >>> book = np.array((whitened[0],whitened[2]))
+    >>> kmeans(whitened,book)
+    (array([[ 2.3110306 ,  2.86287398],    # random
+           [ 0.93218041,  1.24398691]]), 0.85684700941625547)
+
+    >>> codes = 3
+    >>> kmeans(whitened,codes)
+    (array([[ 2.3110306 ,  2.86287398],    # random
+           [ 1.32544402,  0.65607529],
+           [ 0.40782893,  2.02786907]]), 0.5196582527686241)
+
+    >>> # Create 50 datapoints in two clusters a and b
+    >>> pts = 50
+    >>> rng = np.random.default_rng()
+    >>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
+    >>> b = rng.multivariate_normal([30, 10],
+    ...                             [[10, 2], [2, 1]],
+    ...                             size=pts)
+    >>> features = np.concatenate((a, b))
+    >>> # Whiten data
+    >>> whitened = whiten(features)
+    >>> # Find 2 clusters in the data
+    >>> codebook, distortion = kmeans(whitened, 2)
+    >>> # Plot whitened data and cluster centers in red
+    >>> plt.scatter(whitened[:, 0], whitened[:, 1])
+    >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
+    >>> plt.show()
+
+    """
+    if isinstance(k_or_guess, int):
+        xp = array_namespace(obs)
+    else:
+        xp = array_namespace(obs, k_or_guess)
+    obs = _asarray(obs, xp=xp, check_finite=check_finite)
+    guess = _asarray(k_or_guess, xp=xp, check_finite=check_finite)
+    if iter < 1:
+        raise ValueError("iter must be at least 1, got %s" % iter)
+
+    # Determine whether a count (scalar) or an initial guess (array) was passed.
+    if size(guess) != 1:
+        if size(guess) < 1:
+            raise ValueError("Asked for 0 clusters. Initial book was %s" %
+                             guess)
+        return _kmeans(obs, guess, thresh=thresh, xp=xp)
+
+    # k_or_guess is a scalar, now verify that it's an integer
+    k = int(guess)
+    if k != guess:
+        raise ValueError("If k_or_guess is a scalar, it must be an integer.")
+    if k < 1:
+        raise ValueError("Asked for %d clusters."
% k) + + rng = check_random_state(seed) + + # initialize best distance value to a large value + best_dist = xp.inf + for i in range(iter): + # the initial code book is randomly selected from observations + guess = _kpoints(obs, k, rng, xp) + book, dist = _kmeans(obs, guess, thresh=thresh, xp=xp) + if dist < best_dist: + best_book = book + best_dist = dist + return best_book, best_dist + + +def _kpoints(data, k, rng, xp): + """Pick k points at random in data (one row = one observation). + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 are assumed to describe one + dimensional data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. + + Returns + ------- + x : ndarray + A 'k' by 'N' containing the initial centroids + + """ + idx = rng.choice(data.shape[0], size=int(k), replace=False) + # convert to array with default integer dtype (avoids numpy#25607) + idx = xp.asarray(idx, dtype=xp.asarray([1]).dtype) + return xp.take(data, idx, axis=0) + + +def _krandinit(data, k, rng, xp): + """Returns k samples of a random variable whose parameters depend on data. + + More precisely, it returns k observations sampled from a Gaussian random + variable whose mean and covariances are the ones estimated from the data. + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D + data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. + + Returns + ------- + x : ndarray + A 'k' by 'N' containing the initial centroids + + """ + mu = xp.mean(data, axis=0) + k = np.asarray(k) + + if data.ndim == 1: + _cov = cov(data) + x = rng.standard_normal(size=k) + x = xp.asarray(x) + x *= xp.sqrt(_cov) + elif data.shape[1] > data.shape[0]: + # initialize when the covariance matrix is rank deficient + _, s, vh = xp.linalg.svd(data - mu, full_matrices=False) + x = rng.standard_normal(size=(k, size(s))) + x = xp.asarray(x) + sVh = s[:, None] * vh / xp.sqrt(data.shape[0] - xp.asarray(1.)) + x = x @ sVh + else: + _cov = atleast_nd(cov(data.T), ndim=2) + + # k rows, d cols (one row = one obs) + # Generate k sample of a random variable ~ Gaussian(mu, cov) + x = rng.standard_normal(size=(k, size(mu))) + x = xp.asarray(x) + x = x @ xp.linalg.cholesky(_cov).T + + x += mu + return x + + +def _kpp(data, k, rng, xp): + """ Picks k points in the data based on the kmeans++ method. + + Parameters + ---------- + data : ndarray + Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D + data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + rng : `numpy.random.Generator` or `numpy.random.RandomState` + Random number generator. + + Returns + ------- + init : ndarray + A 'k' by 'N' containing the initial centroids. + + References + ---------- + .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of + careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium + on Discrete Algorithms, 2007. 
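+
+    Notes
+    -----
+    Illustrative summary of the loop below: after the first centroid is
+    drawn uniformly, each subsequent centroid is drawn with probability
+    proportional to ``D2``, its squared distance to the nearest centroid
+    chosen so far (the "careful seeding" of [1]_), via inverse-transform
+    sampling on the cumulative probabilities ``cumprobs``.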
+ """ + + ndim = len(data.shape) + if ndim == 1: + data = data[:, None] + + dims = data.shape[1] + + init = xp.empty((int(k), dims)) + + for i in range(k): + if i == 0: + init[i, :] = data[rng_integers(rng, data.shape[0]), :] + + else: + D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0) + probs = D2/D2.sum() + cumprobs = probs.cumsum() + r = rng.uniform() + cumprobs = np.asarray(cumprobs) + init[i, :] = data[np.searchsorted(cumprobs, r), :] + + if ndim == 1: + init = init[:, 0] + return init + + +_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp} + + +def _missing_warn(): + """Print a warning when called.""" + warnings.warn("One of the clusters is empty. " + "Re-run kmeans with a different initialization.", + stacklevel=3) + + +def _missing_raise(): + """Raise a ClusterError when called.""" + raise ClusterError("One of the clusters is empty. " + "Re-run kmeans with a different initialization.") + + +_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise} + + +def kmeans2(data, k, iter=10, thresh=1e-5, minit='random', + missing='warn', check_finite=True, *, seed=None): + """ + Classify a set of observations into k clusters using the k-means algorithm. + + The algorithm attempts to minimize the Euclidean distance between + observations and centroids. Several initialization methods are + included. + + Parameters + ---------- + data : ndarray + A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length + 'M' array of 'M' 1-D observations. + k : int or ndarray + The number of clusters to form as well as the number of + centroids to generate. If `minit` initialization string is + 'matrix', or if a ndarray is given instead, it is + interpreted as initial cluster to use instead. + iter : int, optional + Number of iterations of the k-means algorithm to run. Note + that this differs in meaning from the iters parameter to + the kmeans function. + thresh : float, optional + (not used yet) + minit : str, optional + Method for initialization. Available methods are 'random', + 'points', '++' and 'matrix': + + 'random': generate k centroids from a Gaussian with mean and + variance estimated from the data. + + 'points': choose k observations (rows) at random from data for + the initial centroids. + + '++': choose k observations accordingly to the kmeans++ method + (careful seeding) + + 'matrix': interpret the k parameter as a k by M (or length k + array for 1-D data) array of initial centroids. + missing : str, optional + Method to deal with empty clusters. Available methods are + 'warn' and 'raise': + + 'warn': give a warning and continue. + + 'raise': raise an ClusterError and terminate the algorithm. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default: True + seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional + Seed for initializing the pseudo-random number generator. + If `seed` is None (or `numpy.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + The default is None. + + Returns + ------- + centroid : ndarray + A 'k' by 'N' array of centroids found at the last iteration of + k-means. 
+ label : ndarray + label[i] is the code or index of the centroid the + ith observation is closest to. + + See Also + -------- + kmeans + + References + ---------- + .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of + careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium + on Discrete Algorithms, 2007. + + Examples + -------- + >>> from scipy.cluster.vq import kmeans2 + >>> import matplotlib.pyplot as plt + >>> import numpy as np + + Create z, an array with shape (100, 2) containing a mixture of samples + from three multivariate normal distributions. + + >>> rng = np.random.default_rng() + >>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45) + >>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30) + >>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25) + >>> z = np.concatenate((a, b, c)) + >>> rng.shuffle(z) + + Compute three clusters. + + >>> centroid, label = kmeans2(z, 3, minit='points') + >>> centroid + array([[ 2.22274463, -0.61666946], # may vary + [ 0.54069047, 5.86541444], + [ 6.73846769, 4.01991898]]) + + How many points are in each cluster? + + >>> counts = np.bincount(label) + >>> counts + array([29, 51, 20]) # may vary + + Plot the clusters. + + >>> w0 = z[label == 0] + >>> w1 = z[label == 1] + >>> w2 = z[label == 2] + >>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0') + >>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1') + >>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2') + >>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids') + >>> plt.axis('equal') + >>> plt.legend(shadow=True) + >>> plt.show() + + """ + if int(iter) < 1: + raise ValueError("Invalid iter (%s), " + "must be a positive integer." 
% iter) + try: + miss_meth = _valid_miss_meth[missing] + except KeyError as e: + raise ValueError(f"Unknown missing method {missing!r}") from e + + if isinstance(k, int): + xp = array_namespace(data) + else: + xp = array_namespace(data, k) + data = _asarray(data, xp=xp, check_finite=check_finite) + code_book = copy(k, xp=xp) + if data.ndim == 1: + d = 1 + elif data.ndim == 2: + d = data.shape[1] + else: + raise ValueError("Input of rank > 2 is not supported.") + + if size(data) < 1 or size(code_book) < 1: + raise ValueError("Empty input is not supported.") + + # If k is not a single value, it should be compatible with data's shape + if minit == 'matrix' or size(code_book) > 1: + if data.ndim != code_book.ndim: + raise ValueError("k array doesn't match data rank") + nc = code_book.shape[0] + if data.ndim > 1 and code_book.shape[1] != d: + raise ValueError("k array doesn't match data dimension") + else: + nc = int(code_book) + + if nc < 1: + raise ValueError("Cannot ask kmeans2 for %d clusters" + " (k was %s)" % (nc, code_book)) + elif nc != code_book: + warnings.warn("k was not an integer, was converted.", stacklevel=2) + + try: + init_meth = _valid_init_meth[minit] + except KeyError as e: + raise ValueError(f"Unknown init method {minit!r}") from e + else: + rng = check_random_state(seed) + code_book = init_meth(data, code_book, rng, xp) + + data = np.asarray(data) + code_book = np.asarray(code_book) + for i in range(iter): + # Compute the nearest neighbor for each obs using the current code book + label = vq(data, code_book, check_finite=check_finite)[0] + # Update the code book by computing centroids + new_code_book, has_members = _vq.update_cluster_means(data, label, nc) + if not has_members.all(): + miss_meth() + # Set the empty clusters to their previous positions + new_code_book[~has_members] = code_book[~has_members] + code_book = new_code_book + + return xp.asarray(code_book), xp.asarray(label) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/conftest.py b/llmeval-env/lib/python3.10/site-packages/scipy/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..577987a4ac744dc598f05cfea68ce357917a8874 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/conftest.py @@ -0,0 +1,238 @@ +# Pytest customization +import json +import os +import warnings +import tempfile + +import numpy as np +import numpy.testing as npt +import pytest +import hypothesis + +from scipy._lib._fpumode import get_fpu_mode +from scipy._lib._testutils import FPUModeChangeWarning +from scipy._lib import _pep440 +from scipy._lib._array_api import SCIPY_ARRAY_API, SCIPY_DEVICE + + +def pytest_configure(config): + config.addinivalue_line("markers", + "slow: Tests that are very slow.") + config.addinivalue_line("markers", + "xslow: mark test as extremely slow (not run unless explicitly requested)") + config.addinivalue_line("markers", + "xfail_on_32bit: mark test as failing on 32-bit platforms") + try: + import pytest_timeout # noqa:F401 + except Exception: + config.addinivalue_line( + "markers", 'timeout: mark a test for a non-default timeout') + config.addinivalue_line("markers", + "skip_if_array_api(*backends, reasons=None, np_only=False, cpu_only=False): " + "mark the desired skip configuration for the `skip_if_array_api` fixture.") + + +def _get_mark(item, name): + if _pep440.parse(pytest.__version__) >= _pep440.Version("3.6.0"): + mark = item.get_closest_marker(name) + else: + mark = item.get_marker(name) + return mark + + +def pytest_runtest_setup(item): + mark = 
_get_mark(item, "xslow") + if mark is not None: + try: + v = int(os.environ.get('SCIPY_XSLOW', '0')) + except ValueError: + v = False + if not v: + pytest.skip("very slow test; " + "set environment variable SCIPY_XSLOW=1 to run it") + mark = _get_mark(item, 'xfail_on_32bit') + if mark is not None and np.intp(0).itemsize < 8: + pytest.xfail(f'Fails on our 32-bit test platform(s): {mark.args[0]}') + + # Older versions of threadpoolctl have an issue that may lead to this + # warning being emitted, see gh-14441 + with npt.suppress_warnings() as sup: + sup.filter(pytest.PytestUnraisableExceptionWarning) + + try: + from threadpoolctl import threadpool_limits + + HAS_THREADPOOLCTL = True + except Exception: # observed in gh-14441: (ImportError, AttributeError) + # Optional dependency only. All exceptions are caught, for robustness + HAS_THREADPOOLCTL = False + + if HAS_THREADPOOLCTL: + # Set the number of openmp threads based on the number of workers + # xdist is using to prevent oversubscription. Simplified version of what + # sklearn does (it can rely on threadpoolctl and its builtin OpenMP helper + # functions) + try: + xdist_worker_count = int(os.environ['PYTEST_XDIST_WORKER_COUNT']) + except KeyError: + # raises when pytest-xdist is not installed + return + + if not os.getenv('OMP_NUM_THREADS'): + max_openmp_threads = os.cpu_count() // 2 # use nr of physical cores + threads_per_worker = max(max_openmp_threads // xdist_worker_count, 1) + try: + threadpool_limits(threads_per_worker, user_api='blas') + except Exception: + # May raise AttributeError for older versions of OpenBLAS. + # Catch any error for robustness. + return + + +@pytest.fixture(scope="function", autouse=True) +def check_fpu_mode(request): + """ + Check FPU mode was not changed during the test. + """ + old_mode = get_fpu_mode() + yield + new_mode = get_fpu_mode() + + if old_mode != new_mode: + warnings.warn(f"FPU mode changed from {old_mode:#x} to {new_mode:#x} during " + "the test", + category=FPUModeChangeWarning, stacklevel=0) + + +# Array API backend handling +xp_available_backends = {'numpy': np} + +if SCIPY_ARRAY_API and isinstance(SCIPY_ARRAY_API, str): + # fill the dict of backends with available libraries + try: + import array_api_strict + xp_available_backends.update({'array_api_strict': array_api_strict}) + except ImportError: + pass + + try: + import torch # type: ignore[import] + xp_available_backends.update({'pytorch': torch}) + # can use `mps` or `cpu` + torch.set_default_device(SCIPY_DEVICE) + except ImportError: + pass + + try: + import cupy # type: ignore[import] + xp_available_backends.update({'cupy': cupy}) + except ImportError: + pass + + # by default, use all available backends + if SCIPY_ARRAY_API.lower() not in ("1", "true"): + SCIPY_ARRAY_API_ = json.loads(SCIPY_ARRAY_API) + + if 'all' in SCIPY_ARRAY_API_: + pass # same as True + else: + # only select a subset of backend by filtering out the dict + try: + xp_available_backends = { + backend: xp_available_backends[backend] + for backend in SCIPY_ARRAY_API_ + } + except KeyError: + msg = f"'--array-api-backend' must be in {xp_available_backends.keys()}" + raise ValueError(msg) + +if 'cupy' in xp_available_backends: + SCIPY_DEVICE = 'cuda' + +array_api_compatible = pytest.mark.parametrize("xp", xp_available_backends.values()) + + +@pytest.fixture +def skip_if_array_api(xp, request): + """ + Skip based on the ``skip_if_array_api`` marker. + + Parameters + ---------- + *backends : tuple + Backends to skip, e.g. ``("array_api_strict", "torch")``. 
+        These are overridden when ``np_only`` is ``True``, and are not
+        necessary to provide for non-CPU backends when ``cpu_only`` is ``True``.
+    reasons : list, optional
+        A list of reasons for each skip. When ``np_only`` is ``True``,
+        this should be a singleton list. Otherwise, this should be a list
+        of reasons, one for each corresponding backend in ``backends``.
+        If not provided, default reasons are used. Note that it is not
+        possible to specify a custom reason with ``cpu_only``.
+        Default: ``None``.
+    np_only : bool, optional
+        When ``True``, the test is skipped for all backends other
+        than the default NumPy backend. There is no need to provide
+        any ``backends`` in this case. To specify a reason, pass a
+        singleton list to ``reasons``. Default: ``False``.
+    cpu_only : bool, optional
+        When ``True``, the test is skipped on non-CPU devices.
+        There is no need to provide any ``backends`` in this case,
+        but any ``backends`` will also be skipped on the CPU.
+        Default: ``False``.
+    """
+    if "skip_if_array_api" not in request.keywords:
+        return
+    backends = request.keywords["skip_if_array_api"].args
+    kwargs = request.keywords["skip_if_array_api"].kwargs
+    np_only = kwargs.get("np_only", False)
+    cpu_only = kwargs.get("cpu_only", False)
+    if np_only:
+        reasons = kwargs.get("reasons", ["do not run with non-NumPy backends."])
+        reason = reasons[0]
+        if xp.__name__ != 'numpy':
+            pytest.skip(reason=reason)
+        return
+    if cpu_only:
+        reason = "do not run with `SCIPY_ARRAY_API` set and not on CPU"
+        if SCIPY_ARRAY_API and SCIPY_DEVICE != 'cpu':
+            if xp.__name__ == 'cupy':
+                pytest.skip(reason=reason)
+            elif xp.__name__ == 'torch':
+                if 'cpu' not in torch.empty(0).device.type:
+                    pytest.skip(reason=reason)
+    if backends is not None:
+        reasons = kwargs.get("reasons", False)
+        for i, backend in enumerate(backends):
+            if xp.__name__ == backend:
+                if not reasons:
+                    reason = f"do not run with array API backend: {backend}"
+                else:
+                    reason = reasons[i]
+                pytest.skip(reason=reason)
+
+
+# Following the approach of NumPy's conftest.py...
+# Use a known and persistent tmpdir for hypothesis' caches, which
+# can be automatically cleared by the OS or user.
+hypothesis.configuration.set_hypothesis_home_dir(
+    os.path.join(tempfile.gettempdir(), ".hypothesis")
+)
+
+# We register two custom profiles for SciPy - for details see
+# https://hypothesis.readthedocs.io/en/latest/settings.html
+# The first is designed for our own CI runs; the second also
+# forces determinism and is designed for use via scipy.test()
+hypothesis.settings.register_profile(
+    name="nondeterministic", deadline=None, print_blob=True,
+)
+hypothesis.settings.register_profile(
+    name="deterministic",
+    deadline=None, print_blob=True, database=None, derandomize=True,
+    suppress_health_check=list(hypothesis.HealthCheck),
+)
+
+# Profile is currently set by environment variable `SCIPY_HYPOTHESIS_PROFILE`
+# In the future, it would be good to work the choice into dev.py.
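+#
+# Illustrative invocation (assumes pytest as the runner; the path is just an
+# example):
+#
+#     SCIPY_HYPOTHESIS_PROFILE=nondeterministic python -m pytest scipy/
+#
+# selects the CI-oriented profile registered above instead of the default
+# "deterministic" one picked below.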
+SCIPY_HYPOTHESIS_PROFILE = os.environ.get("SCIPY_HYPOTHESIS_PROFILE", + "deterministic") +hypothesis.settings.load_profile(SCIPY_HYPOTHESIS_PROFILE) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c8f394be2a3bd89691daae51841594e01154516 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0b7300240f034bac816d59b650193b56845e654 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..741fe7fb3a167783ce1c030ae46322faf2de2768 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1775a475c7477c6d63690fccd709ac72fc1f707b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48fec4eba3f5292ada399d083412371203dd22cb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..461f80403f2d9330b36dc6343bbedaf0903aff77 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55aeed66664178e92c70bb13b5adad6baac01649 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc 
differ
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..f94ebbe71b5ca91b87865153db572db430205443
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py
@@ -0,0 +1,123 @@
+from scipy.datasets._registry import registry
+from scipy.datasets._fetchers import data_fetcher
+from scipy.datasets._utils import _clear_cache
+from scipy.datasets import ascent, face, electrocardiogram, download_all
+from numpy.testing import assert_equal, assert_almost_equal
+import os
+import pytest
+
+try:
+    import pooch
+except ImportError:
+    raise ImportError("Missing optional dependency 'pooch' required "
+                      "for scipy.datasets module. Please use pip or "
+                      "conda to install 'pooch'.")
+
+
+data_dir = data_fetcher.path  # type: ignore
+
+
+def _has_hash(path, expected_hash):
+    """Check if the provided path has the expected hash."""
+    if not os.path.exists(path):
+        return False
+    return pooch.file_hash(path) == expected_hash
+
+
+class TestDatasets:
+
+    @pytest.fixture(scope='module', autouse=True)
+    def test_download_all(self):
+        # This fixture requires INTERNET CONNECTION
+
+        # test_setup phase
+        download_all()
+
+        yield
+
+    def test_existence_all(self):
+        assert len(os.listdir(data_dir)) >= len(registry)
+
+    def test_ascent(self):
+        assert_equal(ascent().shape, (512, 512))
+
+        # hash check
+        assert _has_hash(os.path.join(data_dir, "ascent.dat"),
+                         registry["ascent.dat"])
+
+    def test_face(self):
+        assert_equal(face().shape, (768, 1024, 3))
+
+        # hash check
+        assert _has_hash(os.path.join(data_dir, "face.dat"),
+                         registry["face.dat"])
+
+    def test_electrocardiogram(self):
+        # Test shape, dtype and stats of signal
+        ecg = electrocardiogram()
+        assert_equal(ecg.dtype, float)
+        assert_equal(ecg.shape, (108000,))
+        assert_almost_equal(ecg.mean(), -0.16510875)
+        assert_almost_equal(ecg.std(), 0.5992473991177294)
+
+        # hash check
+        assert _has_hash(os.path.join(data_dir, "ecg.dat"),
+                         registry["ecg.dat"])
+
+
+def test_clear_cache(tmp_path):
+    # Note: `tmp_path` is a pytest fixture, it handles cleanup
+    dummy_basepath = tmp_path / "dummy_cache_dir"
+    dummy_basepath.mkdir()
+
+    # Create four dummy dataset files for dummy dataset methods
+    dummy_method_map = {}
+    for i in range(4):
+        dummy_method_map[f"data{i}"] = [f"data{i}.dat"]
+        data_filepath = dummy_basepath / f"data{i}.dat"
+        data_filepath.write_text("")
+
+    # clear files associated with the single dataset method data0;
+    # also test callable argument instead of list of callables
+    def data0():
+        pass
+    _clear_cache(datasets=data0, cache_dir=dummy_basepath,
+                 method_map=dummy_method_map)
+    assert not os.path.exists(dummy_basepath/"data0.dat")
+
+    # clear files associated with multiple dataset methods "data1" and "data2"
+    def data1():
+        pass
+
+    def data2():
+        pass
+    _clear_cache(datasets=[data1, data2], cache_dir=dummy_basepath,
+                 method_map=dummy_method_map)
+    assert not os.path.exists(dummy_basepath/"data1.dat")
+    assert not os.path.exists(dummy_basepath/"data2.dat")
+
+    # clear multiple dataset files "data4_0.dat" and "data4_1.dat"
+    # associated with dataset method "data4"
+    def data4():
+        pass
+    # create files
+    (dummy_basepath / "data4_0.dat").write_text("")
+    (dummy_basepath / "data4_1.dat").write_text("")
+
+    dummy_method_map["data4"] = ["data4_0.dat", "data4_1.dat"]
+    _clear_cache(datasets=[data4], cache_dir=dummy_basepath,
+                 method_map=dummy_method_map)
+    assert not os.path.exists(dummy_basepath/"data4_0.dat")
+    assert not os.path.exists(dummy_basepath/"data4_1.dat")
+
+    # a wrong dataset method should raise a ValueError since it
+    # doesn't exist in the dummy_method_map
+    def data5():
+        pass
+    with pytest.raises(ValueError):
+        _clear_cache(datasets=[data5], cache_dir=dummy_basepath,
+                     method_map=dummy_method_map)
+
+    # remove all dataset cache
+    _clear_cache(datasets=None, cache_dir=dummy_basepath)
+    assert not os.path.exists(dummy_basepath)
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/linalg.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/linalg.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..c7c49f9ad8f4c90ee93725fdcdbe73c2e7f5aacd
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/linalg.pxd
@@ -0,0 +1 @@
+from scipy.linalg cimport cython_blas, cython_lapack
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/optimize.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/optimize.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..2402eeb020d34ad8b82e287e32545423911ff66c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/optimize.pxd
@@ -0,0 +1 @@
+from .optimize cimport cython_optimize
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/signal/wavelets.py b/llmeval-env/lib/python3.10/site-packages/scipy/signal/wavelets.py
new file mode 100644
index 0000000000000000000000000000000000000000..0391ec2ab35dd9787e7f254765c012808d40996c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/signal/wavelets.py
@@ -0,0 +1,20 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.signal` namespace for importing the functions
+# included below.
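+#
+# Illustrative sketch of what that means in practice (names taken from
+# ``__all__`` below): ``from scipy.signal.wavelets import qmf`` still
+# resolves, but goes through ``_sub_module_deprecation``, which emits a
+# ``DeprecationWarning`` pointing at the supported ``scipy.signal``
+# namespace before returning the object from the private ``_wavelets``
+# module.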
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt', + 'eig', 'comb', 'convolve' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="signal", module="wavelets", + private_modules=["_wavelets"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/special.pxd b/llmeval-env/lib/python3.10/site-packages/scipy/special.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1daa9fb379572aac4bc9b6d74330a18c5c52bf79 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/special.pxd @@ -0,0 +1 @@ +from scipy.special cimport cython_special diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/version.py b/llmeval-env/lib/python3.10/site-packages/scipy/version.py new file mode 100644 index 0000000000000000000000000000000000000000..db73d1943ef5d7a2fcdd510aea849c3b4901984b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/version.py @@ -0,0 +1,12 @@ +# THIS FILE IS GENERATED DURING THE SCIPY BUILD +# See tools/version_utils.py for details + +short_version = '1.13.0' +version = '1.13.0' +full_version = '1.13.0' +git_revision = '7dcd8c5' +commit_count = '1580' +release = True + +if not release: + version = full_version diff --git a/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..9da6d09f0e1e81690a0a9f946721401ad2bbebc1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/METADATA @@ -0,0 +1,154 @@ +Metadata-Version: 2.1 +Name: urllib3 +Version: 2.2.1 +Summary: HTTP library with thread-safe connection pooling, file post, and more. 
+Project-URL: Changelog, https://github.com/urllib3/urllib3/blob/main/CHANGES.rst +Project-URL: Documentation, https://urllib3.readthedocs.io +Project-URL: Code, https://github.com/urllib3/urllib3 +Project-URL: Issue tracker, https://github.com/urllib3/urllib3/issues +Author-email: Andrey Petrov +Maintainer-email: Seth Michael Larson , Quentin Pradet , Illia Volochii +License-File: LICENSE.txt +Keywords: filepost,http,httplib,https,pooling,ssl,threadsafe,urllib +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: Software Development :: Libraries +Requires-Python: >=3.8 +Provides-Extra: brotli +Requires-Dist: brotli>=1.0.9; (platform_python_implementation == 'CPython') and extra == 'brotli' +Requires-Dist: brotlicffi>=0.8.0; (platform_python_implementation != 'CPython') and extra == 'brotli' +Provides-Extra: h2 +Requires-Dist: h2<5,>=4; extra == 'h2' +Provides-Extra: socks +Requires-Dist: pysocks!=1.5.7,<2.0,>=1.5.6; extra == 'socks' +Provides-Extra: zstd +Requires-Dist: zstandard>=0.18.0; extra == 'zstd' +Description-Content-Type: text/markdown + +

+
+![urllib3](https://github.com/urllib3/urllib3/raw/main/docs/_static/banner_github.svg)
+
+*Badges: PyPI Version · Python Versions · Join our Discord · Coverage Status · Build Status on GitHub · Documentation Status · OpenSSF Scorecard · SLSA 3 · CII Best Practices*

+ +urllib3 is a powerful, *user-friendly* HTTP client for Python. Much of the +Python ecosystem already uses urllib3 and you should too. +urllib3 brings many critical features that are missing from the Python +standard libraries: + +- Thread safety. +- Connection pooling. +- Client-side SSL/TLS verification. +- File uploads with multipart encoding. +- Helpers for retrying requests and dealing with HTTP redirects. +- Support for gzip, deflate, brotli, and zstd encoding. +- Proxy support for HTTP and SOCKS. +- 100% test coverage. + +urllib3 is powerful and easy to use: + +```python3 +>>> import urllib3 +>>> resp = urllib3.request("GET", "http://httpbin.org/robots.txt") +>>> resp.status +200 +>>> resp.data +b"User-agent: *\nDisallow: /deny\n" +``` + +## Installing + +urllib3 can be installed with [pip](https://pip.pypa.io): + +```bash +$ python -m pip install urllib3 +``` + +Alternatively, you can grab the latest source code from [GitHub](https://github.com/urllib3/urllib3): + +```bash +$ git clone https://github.com/urllib3/urllib3.git +$ cd urllib3 +$ pip install . +``` + + +## Documentation + +urllib3 has usage and reference documentation at [urllib3.readthedocs.io](https://urllib3.readthedocs.io). + + +## Community + +urllib3 has a [community Discord channel](https://discord.gg/urllib3) for asking questions and +collaborating with other contributors. Drop by and say hello 👋 + + +## Contributing + +urllib3 happily accepts contributions. Please see our +[contributing documentation](https://urllib3.readthedocs.io/en/latest/contributing.html) +for some tips on getting started. + + +## Security Disclosures + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure with maintainers. + + +## Maintainers + +- [@sethmlarson](https://github.com/sethmlarson) (Seth M. Larson) +- [@pquentin](https://github.com/pquentin) (Quentin Pradet) +- [@illia-v](https://github.com/illia-v) (Illia Volochii) +- [@theacodes](https://github.com/theacodes) (Thea Flowers) +- [@haikuginger](https://github.com/haikuginger) (Jess Shapiro) +- [@lukasa](https://github.com/lukasa) (Cory Benfield) +- [@sigmavirus24](https://github.com/sigmavirus24) (Ian Stapleton Cordasco) +- [@shazow](https://github.com/shazow) (Andrey Petrov) + +👋 + + +## Sponsorship + +If your company benefits from this library, please consider [sponsoring its +development](https://urllib3.readthedocs.io/en/latest/sponsors.html). + + +## For Enterprise + +Professional support for urllib3 is available as part of the [Tidelift +Subscription][1]. Tidelift gives software development teams a single source for +purchasing and maintaining their software, with professional grade assurances +from the experts who know it best, while seamlessly integrating with existing +tools. 
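+
+## Quick Example: Retries and Timeouts
+
+A minimal, illustrative sketch of the retry and timeout helpers listed under
+the features above, using the public `urllib3.Retry` and `urllib3.Timeout`
+classes (the URL is only an example endpoint):
+
+```python3
+import urllib3
+
+# Retry the request up to 3 times with backoff between attempts, and give
+# up if the whole exchange takes longer than 2.5 seconds.
+resp = urllib3.request(
+    "GET",
+    "https://httpbin.org/robots.txt",
+    retries=urllib3.Retry(total=3, backoff_factor=0.5),
+    timeout=urllib3.Timeout(total=2.5),
+)
+print(resp.status)
+```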
+ +[1]: https://tidelift.com/subscription/pkg/pypi-urllib3?utm_source=pypi-urllib3&utm_medium=referral&utm_campaign=readme diff --git a/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..2f90c8a4d9b3dfa426f453fdf3f47ade1614e09e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/RECORD @@ -0,0 +1,75 @@ +urllib3-2.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +urllib3-2.2.1.dist-info/METADATA,sha256=uROmjQwfAbwRYjV9PMdc5JF5NA3kRkpoKafPkNzybfc,6434 +urllib3-2.2.1.dist-info/RECORD,, +urllib3-2.2.1.dist-info/WHEEL,sha256=TJPnKdtrSue7xZ_AVGkp9YXcvDrobsjBds1du3Nx6dc,87 +urllib3-2.2.1.dist-info/licenses/LICENSE.txt,sha256=Ew46ZNX91dCWp1JpRjSn2d8oRGnehuVzIQAmgEHj1oY,1093 +urllib3/__init__.py,sha256=JMo1tg1nIV1AeJ2vENC_Txfl0e5h6Gzl9DGVk1rWRbo,6979 +urllib3/__pycache__/__init__.cpython-310.pyc,, +urllib3/__pycache__/_base_connection.cpython-310.pyc,, +urllib3/__pycache__/_collections.cpython-310.pyc,, +urllib3/__pycache__/_request_methods.cpython-310.pyc,, +urllib3/__pycache__/_version.cpython-310.pyc,, +urllib3/__pycache__/connection.cpython-310.pyc,, +urllib3/__pycache__/connectionpool.cpython-310.pyc,, +urllib3/__pycache__/exceptions.cpython-310.pyc,, +urllib3/__pycache__/fields.cpython-310.pyc,, +urllib3/__pycache__/filepost.cpython-310.pyc,, +urllib3/__pycache__/http2.cpython-310.pyc,, +urllib3/__pycache__/poolmanager.cpython-310.pyc,, +urllib3/__pycache__/response.cpython-310.pyc,, +urllib3/_base_connection.py,sha256=p-DOG_Me7-sJXO1R9VgDpNmdVU_kIS8VtaC7ptEllA0,5640 +urllib3/_collections.py,sha256=vzKA-7X-9resOamEWq52uV1nHshChjbYDvz47H0mMjw,17400 +urllib3/_request_methods.py,sha256=ucEpHQyQf06b9o1RxKLkCpzGH0ct-v7X2xGpU6rmmlo,9984 +urllib3/_version.py,sha256=12idLAcGmrAURPX52rGioBo33oQ__-ENJEdeqHvUUZg,98 +urllib3/connection.py,sha256=zFgaaoqrICsl7-kBp-_4va9m82sYhioAuy4-4iDpK0I,34704 +urllib3/connectionpool.py,sha256=XjTfYowLwN5ZzRMO41_OTbGNX4ANifgYVpWsVMRuC00,43556 +urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +urllib3/contrib/__pycache__/__init__.cpython-310.pyc,, +urllib3/contrib/__pycache__/pyopenssl.cpython-310.pyc,, +urllib3/contrib/__pycache__/socks.cpython-310.pyc,, +urllib3/contrib/emscripten/__init__.py,sha256=u6KNgzjlFZbuAAXa_ybCR7gQ71VJESnF-IIdDA73brw,733 +urllib3/contrib/emscripten/__pycache__/__init__.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/connection.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/fetch.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/request.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/response.cpython-310.pyc,, +urllib3/contrib/emscripten/connection.py,sha256=kaBe2tWt7Yy9vNUFRBV7CSyDnfhCYILGxju9KTZj8Sw,8755 +urllib3/contrib/emscripten/emscripten_fetch_worker.js,sha256=CDfYF_9CDobtx2lGidyJ1zjDEvwNT5F-dchmVWXDh0E,3655 +urllib3/contrib/emscripten/fetch.py,sha256=ymwJlHBBuw6WTpKgPHpdmmrNBxlsr75HqoD4Rn27YXk,14131 +urllib3/contrib/emscripten/request.py,sha256=mL28szy1KvE3NJhWor5jNmarp8gwplDU-7gwGZY5g0Q,566 +urllib3/contrib/emscripten/response.py,sha256=wIDmdJ4doFWqLl5s86l9n0V70gFjQ2HWaPgz69jM52E,9546 +urllib3/contrib/pyopenssl.py,sha256=X31eCYGwB09EkAHX8RhDKC0X0Ki7d0cCVWoMJZUM5bQ,19161 +urllib3/contrib/socks.py,sha256=gFS2-zOw4_vLGpUvExOf3fNVT8liz6vhM2t6lBPn3CY,7572 +urllib3/exceptions.py,sha256=RDaiudtR7rqbVKTKpLSgZBBtwaIqV7eZtervZV_mZag,9393 
+urllib3/fields.py,sha256=8vi0PeRo_pE5chPmJA07LZtMkVls4UrBS1k2xM506jM,10843 +urllib3/filepost.py,sha256=-9qJT11cNGjO9dqnI20-oErZuTvNaM18xZZPCjZSbOE,2395 +urllib3/http2.py,sha256=4QQcjTM9UYOQZe0r8KnA8anU9ST4p_s3SB3gRTueyPc,7480 +urllib3/poolmanager.py,sha256=fcC3OwjFKxha06NsOORwbZOzrVt1pyY-bNCbKiqC0l8,22935 +urllib3/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93 +urllib3/response.py,sha256=lmvseToQbkLXuFyA3jcSyCPjTgSfa6YPA4xUhVqq8QI,43874 +urllib3/util/__init__.py,sha256=-qeS0QceivazvBEKDNFCAI-6ACcdDOE4TMvo7SLNlAQ,1001 +urllib3/util/__pycache__/__init__.cpython-310.pyc,, +urllib3/util/__pycache__/connection.cpython-310.pyc,, +urllib3/util/__pycache__/proxy.cpython-310.pyc,, +urllib3/util/__pycache__/request.cpython-310.pyc,, +urllib3/util/__pycache__/response.cpython-310.pyc,, +urllib3/util/__pycache__/retry.cpython-310.pyc,, +urllib3/util/__pycache__/ssl_.cpython-310.pyc,, +urllib3/util/__pycache__/ssl_match_hostname.cpython-310.pyc,, +urllib3/util/__pycache__/ssltransport.cpython-310.pyc,, +urllib3/util/__pycache__/timeout.cpython-310.pyc,, +urllib3/util/__pycache__/url.cpython-310.pyc,, +urllib3/util/__pycache__/util.cpython-310.pyc,, +urllib3/util/__pycache__/wait.cpython-310.pyc,, +urllib3/util/connection.py,sha256=QeUUEuNmhznpuKNPL-B0IVOkMdMCu8oJX62OC0Vpzug,4462 +urllib3/util/proxy.py,sha256=seP8-Q5B6bB0dMtwPj-YcZZQ30vHuLqRu-tI0JZ2fzs,1148 +urllib3/util/request.py,sha256=PQnBmKUHMQ0hQQ41uhbLNAeA24ke60m6zeiwfwocpGo,8102 +urllib3/util/response.py,sha256=vQE639uoEhj1vpjEdxu5lNIhJCSUZkd7pqllUI0BZOA,3374 +urllib3/util/retry.py,sha256=WB-7x1m7fQH_-Qqtrk2OGvz93GvBTxc-pRn8Vf3p4mg,18384 +urllib3/util/ssl_.py,sha256=FeymdS68RggEROwMB9VLGSqLHq2hRUKnIbQC_bCpGJI,19109 +urllib3/util/ssl_match_hostname.py,sha256=gaWqixoYtQ_GKO8fcRGFj3VXeMoqyxQQuUTPgWeiL_M,5812 +urllib3/util/ssltransport.py,sha256=SF__JQXVcHBQniFJZp3P9q-UeHM310WVwcBwqT9dCLE,9034 +urllib3/util/timeout.py,sha256=4eT1FVeZZU7h7mYD1Jq2OXNe4fxekdNvhoWUkZusRpA,10346 +urllib3/util/url.py,sha256=wHORhp80RAXyTlAIkTqLFzSrkU7J34ZDxX-tN65MBZk,15213 +urllib3/util/util.py,sha256=j3lbZK1jPyiwD34T8IgJzdWEZVT-4E-0vYIJi9UjeNA,1146 +urllib3/util/wait.py,sha256=_ph8IrUR3sqPqi0OopQgJUlH4wzkGeM5CiyA7XGGtmI,4423 diff --git a/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..5998f3aab327ceb8cb346647a3461e220359aebf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.21.1 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/licenses/LICENSE.txt b/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6183d0276b26c5b87aecccf8d0d5bcd7b1148d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/urllib3-2.2.1.dist-info/licenses/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2008-2020 Andrey Petrov and contributors. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.