diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/__init__.py b/llmeval-env/lib/python3.10/site-packages/accelerate/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3da5cba7c1ab11b03e07439cdbadf65854f9db53
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/accelerate/__init__.py
@@ -0,0 +1,48 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+__version__ = "0.30.0"
+
+from .accelerator import Accelerator
+from .big_modeling import (
+    cpu_offload,
+    cpu_offload_with_hook,
+    disk_offload,
+    dispatch_model,
+    init_empty_weights,
+    init_on_device,
+    load_checkpoint_and_dispatch,
+)
+from .data_loader import skip_first_batches
+from .inference import prepare_pippy
+from .launchers import debug_launcher, notebook_launcher
+from .state import PartialState
+from .utils import (
+    AutocastKwargs,
+    DataLoaderConfiguration,
+    DeepSpeedPlugin,
+    DistributedDataParallelKwargs,
+    DistributedType,
+    FullyShardedDataParallelPlugin,
+    GradScalerKwargs,
+    InitProcessGroupKwargs,
+    find_executable_batch_size,
+    infer_auto_device_map,
+    is_rich_available,
+    load_checkpoint_in_model,
+    synchronize_rng_states,
+)
+
+
+if is_rich_available():
+    from .utils import rich
diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/accelerator.py b/llmeval-env/lib/python3.10/site-packages/accelerate/accelerator.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f5b1ba013276a80a16e1e4e4a45ab637d26abcb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/accelerate/accelerator.py
@@ -0,0 +1,3408 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import contextlib
+import functools
+import json
+import math
+import os
+import re
+import shutil
+import sys
+import warnings
+from collections import OrderedDict
+from contextlib import contextmanager
+from functools import partial
+from types import MethodType
+from typing import Any, Callable, Union
+
+import torch
+import torch.utils.hooks as hooks
+
+from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state
+from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches
+from .hooks import AlignDevicesHook
+from .logging import get_logger
+from .optimizer import AcceleratedOptimizer
+from .scheduler import AcceleratedScheduler
+from .state import AcceleratorState, GradientState, PartialState
+from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers
+from .utils import (
+    MODEL_NAME,
+    SAFE_WEIGHTS_INDEX_NAME,
+    SAFE_WEIGHTS_NAME,
+    WEIGHTS_INDEX_NAME,
+    WEIGHTS_NAME,
+    AutocastKwargs,
+    DataLoaderConfiguration,
+    DeepSpeedPlugin,
+    DistributedDataParallelKwargs,
+    DistributedType,
+    DynamoBackend,
+    FP8RecipeKwargs,
+    FullyShardedDataParallelPlugin,
+    GradientAccumulationPlugin,
+    GradScalerKwargs,
+    InitProcessGroupKwargs,
+    KwargsHandler,
+    LoggerType,
+    MegatronLMPlugin,
+    PrecisionType,
+    ProjectConfiguration,
+    RNGType,
+    TorchDynamoPlugin,
+    check_os_kernel,
+    clean_state_dict_for_safetensors,
+    compare_versions,
+    convert_model,
+    convert_outputs_to_fp32,
+    extract_model_from_parallel,
+    gather,
+    gather_object,
+    get_mixed_precision_context_manager,
+    get_pretty_name,
+    has_transformer_engine_layers,
+    is_bf16_available,
+    is_deepspeed_available,
+    is_fp8_available,
+    is_ipex_available,
+    is_lomo_available,
+    is_megatron_lm_available,
+    is_mlu_available,
+    is_msamp_available,
+    is_npu_available,
+    is_torch_version,
+    is_torch_xla_available,
+    is_xpu_available,
+    load_fsdp_model,
+    load_fsdp_optimizer,
+    pad_across_processes,
+    parse_choice_from_env,
+    recursively_apply,
+    reduce,
+    release_memory,
+    save,
+    save_fsdp_model,
+    save_fsdp_optimizer,
+    shard_checkpoint,
+    wait_for_everyone,
+)
+from .utils.constants import FSDP_PYTORCH_VERSION
+from .utils.modeling import get_state_dict_offloaded_model
+from .utils.other import is_compiled_module
+
+
+if is_deepspeed_available():
+    from .utils import (
+        DeepSpeedEngineWrapper,
+        DeepSpeedOptimizerWrapper,
+        DeepSpeedSchedulerWrapper,
+        DummyOptim,
+        DummyScheduler,
+    )
+
+if is_fp8_available():
+    import transformer_engine.common.recipe as te_recipe
+    from transformer_engine.pytorch import fp8_autocast
+
+
+if is_megatron_lm_available():
+    from .utils import (
+        MegatronEngine,
+        MegatronLMDummyDataLoader,
+        MegatronLMDummyScheduler,
+        MegatronLMOptimizerWrapper,
+        MegatronLMSchedulerWrapper,
+        megatron_lm_initialize,
+        megatron_lm_prepare_data_loader,
+        megatron_lm_prepare_model,
+        megatron_lm_prepare_optimizer,
+        megatron_lm_prepare_scheduler,
+    )
+
+from torch.distributed.algorithms.join import Join
+
+
+if is_torch_xla_available():
+    import torch_xla.amp as xamp
+    import torch_xla.core.xla_model as xm
+    import torch_xla.distributed.xla_multiprocessing as xmp
+
+
+if is_npu_available(check_device=False):
+    import torch_npu  # noqa: F401
+
+
+try:
+    from torch.optim.lr_scheduler import LRScheduler
+except ImportError:
+    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
+
+logger = get_logger(__name__)
+
+# Sentinel values for defaults
+_split_batches = object()
+_dispatch_batches = object()
+_even_batches = object()
+_use_seedable_sampler = object()
+
+
+class Accelerator:
+    """
+    Creates an instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training.
+
+    Args:
+        device_placement (`bool`, *optional*, defaults to `True`):
+            Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model,
+            etc...).
+        mixed_precision (`str`, *optional*):
+            Whether or not to use mixed precision training. Choose from 'no', 'fp16', 'bf16' or 'fp8'. Will default to
+            the value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default value in
+            the accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp8'
+            requires the installation of transformer-engine.
+        gradient_accumulation_steps (`int`, *optional*, defaults to 1):
+            The number of steps to accumulate gradients over before stepping the optimizer. A number > 1 should be
+            combined with `Accelerator.accumulate`. If not passed, will default to the value in the environment
+            variable `ACCELERATE_GRADIENT_ACCUMULATION_STEPS`. Can also be configured through a
+            `GradientAccumulationPlugin`.
+        cpu (`bool`, *optional*):
+            Whether or not to force the script to execute on CPU. Will ignore any available GPU if set to `True` and
+            force the execution on one process only.
+        dataloader_config (`DataLoaderConfiguration`, *optional*):
+            A configuration for how the dataloaders should be handled in distributed scenarios.
+        deepspeed_plugin ([`~utils.DeepSpeedPlugin`], *optional*):
+            Tweak your DeepSpeed-related args using this argument. This argument is optional and can be configured
+            directly using *accelerate config*.
+        fsdp_plugin ([`~utils.FullyShardedDataParallelPlugin`], *optional*):
+            Tweak your FSDP-related args using this argument. This argument is optional and can be configured directly
+            using *accelerate config*.
+        megatron_lm_plugin ([`~utils.MegatronLMPlugin`], *optional*):
+            Tweak your Megatron-LM-related args using this argument. This argument is optional and can be configured
+            directly using *accelerate config*.
+        rng_types (list of `str` or [`~utils.RNGType`]):
+            The list of random number generators to synchronize at the beginning of each iteration in your prepared
+            dataloaders. Should be one or several of:
+
+            - `"torch"`: the base torch random number generator
+            - `"cuda"`: the CUDA random number generator (GPU only)
+            - `"xla"`: the XLA random number generator (TPU only)
+            - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
+              dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
+
+            Will default to `["torch"]` for PyTorch versions <= 1.5.1 and `["generator"]` for PyTorch versions >= 1.6.
+        log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
+            A list of loggers to be setup for experiment tracking. Should be one or several of:
+
+            - `"all"`
+            - `"tensorboard"`
+            - `"wandb"`
+            - `"comet_ml"`
+
+            If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
+            also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
+        project_config ([`~utils.ProjectConfiguration`], *optional*):
+            A configuration for how saving the state can be handled.
+        project_dir (`str`, `os.PathLike`, *optional*):
+            A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved
+            checkpoints.
+        step_scheduler_with_optimizer (`bool`, *optional*, defaults to `True`):
+            Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only
+            done under certain circumstances (at the end of each epoch, for instance).
+        kwargs_handlers (list of [`~utils.KwargsHandler`], *optional*):
+            A list of [`~utils.KwargsHandler`] to customize how the objects related to distributed training or mixed
+            precision are created. See [kwargs](kwargs) for more information.
+        dynamo_backend (`str` or [`~utils.DynamoBackend`], *optional*, defaults to `"no"`):
+            Set to one of the possible dynamo backends to optimize your training with torch dynamo.
+        gradient_accumulation_plugin ([`~utils.GradientAccumulationPlugin`], *optional*):
+            A configuration for how gradient accumulation should be handled, if more tweaking than just the
+            `gradient_accumulation_steps` is needed.
+
+    **Available attributes:**
+
+    - **device** (`torch.device`) -- The device to use.
+    - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration.
+    - **local_process_index** (`int`) -- The process index on the current machine.
+    - **mixed_precision** (`str`) -- The configured mixed precision mode.
+    - **num_processes** (`int`) -- The total number of processes used for training.
+    - **optimizer_step_was_skipped** (`bool`) -- Whether or not the optimizer update was skipped (because of gradient
+      overflow in mixed precision), in which case the learning rate should not be changed.
+    - **process_index** (`int`) -- The overall index of the current process among all processes.
+    - **state** ([`~state.AcceleratorState`]) -- The distributed setup state.
+    - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes.
+    - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training.
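+
+    Example (a minimal training-loop sketch; `model`, `optimizer`, `dataloader` and `loss_func` are assumed to be
+    defined by the caller):
+
+    ```python
+    >>> from accelerate import Accelerator
+
+    >>> accelerator = Accelerator()
+    >>> model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
+    >>> for batch, targets in dataloader:
+    ...     optimizer.zero_grad()
+    ...     loss = loss_func(model(batch), targets)
+    ...     accelerator.backward(loss)  # used instead of loss.backward() so grad scaling etc. is applied
+    ...     optimizer.step()
+    ```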
+ """ + + def __init__( + self, + device_placement: bool = True, + split_batches: bool = _split_batches, + mixed_precision: PrecisionType | str | None = None, + gradient_accumulation_steps: int = 1, + cpu: bool = False, + dataloader_config: DataLoaderConfiguration | None = None, + deepspeed_plugin: DeepSpeedPlugin | None = None, + fsdp_plugin: FullyShardedDataParallelPlugin | None = None, + megatron_lm_plugin: MegatronLMPlugin | None = None, + rng_types: list[str | RNGType] | None = None, + log_with: str | LoggerType | GeneralTracker | list[str | LoggerType | GeneralTracker] | None = None, + project_dir: str | os.PathLike | None = None, + project_config: ProjectConfiguration | None = None, + gradient_accumulation_plugin: GradientAccumulationPlugin | None = None, + dispatch_batches: bool | None = _dispatch_batches, + even_batches: bool = _even_batches, + use_seedable_sampler: bool = _use_seedable_sampler, + step_scheduler_with_optimizer: bool = True, + kwargs_handlers: list[KwargsHandler] | None = None, + dynamo_backend: DynamoBackend | str | None = None, + ): + self.trackers = [] + if project_config is not None: + self.project_configuration = project_config + else: + self.project_configuration = ProjectConfiguration(project_dir=project_dir) + if project_dir is not None and self.project_dir is None: + self.project_configuration.set_directories(project_dir) + if mixed_precision is not None: + mixed_precision = str(mixed_precision) + if mixed_precision not in PrecisionType: + raise ValueError( + f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}" + ) + + dynamo_plugin = TorchDynamoPlugin() if dynamo_backend is None else TorchDynamoPlugin(backend=dynamo_backend) + + if deepspeed_plugin is None: # init from env variables + deepspeed_plugin = ( + DeepSpeedPlugin() if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" else None + ) + else: + assert isinstance( + deepspeed_plugin, DeepSpeedPlugin + ), "`deepspeed_plugin` must be an `accelerate.utils.DeepSpeedPlugin` object." + os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" # use DeepSpeed if plugin is provided + if deepspeed_plugin: + if not is_deepspeed_available(): + raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.") + if is_mlu_available(): + if compare_versions("deepspeed-mlu", "<", "0.10.1"): + raise ImportError("DeepSpeed MLU version must be >= 0.10.1. Please update DeepSpeed MLU.") + elif compare_versions("deepspeed", "<", "0.9.3"): + raise ImportError("DeepSpeed version must be >= 0.9.3. 
+
+            mixed_precision = (
+                os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision
+            )
+            deepspeed_plugin.set_mixed_precision(mixed_precision)
+            deepspeed_plugin.set_deepspeed_weakref()
+
+        if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" or isinstance(
+            fsdp_plugin, FullyShardedDataParallelPlugin
+        ):
+            if is_torch_version("<", FSDP_PYTORCH_VERSION):
+                raise ValueError(f"FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}")
+
+        if fsdp_plugin is None:  # init from env variables
+            fsdp_plugin = (
+                FullyShardedDataParallelPlugin() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" else None
+            )
+        else:
+            if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):
+                raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.")
+            os.environ["ACCELERATE_USE_FSDP"] = "true"  # use FSDP if plugin is provided
+
+        if megatron_lm_plugin is None:  # init from env variables
+            megatron_lm_plugin = (
+                MegatronLMPlugin() if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" else None
+            )
+        else:
+            if not isinstance(megatron_lm_plugin, MegatronLMPlugin):
+                raise TypeError("`megatron_lm_plugin` must be a MegatronLMPlugin object.")
+            os.environ["ACCELERATE_USE_MEGATRON_LM"] = "true"  # use MegatronLM if plugin is provided
+
+        if megatron_lm_plugin:
+            if not is_megatron_lm_available():
+                raise ImportError("Megatron is not installed. Please build it from source.")
+
+        # Kwargs handlers
+        self.ddp_handler = None
+        self.scaler_handler = None
+        self.init_handler = None
+        self.fp8_recipe_handler = None
+        self.autocast_handler = None
+        self.has_lomo_optimizer = False
+
+        if kwargs_handlers is not None:
+            for handler in kwargs_handlers:
+                assert isinstance(
+                    handler, KwargsHandler
+                ), f"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`."
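+                # Each handler type below maps to exactly one slot on the `Accelerator`; a second
+                # handler of the same type is rejected so the configuration stays unambiguous.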
+                if isinstance(handler, DistributedDataParallelKwargs):
+                    if self.ddp_handler is not None:
+                        raise ValueError("You can only pass one `DistributedDataParallelKwargs` in `kwargs_handler`.")
+                    else:
+                        self.ddp_handler = handler
+                elif isinstance(handler, GradScalerKwargs):
+                    if self.scaler_handler is not None:
+                        raise ValueError("You can only pass one `GradScalerKwargs` in `kwargs_handler`.")
+                    else:
+                        self.scaler_handler = handler
+                elif isinstance(handler, InitProcessGroupKwargs):
+                    if self.init_handler is not None:
+                        raise ValueError("You can only pass one `InitProcessGroupKwargs` in `kwargs_handler`.")
+                    else:
+                        self.init_handler = handler
+                elif isinstance(handler, FP8RecipeKwargs):
+                    if self.fp8_recipe_handler is not None:
+                        raise ValueError("You can only pass one `FP8RecipeKwargs` in `kwargs_handler`.")
+                    else:
+                        self.fp8_recipe_handler = handler
+                elif isinstance(handler, AutocastKwargs):
+                    if self.autocast_handler is not None:
+                        raise ValueError("You can only pass one `AutocastKwargs` in `kwargs_handler`.")
+                    else:
+                        self.autocast_handler = handler
+
+        kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
+        self.state = AcceleratorState(
+            mixed_precision=mixed_precision,
+            cpu=cpu,
+            dynamo_plugin=dynamo_plugin,
+            deepspeed_plugin=deepspeed_plugin,
+            fsdp_plugin=fsdp_plugin,
+            megatron_lm_plugin=megatron_lm_plugin,
+            _from_accelerator=True,
+            **kwargs,
+        )
+
+        self.delayed_fp8_autocast = False
+        if self.fp8_recipe_handler is not None:
+            # We already check if FP8 is available during the `self.state` creation
+            if self.state.mixed_precision != "fp8":
+                raise ValueError("Passing in a `FP8RecipeKwargs` object requires setting `mixed_precision='fp8'`.")
+            self.delayed_fp8_autocast = self.fp8_recipe_handler.backend == "TE" and self.distributed_type in (
+                DistributedType.MULTI_GPU,
+                DistributedType.FSDP,
+            )
+
+        trackers = filter_trackers(log_with, self.logging_dir)
+        if len(trackers) < 1 and log_with is not None:
+            warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
+        self.log_with = trackers
+
+        if (
+            (mixed_precision != "bf16")
+            and getattr(self.state, "downcast_bfloat", False)
+            and (self.state.distributed_type != DistributedType.XLA)
+        ):
+            raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")
+
+        if gradient_accumulation_plugin is not None:
+            if gradient_accumulation_steps != 1:
+                raise ValueError(
+                    "You can only pass one of `gradient_accumulation_steps` and `gradient_accumulation_plugin`. Please only pass in the created `GradientAccumulationPlugin` object."
+                )
+        else:
+            gradient_accumulation_steps = int(
+                parse_choice_from_env("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", gradient_accumulation_steps)
+            )
+            gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=gradient_accumulation_steps)
+        self.gradient_state = GradientState(
+            gradient_accumulation_plugin=gradient_accumulation_plugin,
+        )
+
+        self.device_placement = device_placement
+        if dataloader_config is None:
+            dataloader_config = DataLoaderConfiguration()
+        self.dataloader_config = dataloader_config
+        # Deal with deprecated args
+        # TODO: Remove in v1.0.0
+        deprecated_dl_args = {}
+        if dispatch_batches is not _dispatch_batches:
+            deprecated_dl_args["dispatch_batches"] = dispatch_batches
+            self.dataloader_config.dispatch_batches = dispatch_batches
+        if split_batches is not _split_batches:
+            deprecated_dl_args["split_batches"] = split_batches
+            self.dataloader_config.split_batches = split_batches
+        if even_batches is not _even_batches:
+            deprecated_dl_args["even_batches"] = even_batches
+            self.dataloader_config.even_batches = even_batches
+        if use_seedable_sampler is not _use_seedable_sampler:
+            deprecated_dl_args["use_seedable_sampler"] = use_seedable_sampler
+            self.dataloader_config.use_seedable_sampler = use_seedable_sampler
+        if len(deprecated_dl_args) > 0:
+            values = ", ".join([f"{k}={v}" for k, v in deprecated_dl_args.items()])
+            warnings.warn(
+                f"Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: {deprecated_dl_args.keys()}. "
+                "Please pass an `accelerate.DataLoaderConfiguration` instead: \n"
+                f"dataloader_config = DataLoaderConfiguration({values})",
+                FutureWarning,
+            )
+        self.step_scheduler_with_optimizer = step_scheduler_with_optimizer
+
+        # Mixed precision attributes
+        self.scaler = None
+        self.native_amp = False
+        if (
+            self.state.mixed_precision == "fp16"
+            and self.device.type != "cpu"
+            and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)
+        ):
+            self.native_amp = True
+            if self.device.type not in ("xpu", "cuda", "npu", "xla", "mlu") or is_torch_xla_available(
+                check_is_tpu=True
+            ):
+                raise ValueError(f"fp16 mixed precision requires a GPU (not {self.device.type!r}).")
+            kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
+            if self.distributed_type == DistributedType.FSDP:
+                from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
+
+                self.scaler = ShardedGradScaler(**kwargs)
+            elif is_torch_xla_available(check_is_gpu=True):
+                self.scaler = xamp.GradScaler(**kwargs)
+            elif is_mlu_available():
+                self.scaler = torch.mlu.amp.GradScaler(**kwargs)
+            elif is_npu_available():
+                self.scaler = torch.npu.amp.GradScaler(**kwargs)
+            else:
+                self.scaler = torch.cuda.amp.GradScaler(**kwargs)
+
+        elif self.state.mixed_precision == "bf16" and self.distributed_type not in (
+            DistributedType.DEEPSPEED,
+            DistributedType.MEGATRON_LM,
+        ):
+            if self.device.type in ["cpu", "xpu"]:
+                self.native_amp = True
+            else:
+                self.native_amp = is_bf16_available(True)
+            if mixed_precision == "bf16" and not self.native_amp and not is_torch_xla_available():
+                raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
+
+        elif self.state.mixed_precision == "fp8":
+            # We always enable `native_amp` for FP8
+            self.native_amp = True
+
+        # Start of internal step tracking
+        self.step = 0
+
+        # Internal references to the training objects
+        self._optimizers = []
+        self._models = []
+        self._schedulers = []
+        self._dataloaders = []
+        self._custom_objects = []
+
+        # Hooks
+        self._load_model_state_pre_hook = OrderedDict()
+        self._save_model_state_pre_hook = OrderedDict()
+
+        # RNG Types
+        self.rng_types = rng_types
+        if self.rng_types is None:
+            self.rng_types = ["generator"]
+
+        # Set a flag tensor for early stopping and other breakpoints
+        self.flag_tensor = None
+
+        check_os_kernel()
+
+    @property
+    def use_distributed(self):
+        """
+        Whether the Accelerator is configured for distributed training.
+        """
+        return self.state.use_distributed
+
+    @property
+    def distributed_type(self):
+        return self.state.distributed_type
+
+    @property
+    def num_processes(self):
+        return self.state.num_processes
+
+    @property
+    def process_index(self):
+        return self.state.process_index
+
+    @property
+    def local_process_index(self):
+        return self.state.local_process_index
+
+    @property
+    def device(self):
+        return self.state.device
+
+    @property
+    def split_batches(self):
+        return self.dataloader_config.split_batches
+
+    @property
+    def dispatch_batches(self):
+        return self.dataloader_config.dispatch_batches
+
+    @property
+    def even_batches(self):
+        return self.dataloader_config.even_batches
+
+    @even_batches.setter
+    def even_batches(self, value: bool):
+        self.dataloader_config.even_batches = value
+
+    @property
+    def use_seedable_sampler(self):
+        return self.dataloader_config.use_seedable_sampler
+
+    @property
+    def non_blocking(self):
+        return self.dataloader_config.non_blocking
+
+    @property
+    def project_dir(self):
+        return self.project_configuration.project_dir
+
+    @property
+    def logging_dir(self):
+        return self.project_configuration.logging_dir
+
+    @property
+    def save_iteration(self):
+        return self.project_configuration.iteration
+
+    @property
+    def is_main_process(self):
+        """True for one process only."""
+        return self.state.is_main_process
+
+    @property
+    def is_local_main_process(self):
+        """True for one process per server."""
+        return self.state.is_local_main_process
+
+    @property
+    def use_fp16(self):
+        warnings.warn(
+            "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate; use "
+            "`Accelerator.mixed_precision == 'fp16'` instead.",
+            FutureWarning,
+        )
+        return self.mixed_precision != "no"
+
+    @property
+    def is_last_process(self):
+        return self.process_index == self.num_processes - 1
+
+    @property
+    def mixed_precision(self):
+        return self.state.mixed_precision
+
+    @contextmanager
+    def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
+        """
+        Splits `inputs` between `self.num_processes` and yields this process's share, which can then be used on that
+        process. Useful when doing distributed inference, such as with different prompts.
+
+        Note that when using a `dict`, all keys need to have the same number of elements.
+
+        Args:
+            inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
+                The input to split between processes.
+            apply_padding (`bool`, `optional`, defaults to `False`):
+                Whether to apply padding by repeating the last element of the input so that all processes have the same
+                number of elements. Useful when trying to perform actions such as `Accelerator.gather()` on the outputs
+                or passing in fewer inputs than there are processes. If so, just remember to drop the padded elements
+                afterwards.
+
+        Example:
+
+        ```python
+        # Assume there are two processes
+        from accelerate import Accelerator
+
+        accelerator = Accelerator()
+        with accelerator.split_between_processes(["A", "B", "C"]) as inputs:
+            print(inputs)
+        # Process 0
+        ["A", "B"]
+        # Process 1
+        ["C"]
+
+        with accelerator.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
+            print(inputs)
+        # Process 0
+        ["A", "B"]
+        # Process 1
+        ["C", "C"]
+        ```
+        """
+        with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
+            yield inputs
+
+    def on_main_process(self, function: Callable[..., Any] = None):
+        """
+        A decorator that will run the decorated function on the main process only. Can also be called using the
+        `PartialState` class.
+
+        Args:
+            function (`Callable`): The function to decorate.
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+
+
+        >>> @accelerator.on_main_process
+        ... def print_something():
+        ...     print("This will be printed by process 0 only.")
+
+
+        >>> print_something()
+        "This will be printed by process 0 only"
+        ```
+        """
+        # For times when the `Accelerator` object itself utilizes this decorator.
+        if function is None:
+            if "Accelerator." in self.__qualname__:
+                function = self
+            else:
+                raise ValueError(
+                    "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
+                )
+
+        def _inner(*args, **kwargs):
+            return PartialState().on_main_process(function)(*args, **kwargs)
+
+        return _inner
+
+    def on_local_main_process(self, function: Callable[..., Any] = None):
+        """
+        A decorator that will run the decorated function on the local main process only. Can also be called using the
+        `PartialState` class.
+
+        Args:
+            function (`Callable`): The function to decorate.
+
+        Example:
+        ```python
+        # Assume we have 2 servers with 4 processes each.
+        from accelerate import Accelerator
+
+        accelerator = Accelerator()
+
+
+        @accelerator.on_local_main_process
+        def print_something():
+            print("This will be printed by process 0 only on each server.")
+
+
+        print_something()
+        # On server 1:
+        "This will be printed by process 0 only"
+        # On server 2:
+        "This will be printed by process 0 only"
+        ```
+        """
+        # For times when the `Accelerator` object itself utilizes this decorator.
+        if function is None:
+            if "Accelerator." in self.__qualname__:
+                function = self
+            else:
+                raise ValueError(
+                    "The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
+                )
+
+        def _inner(*args, **kwargs):
+            return PartialState().on_local_main_process(function)(*args, **kwargs)
+
+        return _inner
+
+    def on_last_process(self, function: Callable[..., Any]):
+        """
+        A decorator that will run the decorated function on the last process only. Can also be called using the
+        `PartialState` class.
+
+        Args:
+            function (`Callable`): The function to decorate.
+
+        Example:
+        ```python
+        # Assume we have 4 processes.
+        from accelerate import Accelerator
+
+        accelerator = Accelerator()
+
+
+        @accelerator.on_last_process
+        def print_something():
+            print(f"Printed on process {accelerator.process_index}")
+
+
+        print_something()
+        "Printed on process 3"
+        ```
+        """
+        # For times when the `Accelerator` object itself utilizes this decorator.
+        if function is None:
+            if "Accelerator." in self.__qualname__:
+                function = self
+            else:
+                raise ValueError(
+                    "The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object."
+                )
+
+        def _inner(*args, **kwargs):
+            return PartialState().on_last_process(function)(*args, **kwargs)
+
+        return _inner
+
+    def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
+        """
+        A decorator that will run the decorated function on a given process index only. Can also be called using the
+        `PartialState` class.
+
+        Args:
+            function (`Callable`, *optional*):
+                The function to decorate.
+            process_index (`int`, *optional*):
+                The index of the process on which to run the function.
+
+        Example:
+        ```python
+        # Assume we have 4 processes.
+        from accelerate import Accelerator
+
+        accelerator = Accelerator()
+
+
+        @accelerator.on_process(process_index=2)
+        def print_something():
+            print(f"Printed on process {accelerator.process_index}")
+
+
+        print_something()
+        "Printed on process 2"
+        ```
+        """
+        # Initial construction of the decorator.
+        if (self is not None) and (process_index is not None) and (function is None):
+            return partial(self.on_process, process_index=process_index)
+        # For times when the `Accelerator` object itself utilizes this decorator.
+        if function is None:
+            if "Accelerator." in self.__qualname__:
+                function = self
+            else:
+                raise ValueError(
+                    "The `on_process` decorator must be called with a function on an instantiated `Accelerator` object."
+                )
+
+        def _inner(*args, **kwargs):
+            return PartialState().on_process(function, process_index)(*args, **kwargs)
+
+        return _inner
+
+    def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
+        """
+        A decorator that will run the decorated function on a given local process index only. Can also be called using
+        the `PartialState` class.
+
+        Args:
+            function (`Callable`, *optional*):
+                The function to decorate.
+            local_process_index (`int`, *optional*):
+                The index of the local process on which to run the function.
+
+        Example:
+        ```python
+        # Assume we have 2 servers with 4 processes each.
+        from accelerate import Accelerator
+
+        accelerator = Accelerator()
+
+
+        @accelerator.on_local_process(local_process_index=2)
+        def print_something():
+            print(f"Printed on process {accelerator.local_process_index}")
+
+
+        print_something()
+        # On server 1:
+        "Printed on process 2"
+        # On server 2:
+        "Printed on process 2"
+        ```
+        """
+        # Initial construction of the decorator.
+        if (self is not None) and (local_process_index is not None) and (function is None):
+            return partial(self.on_local_process, local_process_index=local_process_index)
+        # For times when the `Accelerator` object itself utilizes this decorator.
+        if function is None:
+            if "Accelerator." in self.__qualname__:
+                function = self
+            else:
+                raise ValueError(
+                    "The `on_local_process` decorator must be called with a function on an instantiated `Accelerator` object."
+                )
+
+        def _inner(*args, **kwargs):
+            return PartialState().on_local_process(function, local_process_index)(*args, **kwargs)
+
+        return _inner
+
+    @contextmanager
+    def main_process_first(self):
+        """
+        Lets the main process go first inside a with block.
+
+        The other processes will enter the with block after the main process exits.
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> with accelerator.main_process_first():
+        ...     # This will be printed first by process 0 then in a seemingly
+        ...     # random order by the other processes.
+        ...     print(f"This will be printed by process {accelerator.process_index}")
print(f"This will be printed by process {accelerator.process_index}") + ``` + """ + with self.state.main_process_first(): + yield + + @contextmanager + def local_main_process_first(self): + """ + Lets the local main process go inside a with block. + + The other processes will enter the with block after the main process exits. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> with accelerator.local_main_process_first(): + ... # This will be printed first by local process 0 then in a seemingly + ... # random order by the other processes. + ... print(f"This will be printed by process {accelerator.local_process_index}") + ``` + """ + with self.state.local_main_process_first(): + yield + + @contextmanager + def no_sync(self, model): + """ + A context manager to disable gradient synchronizations across DDP processes by calling + `torch.nn.parallel.DistributedDataParallel.no_sync`. + + If `model` is not in DDP, this context manager does nothing + + Args: + model (`torch.nn.Module`): + PyTorch Module that was prepared with `Accelerator.prepare` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer) + >>> input_a = next(iter(dataloader)) + >>> input_b = next(iter(dataloader)) + + >>> with accelerator.no_sync(): + ... outputs = model(input_a) + ... loss = loss_func(outputs) + ... accelerator.backward(loss) + ... # No synchronization across processes, only accumulate gradients + >>> outputs = model(input_b) + >>> accelerator.backward(loss) + >>> # Synchronization across all processes + >>> optimizer.step() + >>> optimizer.zero_grad() + ``` + """ + context = contextlib.nullcontext + if self.use_distributed: + context = getattr(model, "no_sync", context) + + with context(): + yield + + @staticmethod + @contextmanager + def trigger_sync_in_backward(model): + """Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under + `Accelerator.no_sync` (only applicable in multi-GPU scenarios). + + If the script is not launched in distributed mode, this context manager does nothing. + + Args: + model (`torch.nn.Module`): + The model for which to trigger the gradient synchronization. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer) + + >>> with accelerator.no_sync(): + ... loss_a = loss_func(model(input_a)) # first forward pass + ... loss_b = loss_func(model(input_b)) # second forward pass + >>> accelerator.backward(loss_a) # No synchronization across processes, only accumulate gradients + >>> with accelerator.trigger_sync_in_backward(model): + ... accelerator.backward(loss_b) # Synchronization across all processes + >>> optimizer.step() + >>> optimizer.zero_grad() + ``` + """ + if not isinstance(model, torch.nn.parallel.DistributedDataParallel): + yield + return + + old_require_backward_grad_sync = model.require_backward_grad_sync + old_require_forward_param_sync = model.require_forward_param_sync + + # EXPERIMENTAL: This will force grad sync during `backward()`, but it is unknown if it breaks other DDP features. 
+        # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/nn/parallel/distributed.py#L1453-L1466
+        model.require_backward_grad_sync = True
+        model.require_forward_param_sync = True
+        # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/csrc/distributed/c10d/reducer.cpp#L1371-L1402
+        model.reducer.prepare_for_backward([])
+        try:
+            yield
+        finally:
+            model.require_backward_grad_sync = old_require_backward_grad_sync
+            model.require_forward_param_sync = old_require_forward_param_sync
+
+    def _do_sync(self, force: bool = False):
+        "Sets the right `sync_gradients` context and either resets or increases `self.step`"
+        if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader:
+            self.step = 0
+            self.gradient_state._set_sync_gradients(True)
+        else:
+            self.step += 1
+            self.gradient_state._set_sync_gradients(force or ((self.step % self.gradient_state.num_steps) == 0))
+
+    @property
+    def sync_gradients(self):
+        return self.gradient_state.sync_gradients
+
+    @sync_gradients.setter
+    def sync_gradients(self, sync_gradients):
+        self.gradient_state.sync_gradients = sync_gradients
+
+    @property
+    def gradient_accumulation_steps(self):
+        return self.gradient_state.num_steps
+
+    @gradient_accumulation_steps.setter
+    def gradient_accumulation_steps(self, gradient_accumulation_steps):
+        self.gradient_state.plugin_kwargs.update({"num_steps": gradient_accumulation_steps})
+
+    @contextmanager
+    def accumulate(self, *models):
+        """
+        A context manager that will lightly wrap around and perform gradient accumulation automatically
+
+        Args:
+            *models (list of `torch.nn.Module`):
+                PyTorch Modules that were prepared with `Accelerator.prepare`. Models passed to `accumulate()` will
+                skip gradient syncing during backward pass in distributed training
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator(gradient_accumulation_steps=2)
+        >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
+
+        >>> for input, output in dataloader:
+        ...     with accelerator.accumulate(model):
+        ...         outputs = model(input)
+        ...         loss = loss_func(outputs)
+        ...         accelerator.backward(loss)
+        ...         optimizer.step()
+        ...         scheduler.step()
+        ...         optimizer.zero_grad()
+        ```
+        """
+        # sync_each_batch=True will guarantee below that self.sync_gradients=True, therefore
+        # resulting in the nullcontext always being selected.
+        self._do_sync(force=self.gradient_state.plugin_kwargs.get("sync_each_batch", False))
+        with contextlib.ExitStack() as cm_stack:
+            for m in models:
+                cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m))
+            yield
+
+    @contextmanager
+    def join_uneven_inputs(self, joinables, even_batches=None):
+        """
+        A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper
+        around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the
+        length of the dataset.
+
+        Args:
+            joinables (`list[torch.distributed.algorithms.Joinable]`):
+                A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a
+                PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.
+            even_batches (`bool`, *optional*):
+                If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,
+                the default `Accelerator` value will be used.
+
+        <Tip warning={true}>
+
+        `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other
+        configuration, this method will have no effect.
+
+        </Tip>
+
+        <Tip warning={true}>
+
+        Overriding `even_batches` will not affect iterable-style data loaders.
+
+        </Tip>
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator(even_batches=True)
+        >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
+
+        >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
+        ...     for input, output in dataloader:
+        ...         outputs = ddp_model(input)
+        ...         loss = loss_func(outputs)
+        ...         accelerator.backward(loss)
+        ...         optimizer.step()
+        ...         optimizer.zero_grad()
+        ```
+        """
+        if self.distributed_type in (
+            DistributedType.MULTI_GPU,
+            DistributedType.MULTI_NPU,
+            DistributedType.MULTI_MLU,
+            DistributedType.MULTI_XPU,
+        ):
+            dl_even_batches_values = []
+
+            if even_batches is not None:
+                iterable_dl_seen = False
+                # override value in batch sampler for map-style datasets
+                for dl_idx, dl in enumerate(self._dataloaders):
+                    if isinstance(dl, DataLoaderDispatcher):
+                        iterable_dl_seen = True
+                        continue
+                    dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))
+                    dl.batch_sampler.even_batches = even_batches
+
+                if iterable_dl_seen:
+                    warnings.warn(
+                        "Overriding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable"
+                    )
+            else:
+                even_batches = self.even_batches
+
+            enable_join = False if even_batches else True
+            try:
+                with Join(joinables, enable=enable_join, throw_on_early_termination=False):
+                    yield
+            finally:
+                # reset any batch samplers that have been modified
+                for dl_idx, even_batches_value in dl_even_batches_values:
+                    self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value
+        else:
+            # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs
+            if self.distributed_type != DistributedType.NO:
+                warnings.warn(
+                    "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect."
+                )
+
+            with contextlib.nullcontext(joinables):
+                yield
+
+    def print(self, *args, **kwargs):
+        """
+        Drop-in replacement of `print()` to only print once per server.
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> accelerator.print("Hello world!")
+        ```
+        """
+        self.state.print(*args, **kwargs)
+
+    def _prepare_one(self, obj, first_pass=False, device_placement=None):
+        # First pass of preparation: DataLoader, model, optimizer
+        if first_pass:
+            if isinstance(obj, torch.utils.data.DataLoader):
+                return self.prepare_data_loader(obj, device_placement=device_placement)
+            elif isinstance(obj, torch.nn.Module):
+                return self.prepare_model(obj, device_placement=device_placement)
+            elif isinstance(obj, torch.optim.Optimizer):
+                optimizer = self.prepare_optimizer(obj, device_placement=device_placement)
+                return optimizer
+        # Second pass of preparation: LR scheduler (which needs the full list of optimizers)
+        elif isinstance(obj, LRScheduler):
+            scheduler = self.prepare_scheduler(obj)
+            return scheduler
+        # Return the unprocessed object if the previous criteria were not met
+        return obj
+
+    def prepare(self, *args, device_placement=None):
+        """
+        Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same
+        order.
+
+        Args:
+            *args (list of objects):
+                Any of the following type of objects:
+
+                - `torch.utils.data.DataLoader`: PyTorch Dataloader
+                - `torch.nn.Module`: PyTorch Module
+                - `torch.optim.Optimizer`: PyTorch Optimizer
+                - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler
+
+            device_placement (`list[bool]`, *optional*):
+                Used to customize whether automatic device placement should be performed for each object passed. Needs
+                to be a list of the same length as `args`. Not compatible with DeepSpeed or FSDP.
+
+        <Tip>
+
+        You don't need to prepare a model if you only use it for inference without any kind of mixed precision.
+
+        </Tip>
+
+        Examples:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> # Assume a model, optimizer, data_loader and scheduler are defined
+        >>> model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler)
+        ```
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> # Assume a model, optimizer, data_loader and scheduler are defined
+        >>> device_placement = [True, True, False, False]
+        >>> # Will place the first two items passed in automatically on the right device but not the last two.
+        >>> model, optimizer, data_loader, scheduler = accelerator.prepare(
+        ...     model, optimizer, data_loader, scheduler, device_placement=device_placement
+        ... )
+        ```
+        """
+        if device_placement is None:
+            device_placement = [None for _ in args]
+        elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM):
+            raise ValueError("You can't customize device placements with DeepSpeed or Megatron-LM.")
+        elif len(device_placement) != len(args):
+            raise ValueError(
+                f"`device_placement` should be a list with {len(args)} elements (the number of objects passed)."
+            )
+
+        for obj in args:
+            # TODO: Look at enabling native TP training directly with a proper config
+            if (
+                isinstance(obj, torch.nn.Module)
+                and self.verify_device_map(obj)
+                and self.distributed_type != DistributedType.NO
+                and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
+            ):
+                raise ValueError(
+                    "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
+                    " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
+                )
+
+        if self.distributed_type == DistributedType.DEEPSPEED:
+            model_count = 0
+            for obj in args:
+                if isinstance(obj, torch.nn.Module):
+                    model_count += 1
+            if model_count > 1:
+                raise AssertionError(
+                    "You can't use the same `Accelerator()` instance with multiple models when using DeepSpeed"
+                )
+
+        # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will
+        # have parameters disconnected from the model (so no training :-( ).
+        # If the model and optimizer have parameters on different devices we raise an error.
+        if self.distributed_type == DistributedType.XLA:
+            model_device, optimizer_device = self._get_devices()
+            if model_device is not None and optimizer_device is not None and model_device != optimizer_device:
+                raise ValueError(
+                    "The model and the optimizer parameters are not on the same device, which probably means you "
+                    "created an optimizer around your model **before** putting it on the device. Make sure the line "
+                    "model.to(device) is before the optimizer creation in your script or remove it entirely and use "
+                    "the flag default value for `device_placement` in your `Accelerator` to let it handle that "
+                    "part for you."
+                )
+
+        # When device placement is enabled on XLA, moving the model to the device creates new parameter tensors, so
+        # the optimizer's references must be remapped after preparation (the same applies to the TE fp8 conversion).
+        tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.XLA
+        if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
+            # 1. grabbing old model parameters
+            old_named_params = self._get_named_parameters(*args)
+
+        if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
+            if self.device.type == "cpu" and self.state.use_ipex:
+                args = self._prepare_ipex(*args)
+            elif self.device.type == "xpu" and is_xpu_available():
+                args = self._prepare_ipex(*args)
+        if self.distributed_type == DistributedType.DEEPSPEED:
+            result = self._prepare_deepspeed(*args)
+        elif self.distributed_type == DistributedType.MEGATRON_LM:
+            result = self._prepare_megatron_lm(*args)
+        else:
+            if self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "MSAMP":
+                args = self._prepare_msamp(*args)
+                # MS-AMP will handle the device placement
+                device_placement = [False for _ in args]
+            result = tuple(
+                self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)
+            )
+            result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement))
+
+        if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
+            # 2. grabbing new model parameters
+            new_named_params = self._get_named_parameters(*result)
+            # 3. building a map from the first to the second
+            mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
+            # 4. using that map to update the parameters of the optimizer
+            for obj in result:
+                if isinstance(obj, torch.optim.Optimizer):
+                    obj._switch_parameters(mapping)
+
+        for item in result:
+            if any(
+                item in container
+                for container in (self._dataloaders, self._models, self._optimizers, self._schedulers)
+            ):
+                item._is_accelerate_prepared = True
+
+        return result if len(result) > 1 else result[0]
+
+    def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False):
+        """
+        Prepares a PyTorch model for training in any distributed setup. It is recommended to use
+        [`Accelerator.prepare`] instead.
+
+        Args:
+            model (`torch.nn.Module`):
+                A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without
+                any kind of mixed precision.
+            device_placement (`bool`, *optional*):
+                Whether or not to place the model on the proper device. Will default to `self.device_placement`.
+            evaluation_mode (`bool`, *optional*, defaults to `False`):
+                Whether or not to set the model for evaluation only, by just applying mixed precision and
+                `torch.compile` (if configured in the `Accelerator` object).
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> # Assume a model is defined
+        >>> model = accelerator.prepare_model(model)
+        ```
+        """
+        if device_placement is None:
+            device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP
+        self._models.append(model)
+
+        # TODO: Look at enabling native TP training directly with a proper config
+        if (
+            self.verify_device_map(model)
+            and self.distributed_type != DistributedType.NO
+            and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
+        ):
+            raise ValueError(
+                "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
+                " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
+            )
+
+        if self.native_amp:
+            model._original_forward = model.forward
+            model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward
+            autocast_context = get_mixed_precision_context_manager(self.native_amp, self.autocast_handler)
+            new_forward = autocast_context(model_forward_func)
+            if hasattr(model.forward, "__func__"):
+                model.forward = MethodType(new_forward, model)
+                model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model)
+            else:
+                model.forward = convert_outputs_to_fp32(new_forward)
+
+        # We prepare fp8 after, allowing for bf16 autocast to happen first
+        if getattr(self.fp8_recipe_handler, "backend", None) == "TE":
+            if not has_transformer_engine_layers(model):
+                with torch.no_grad():
+                    convert_model(model)
+                model._converted_to_transformer_engine = True
+
+            kwargs = self.fp8_recipe_handler.to_kwargs() if self.fp8_recipe_handler is not None else {}
+            if "fp8_format" in kwargs:
+                kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"])
+            fp8_recipe = te_recipe.DelayedScaling(**kwargs)
+            # If we are in DDP or FSDP, we delay `autocast` until after FSDP/DDP has been initialized
+            # to make use of the process group
+            if not self.delayed_fp8_autocast:
+                model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward)
+
+        if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr(
+            model, "hf_device_map", False
+        ):
+            model_devices = set(model.hf_device_map.values())
+            if len(model_devices) > 1 and self.distributed_type != DistributedType.NO:
+                raise ValueError(
+                    "You can't train a model that has been loaded in 8-bit precision on multiple devices in any distributed mode."
+                    " In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism."
+                    " Therefore you should not specify that you are under any distributed regime in your accelerate config."
+                )
+            elif len(model_devices) == 1:
+                current_device = list(model_devices)[0]
+                current_device_index = (
+                    current_device.index if isinstance(current_device, torch.device) else current_device
+                )
+
+                if torch.device(current_device_index) != self.device:
+                    # if on the first device (GPU 0) we don't care
+                    if (self.device.index is not None) or (current_device_index != 0):
+                        raise ValueError(
+                            "You can't train a model that has been loaded in 8-bit precision on a different device than the one "
+                            "you're training on. Make sure you loaded the model on the correct device using for example "
+                            "`device_map={'': torch.cuda.current_device()}` or `device_map={'': torch.xpu.current_device()}`."
+                        )
Make sure you loaded the model on the correct device using for example `device_map={'': torch.cuda.current_device()}` or `device_map={'': torch.xpu.current_device()}`." + ) + + if "cpu" in model_devices or "disk" in model_devices: + raise ValueError( + "You can't train a model that has been loaded in 8-bit precision with CPU or disk offload." + ) + elif device_placement and not self.verify_device_map(model): + model = model.to(self.device) + if not evaluation_mode: + if self.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + ): + if any(p.requires_grad for p in model.parameters()): + kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} + # TODO: Look at enabling native TP training directly with a proper config + if os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true": + device_ids, output_device = [self.local_process_index], self.local_process_index + else: + device_ids, output_device = None, None + + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=device_ids, output_device=output_device, **kwargs + ) + elif self.distributed_type == DistributedType.FSDP: + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + + # Check if the model is already an FSDP model due to `Manual Wrapping` and if so, + # don't wrap it again + # In case the model is already compiled using PyTorch 2.0 and the wrapped model in it + # is an FSDP model, don't wrap it again + is_type_fsdp = isinstance(model, FSDP) or ( + is_compiled_module(model) and isinstance(model._orig_mod, FSDP) + ) + + if not is_type_fsdp: + self.state.fsdp_plugin.set_auto_wrap_policy(model) + fsdp_plugin = self.state.fsdp_plugin + kwargs = { + "sharding_strategy": fsdp_plugin.sharding_strategy, + "cpu_offload": fsdp_plugin.cpu_offload, + "auto_wrap_policy": fsdp_plugin.auto_wrap_policy, + "mixed_precision": fsdp_plugin.mixed_precision_policy, + "sync_module_states": fsdp_plugin.sync_module_states, + "backward_prefetch": fsdp_plugin.backward_prefetch, + "forward_prefetch": fsdp_plugin.forward_prefetch, + "use_orig_params": fsdp_plugin.use_orig_params, + "param_init_fn": fsdp_plugin.param_init_fn, + "ignored_modules": fsdp_plugin.ignored_modules, + "limit_all_gathers": fsdp_plugin.limit_all_gathers, + "device_id": self.device, + } + model = FSDP(model, **kwargs) + if fsdp_plugin.activation_checkpointing: + from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + CheckpointImpl, + apply_activation_checkpointing, + checkpoint_wrapper, + ) + + apply_activation_checkpointing( + model, + checkpoint_wrapper_fn=functools.partial( + checkpoint_wrapper, + checkpoint_impl=CheckpointImpl.NO_REENTRANT, + ), + auto_wrap_policy=fsdp_plugin.auto_wrap_policy, + ) + + # In the event the model had been loaded in low precision, but + # mixed precision had also been activated, then we follow DeepSpeed's + # strategy to hold the parameters in full precision. + # - assume that trainer.args.bf16 and trainer.args.fp16 are already checked against + # fsdp_plugin.mixed_precision_policy. + # - NOTE: we do not check the mixed_precision attribute on the FSDP root wrapper. + # * this attribute will always be set by init_utils.init_core_state so it's always not None. + # * mixed_precision.param_dtype only regards _fwd_bwd_param_dtype + # * if the model is loaded in 16-bit, even if mixed_precision.param_dtype is None, + # we still want to upcast the flat_param. 
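+ # Illustrative example (hypothetical usage, not part of this function): a model loaded with + # `AutoModel.from_pretrained(..., torch_dtype=torch.bfloat16)` and prepared with + # `Accelerator(mixed_precision="bf16")` reaches this point with bf16 FlatParameters; the loop + # below upcasts them to fp32 so the optimizer later builds its state in full precision.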
+ if self.mixed_precision != "no": # if mixed precision is set + upcasted_log = [] + for module in FSDP.fsdp_modules(model): + # Referencing DeepSpeed Zero3 + # - in Init, params are converted to 16bit while partitioning. + # - in accelerator.prepare, deepspeed.initialize is called to: + # * create the DeepSpeedEngine. + # * since zero_optimization() is True, call engine._configure_zero_optimizer. + # + # Inside the DeepSpeed Zero3 optimizer configuration, which initializes + # DeepSpeedZeroOptimizer_Stage3, during which: + # * trainable_param_groups are obtained from the attached optimizer + # (already partitioned in 16bit). + # * then _setup_for_real_optimizer -> _create_fp32_partitions + # which performs the fp32 upcasting. + + # To mimic DeepSpeed's casting in FSDP, we look at the (single) FlatParameter held + # within an FSDP wrapper. This FlatParameter will be seen by the optimizer. + # - even though there is a torch.device('meta') guard below, we + # expect _init_utils._init_param_handle_from_module to already + # sync the parameter. + + if not module._has_params: + continue # skip if this FSDP module is not managing parameters + param = module._flat_param + if ( + param.dtype != torch.float32 + and param.device != torch.device("meta") + and param.requires_grad + ): + # keep a log of the named params that were upcasted + # NOTE: resorted to this because warnings.simplefilter("once") is somehow not working + name_param_log = (module.module.__class__.__name__, ", ".join(module._flat_param._fqns)) + if name_param_log not in upcasted_log: + upcasted_log.append(name_param_log) + + # this works because of FSDP's _runtime_utils.lazy_init. + # Have to be careful not to call anything before this that + # triggers lazy_init (e.g., _is_fsdp_root). + param.data = param.data.to(torch.float32) # upcasting + module._handle._orig_param_dtype = torch.float32 # update + + # report the warnings + # some messages can be quite repetitive, especially when reporting on layers that share an identical architecture. + if self.is_main_process: + for name_log, param_log in upcasted_log: + warnings.warn( + f"Upcasted low precision parameters in {name_log} because mixed precision is turned on in FSDP. " + f"Affects: {param_log}." + ) + + if len(upcasted_log) > 0: + warnings.warn( + "FSDP upcast of low precision parameters may affect the precision of model checkpoints." + ) + + # if the previous and current models are the same, delete the previous one + if len(self._models) > 1 and (self._models[-2] is self._models[-1]): + del self._models[-2] + self._models[-1] = model + elif self.distributed_type == DistributedType.MULTI_CPU: + kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} + model = torch.nn.parallel.DistributedDataParallel(model, **kwargs) + elif self.distributed_type == DistributedType.XLA and self.state.fork_launched: + model = xmp.MpModelWrapper(model).to(self.device) + # Now we can apply the FP8 autocast + if self.delayed_fp8_autocast: + model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe, fp8_group=model.process_group)( + model.forward + ) + # torch.compile should be called last and only if the model isn't already compiled. 
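+ # For instance, a dynamo plugin configured with `DynamoBackend.INDUCTOR` makes the call below + # roughly equivalent to `torch.compile(model, backend="inductor")`; this is a sketch, as the + # exact kwargs come from `self.state.dynamo_plugin.to_kwargs()` and depend on the configuration.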
+ if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model): + if not is_torch_version(">=", "2.0"): + raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.") + model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs()) + return model + + def _prepare_deepspeed(self, *args): + import deepspeed + + deepspeed_plugin = self.state.deepspeed_plugin + + is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args) + result = [ + self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj + for obj in args + ] + + if deepspeed_plugin.is_auto("train_micro_batch_size_per_gpu"): + if is_dataloader_present: + batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] + if any(bs is None for bs in batch_sizes): + raise ValueError( + "At least one of the dataloaders passed to `accelerate.prepare()` has `None` as its batch size. " + "Please set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file " + "or assign an integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." + ) + if self.split_batches: + batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes] + + batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes) + if len(batch_sizes) > 1: + logger.info( + "Since you passed both train and evaluation dataloaders, `is_train_batch_min` (here " + f"{deepspeed_plugin.is_train_batch_min}) will decide the `train_batch_size` ({batch_size_per_device})." + ) + else: + raise ValueError( + "When using DeepSpeed, `accelerate.prepare()` requires you to pass at least one training or evaluation dataloader " + "with a `batch_size` attribute returning an integer value, " + "or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file " + "or assign an integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." + ) + else: + batch_size_per_device = deepspeed_plugin.get_value("train_micro_batch_size_per_gpu") + + # handle `gradient_accumulation_steps` when the value is `auto` + deepspeed_plugin.fill_match( + "gradient_accumulation_steps", + must_match=False, + gradient_accumulation_steps=self.gradient_accumulation_steps, + ) + + config_kwargs = { + "train_micro_batch_size_per_gpu": batch_size_per_device, + "train_batch_size": batch_size_per_device + * deepspeed_plugin.get_value("gradient_accumulation_steps") + * self.num_processes, + "gradient_clipping": 1.0, + "zero_optimization.stage3_gather_16bit_weights_on_model_save": False, + } + + model = None + optimizer = None + scheduler = None + for obj in result: + if isinstance(obj, torch.nn.Module): + model = obj + elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)): + optimizer = obj + elif (isinstance(obj, (LRScheduler, DummyScheduler))) or ( + type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES + ): + scheduler = obj + + if optimizer is not None: + if "optimizer" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)): + raise ValueError( + "You cannot specify an optimizer in the config file and in the code at the same time. " + "Please remove the optimizer from the config file or " + "create `accelerate.utils.DummyOptim` in the code." 
+ ) + elif "optimizer" not in deepspeed_plugin.deepspeed_config and isinstance(optimizer, (DummyOptim)): + raise ValueError( + "You cannot create a `DummyOptim` without specifying an optimizer in the config file." + ) + + if isinstance(optimizer, (torch.optim.Optimizer)): + deepspeed_plugin.deepspeed_config["zero_allow_untested_optimizer"] = True + + if scheduler is not None: + if "scheduler" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)): + raise ValueError( + "You cannot specify a scheduler in the config file and in the code at the same time. " + "Please remove the scheduler from the config file or " + "create `accelerate.utils.DummyScheduler` in the code." + ) + elif ( + "scheduler" not in deepspeed_plugin.deepspeed_config + and isinstance(scheduler, (DummyScheduler)) + and scheduler.lr_scheduler_callable is None + ): + raise ValueError( + "Either specify a scheduler in the config file or " + "pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`." + ) + + if optimizer is not None and scheduler is not None: + if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)): + raise ValueError( + "You can only specify `accelerate.utils.DummyScheduler` in the code when using " + "`accelerate.utils.DummyOptim`." + ) + + if model is not None: + # if the model is an MOE, set the appropriate MOE layers as leaf Z3 modules + deepspeed_plugin.set_moe_leaf_modules(model) + # deal with config keys that use `auto` value and rely on model's hidden_size + hidden_size_based_keys = [ + "zero_optimization.reduce_bucket_size", + "zero_optimization.stage3_prefetch_bucket_size", + "zero_optimization.stage3_param_persistence_threshold", + ] + hidden_size_auto_keys = [x for x in hidden_size_based_keys if deepspeed_plugin.is_auto(x)] + if len(hidden_size_auto_keys) > 0: + reasoning = ( + "therefore it's not possible to automatically fill out the following `auto` entries " + + f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing " + + "`auto` values for these keys with an integer value of your choice." 
+ ) + if not hasattr(model, "config"): + raise ValueError("Can't find `model.config` entry, " + reasoning) + + if hasattr(model.config, "hidden_size"): + hidden_size = model.config.hidden_size + elif hasattr(model.config, "hidden_sizes"): + # if there are many hidden sizes pick the largest one + hidden_size = max(model.config.hidden_sizes) + else: + raise ValueError( + "Cannot find either `model.config.hidden_size` or `model.config.hidden_sizes`, " + reasoning + ) + + config_kwargs.update( + { + "zero_optimization.reduce_bucket_size": hidden_size * hidden_size, + "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size, + "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size, + } + ) + + if isinstance(optimizer, (DummyOptim)): + config_kwargs.update( + {"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay} + ) + if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None: + max_lr = ( + getattr(scheduler.optimizer, "lr", None) + if getattr(scheduler.optimizer, "defaults", None) is None + else scheduler.optimizer.defaults["lr"] + ) + config_kwargs.update( + { + "scheduler.params.warmup_min_lr": 0, + "scheduler.params.warmup_max_lr": max_lr, + "scheduler.params.warmup_num_steps": scheduler.warmup_num_steps, + } + ) + if scheduler.total_num_steps is not None: + config_kwargs["scheduler.params.total_num_steps"] = ( + math.ceil(scheduler.total_num_steps / self.num_processes) + if not self.split_batches + else scheduler.total_num_steps + ) + deepspeed_plugin.deepspeed_config_process(must_match=False, **config_kwargs) + self.deepspeed_config = deepspeed_plugin.deepspeed_config + kwargs = dict(model=model, config_params=self.deepspeed_config) + if optimizer is not None: + if isinstance(optimizer, (DummyOptim)): + kwargs["model_parameters"] = optimizer.params + if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is not None: + kwargs["lr_scheduler"] = scheduler.lr_scheduler_callable + else: + if self.deepspeed_config["zero_optimization"].get("offload_optimizer", {}).get( + "device", "none" + ) != "none" and self.deepspeed_config.get("zero_force_ds_cpu_optimizer", True): + from deepspeed.ops.adam import DeepSpeedCPUAdam + + defaults = {k: v for k, v in optimizer.defaults.items() if k in ["lr", "weight_decay"]} + optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults) + kwargs["optimizer"] = optimizer + if scheduler is not None: + if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES: + kwargs["lr_scheduler"] = scheduler + + engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs) + if optimizer is not None: + optimizer = DeepSpeedOptimizerWrapper(optimizer) + if scheduler is not None: + if lr_scheduler is None: + scheduler = AcceleratedScheduler( + scheduler, + optimizer, + step_with_optimizer=self.step_scheduler_with_optimizer, + split_batches=self.split_batches, + ) + else: + scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer) + + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = engine + elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)): + result[i] = optimizer + elif (isinstance(result[i], (LRScheduler, DummyScheduler))) or ( + type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES + ): + result[i] = scheduler + # keep a reference for deepspeed_engine_wrapped.backward() + self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(engine) + 
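# Keep references to the wrapped engine, optimizer and scheduler below so that later utilities + # such as `save_state`/`load_state` can find them on this `Accelerator` instance. + 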
self._models.append(engine) + if optimizer is not None: + self._optimizers.append(optimizer) + if scheduler is not None: + self._schedulers.append(scheduler) + if len(self._models) > 1: + raise AssertionError( + "You can't use the same `Accelerator()` instance with multiple models when using DeepSpeed" + ) + return tuple(result) + + def _prepare_megatron_lm(self, *args): + megatron_lm_plugin = self.state.megatron_lm_plugin + if not megatron_lm_plugin.megatron_dataset_flag: + batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] + if len(batch_sizes) == 0: + raise ValueError( + "You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM." + ) + + micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes) + if len(batch_sizes) > 1: + logger.info( + "Since you passed both train and evaluation dataloaders, `is_train_batch_min` (here " + f"{megatron_lm_plugin.is_train_batch_min}) will decide the `train_batch_size` ({micro_batch_size})." + ) + else: + for obj in args: + if isinstance(obj, MegatronLMDummyDataLoader): + micro_batch_size = obj.dataset_args["micro_batch_size"] + break + + dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree) + megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree) + + model = None + optimizer = None + scheduler = None + is_dummy_scheduler = False + batch_data = None + for obj in args: + if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None: + batch_data = next(iter(obj)) + if isinstance(obj, torch.nn.Module): + model = obj + elif isinstance(obj, (torch.optim.Optimizer)): + optimizer = obj + elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)): + scheduler = obj + + if model is not None: + megatron_lm_plugin.set_network_size_args(model, batch_data) + if optimizer is not None: + megatron_lm_plugin.set_optimizer_type(optimizer) + if scheduler is not None: + is_dummy_scheduler = isinstance(scheduler, MegatronLMDummyScheduler) + if not is_dummy_scheduler: + raise ValueError( + "You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead." 
+ ) + megatron_lm_plugin.set_scheduler_args(scheduler) + + # initialize megatron-lm + megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args) + counter = 0 + result = [] + for obj in args: + if isinstance(obj, torch.utils.data.DataLoader): + result.append(megatron_lm_prepare_data_loader(self, obj)) + counter += 1 + elif isinstance(obj, MegatronLMDummyDataLoader): + if counter == 0: + obj.set_megatron_data_args() + dataloaders = megatron_lm_prepare_data_loader(self, obj) + result.append(dataloaders[counter]) + counter += 1 + else: + result.append(obj) + + if model is not None: + model = megatron_lm_prepare_model(self) + if optimizer is not None: + optimizer = megatron_lm_prepare_optimizer(self, model) + if scheduler is not None: + scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler) + + if model is not None: + model = MegatronEngine(self, model, optimizer, scheduler) + if optimizer is not None: + optimizer = MegatronLMOptimizerWrapper(optimizer) + if scheduler is not None: + scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer) + + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = model + elif isinstance(result[i], torch.optim.Optimizer): + result[i] = optimizer + elif isinstance(result[i], MegatronLMDummyScheduler): + result[i] = scheduler + if model is not None: + self._models.append(model) + if optimizer is not None: + self._optimizers.append(optimizer) + if scheduler is not None: + self._schedulers.append(scheduler) + if len(self._models) > 1: + raise AssertionError( + "You can't use the same `Accelerator()` instance with multiple models when using Megatron-LM" + ) + return tuple(result) + + def _prepare_ipex(self, *args): + if not is_ipex_available(): + raise ImportError( + "IPEX is not installed or IPEX's version does not match the current PyTorch version. Please refer" + " to https://github.com/intel/intel-extension-for-pytorch." + ) + else: + import intel_extension_for_pytorch as ipex + + model = None + optimizer = None + result = [obj for obj in args] + for obj in result: + if isinstance(obj, torch.nn.Module): + model = obj + model.train() + elif isinstance(obj, (torch.optim.Optimizer)): + optimizer = obj + if optimizer is not None and model is not None: + dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else None + if self.device.type == "xpu" and is_xpu_available(): + model = model.to(self.device) + model, optimizer = torch.xpu.optimize( + model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1" + ) + else: + model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1") + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = model + elif isinstance(result[i], (torch.optim.Optimizer)): + result[i] = optimizer + return tuple(result) + + def _prepare_msamp(self, *args): + if not is_msamp_available(): + raise ImportError( + "MS-AMP was not found on your system. Please ensure that MS-AMP is available " + "or choose `'te'` as the backend for FP8 mixed precision training." 
+ ) + else: + import msamp + + model, optimizer = None, None + num_models, num_optimizers = 0, 0 + result = [obj for obj in args] + for obj in result: + if isinstance(obj, torch.nn.Module): + model = obj + num_models += 1 + elif isinstance(obj, (torch.optim.Optimizer)): + optimizer = obj + num_optimizers += 1 + if optimizer is None or model is None: + raise ValueError( + "You must pass a model and an optimizer together to `accelerate.prepare()` when using MS-AMP." + ) + elif num_models > 1 or num_optimizers > 1: + raise ValueError( + f"You can't use multiple models ({num_models}) or optimizers ({num_optimizers}) with MS-AMP." + ) + else: + model, optimizer = msamp.initialize(model, optimizer, opt_level=self.fp8_recipe_handler.opt_level) + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = model + elif isinstance(result[i], (torch.optim.Optimizer)): + result[i] = optimizer + return tuple(result) + + def prepare_data_loader( + self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None + ): + """ + Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + data_loader (`torch.utils.data.DataLoader`): + A vanilla PyTorch DataLoader to prepare. + device_placement (`bool`, *optional*): + Whether or not to place the batches on the proper device in the prepared dataloader. Will default to + `self.device_placement`. + slice_fn_for_dispatch (`Callable`, *optional*): + If passed, this function will be used to slice tensors across `num_processes`. Will default to + [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will + be ignored otherwise. + + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> data_loader = torch.utils.data.DataLoader(...) + >>> data_loader = accelerator.prepare_data_loader(data_loader, device_placement=True) + ``` + """ + # Ensure we can't double wrap a DataLoader due to `find_batch_size` + if getattr(data_loader, "_is_accelerate_prepared", False): + if data_loader not in self._dataloaders: + self._dataloaders.append(data_loader) + return data_loader + if device_placement is None: + device_placement = self.device_placement if self.distributed_type != DistributedType.XLA else False + prepared_data_loader = prepare_data_loader( + data_loader, + self.device, + num_processes=self.num_processes, + process_index=self.process_index, + split_batches=self.split_batches, + put_on_device=device_placement, + rng_types=self.rng_types.copy(), + dispatch_batches=self.dispatch_batches, + even_batches=self.even_batches, + slice_fn_for_dispatch=slice_fn_for_dispatch, + use_seedable_sampler=self.use_seedable_sampler, + non_blocking=self.non_blocking, + ) + self._dataloaders.append(prepared_data_loader) + return prepared_data_loader + + def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None): + """ + Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + optimizer (`torch.optim.Optimizer`): + A vanilla PyTorch optimizer to prepare. + device_placement (`bool`, *optional*): + Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`. 
+ + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> optimizer = torch.optim.Adam(...) + >>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True) + ``` + """ + if is_lomo_available(): + # We need to import locally to avoid circular imports since lomo imports stuff from + # transformers & accelerate + from lomo_optim import AdaLomo, Lomo + + self.has_lomo_optimizer = isinstance(optimizer, (Lomo, AdaLomo)) + + # Ensure we can't double wrap an optimizer due to `find_batch_size` + if getattr(optimizer, "_is_accelerate_prepared", False): + if optimizer not in self._optimizers: + self._optimizers.append(optimizer) + return optimizer + if device_placement is None: + device_placement = self.device_placement + optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler) + self._optimizers.append(optimizer) + return optimizer + + def prepare_scheduler(self, scheduler: LRScheduler): + """ + Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + scheduler (`torch.optim.lr_scheduler.LRScheduler`): + A vanilla PyTorch scheduler to prepare. + + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> optimizer = torch.optim.Adam(...) + >>> scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...) + >>> scheduler = accelerator.prepare_scheduler(scheduler) + ``` + """ + # Ensure we can't double wrap a scheduler due to `find_batch_size` + if getattr(scheduler, "_is_accelerate_prepared", False): + if scheduler not in self._schedulers: + self._schedulers.append(scheduler) + return scheduler + # We try to find the optimizer associated with `scheduler`; the default is the full list of optimizers. + optimizer = self._optimizers + for opt in self._optimizers: + if getattr(scheduler, "optimizer", None) == opt.optimizer: + optimizer = opt + break + scheduler = AcceleratedScheduler( + scheduler, + optimizer, + step_with_optimizer=self.step_scheduler_with_optimizer, + split_batches=self.split_batches, + ) + self._schedulers.append(scheduler) + return scheduler + + def backward(self, loss, **kwargs): + """ + Scales the gradients in accordance with the `GradientAccumulationPlugin` and calls the correct `backward()` based + on the configuration. + + Should be used in lieu of `loss.backward()`. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=2) + >>> outputs = model(inputs) + >>> loss = loss_fn(outputs, labels) + >>> accelerator.backward(loss) + ``` + """ + learning_rate = kwargs.get("learning_rate") + + if self.distributed_type != DistributedType.DEEPSPEED: + # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward` + loss = loss / self.gradient_accumulation_steps + if self.distributed_type == DistributedType.DEEPSPEED: + self.deepspeed_engine_wrapped.backward(loss, **kwargs) + elif self.distributed_type == DistributedType.MEGATRON_LM: + return + elif self.scaler is not None: + self.scaler.scale(loss).backward(**kwargs) + elif learning_rate is not None and self.has_lomo_optimizer: + self.lomo_backward(loss, learning_rate) + else: + loss.backward(**kwargs) + + def set_trigger(self): + """ + Sets the internal trigger tensor to 1 on the current process. A later check should follow, which will + test it across all processes. 
+ + Note: + Does not require `wait_for_everyone()` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume later in the training script + >>> # `should_do_breakpoint` is a custom function to monitor when to break, + >>> # e.g. when the loss is NaN + >>> if should_do_breakpoint(loss): + ... accelerator.set_trigger() + >>> # Assume later in the training script + >>> if accelerator.check_trigger(): + ... break + ``` + """ + self.flag_tensor = torch.tensor(1, device=self.device) + + def check_trigger(self): + """ + Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and + reset the trigger tensor to 0. + + Note: + Does not require `wait_for_everyone()` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume later in the training script + >>> # `should_do_breakpoint` is a custom function to monitor when to break, + >>> # e.g. when the loss is NaN + >>> if should_do_breakpoint(loss): + ... accelerator.set_trigger() + >>> # Assume later in the training script + >>> if accelerator.check_trigger(): + ... break + ``` + """ + # Now that we are outside `__init__`, we can initialize it on the device if it is `None` + if self.flag_tensor is None: + self.flag_tensor = torch.tensor(0, device=self.device) + flag_tensor = self.reduce(self.flag_tensor) + if flag_tensor.item() >= 1: + self.flag_tensor = torch.tensor(0, device=self.device) + return True + return False + + def unscale_gradients(self, optimizer=None): + """ + Unscale the gradients in mixed precision training with AMP. This is a no-op in all other settings. + + Should likely be called through [`Accelerator.clip_grad_norm_`] or [`Accelerator.clip_grad_value_`] instead. + + Args: + optimizer (`torch.optim.Optimizer` or `list[torch.optim.Optimizer]`, *optional*): + The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers + that were passed to [`~Accelerator.prepare`]. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer = accelerator.prepare(model, optimizer) + >>> outputs = model(inputs) + >>> loss = loss_fn(outputs, labels) + >>> accelerator.backward(loss) + >>> accelerator.unscale_gradients(optimizer=optimizer) + ``` + """ + if self.native_amp and self.mixed_precision == "fp16": + if optimizer is None: + # TODO: this unscales all optimizers, whereas we should only unscale the one(s) holding the parameters. + optimizer = self._optimizers + elif not isinstance(optimizer, (tuple, list)): + optimizer = [optimizer] + for opt in optimizer: + while isinstance(opt, AcceleratedOptimizer): + opt = opt.optimizer + self.scaler.unscale_(opt) + + def clip_grad_norm_(self, parameters, max_norm, norm_type=2): + """ + Should be used in place of `torch.nn.utils.clip_grad_norm_`. + + Returns: + `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector). + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=2) + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + + >>> for input, target in dataloader: + ... optimizer.zero_grad() + ... output = model(input) + ... loss = loss_func(output, target) + ... accelerator.backward(loss) + ... if accelerator.sync_gradients: + ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) + ... 
optimizer.step() + ``` + """ + if self.distributed_type == DistributedType.FSDP: + self.unscale_gradients() + parameters = [p for p in parameters] + for model in self._models: + if parameters == [p for p in model.parameters()]: + return model.clip_grad_norm_(max_norm, norm_type) + elif self.distributed_type == DistributedType.DEEPSPEED: + # DeepSpeed handles gradient clipping internally, and `accelerator.backward(loss)` triggers it, + # so no implementation is needed here. We cannot return the gradient norm because DeepSpeed does the clipping. + return None + elif self.distributed_type == DistributedType.XLA: + # Reduce gradients first for XLA + for acc_opt in self._optimizers: + if not acc_opt.gradient_state.is_xla_gradients_synced: + opt = acc_opt + while isinstance(opt, AcceleratedOptimizer): + opt = opt.optimizer + gradients = xm._fetch_gradients(opt) + # Use xm.all_reduce to perform an in-place all-reduce. Recursively all-reducing each tensor + # one by one in self.reduce would not be in-place. + xm.all_reduce("sum", gradients, scale=1.0 / self.num_processes) + # Set is_xla_gradients_synced to True to avoid all-reducing twice in the AcceleratedOptimizer step. + acc_opt.gradient_state.is_xla_gradients_synced = True + self.unscale_gradients() + return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type) + + def clip_grad_value_(self, parameters, clip_value): + """ + Should be used in place of `torch.nn.utils.clip_grad_value_`. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=2) + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + + >>> for input, target in dataloader: + ... optimizer.zero_grad() + ... output = model(input) + ... loss = loss_func(output, target) + ... accelerator.backward(loss) + ... if accelerator.sync_gradients: + ... accelerator.clip_grad_value_(model.parameters(), clip_value) + ... optimizer.step() + ``` + """ + if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]: + raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.") + self.unscale_gradients() + torch.nn.utils.clip_grad_value_(parameters, clip_value) + + def gather(self, tensor): + """ + Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to + regroup the predictions from all processes when doing evaluation. + + Note: + This gather happens in all processes. + + Args: + tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): + The tensors to gather across all processes. + + Returns: + `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the + first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors. + + Example: + + ```python + >>> # Assuming four processes + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> process_tensor = torch.tensor([accelerator.process_index]) + >>> gathered_tensor = accelerator.gather(process_tensor) + >>> gathered_tensor + tensor([0, 1, 2, 3]) + ``` + """ + return gather(tensor) + + def gather_for_metrics(self, input_data, use_gather_object=False): + """ + Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be + used for gathering the inputs and targets for metric calculation. 
+ + Args: + input_data (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`): + The tensors or objects for calculating metrics across all processes. + use_gather_object (`bool`, *optional*, defaults to `False`): + Whether to forcibly use gather_object instead of gather (which is already done if all objects passed do + not contain tensors). This flag can be useful for gathering tensors with different sizes that we don't + want to pad and concatenate along the first dimension. Using it with GPU tensors is poorly supported + and inefficient, as it incurs a GPU -> CPU transfer since the tensors would be pickled. + + Example: + + ```python + >>> # Assuming two processes, with a batch size of 5 on a dataset with 9 samples + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader = torch.utils.data.DataLoader(range(9), batch_size=5) + >>> dataloader = accelerator.prepare(dataloader) + >>> batch = next(iter(dataloader)) + >>> gathered_items = accelerator.gather_for_metrics(batch) + >>> len(gathered_items) + 9 + ``` + """ + + try: + recursively_apply(lambda x: x, input_data, error_on_other_type=True) + all_tensors = True + except TypeError: + all_tensors = False + + use_gather_object = use_gather_object or not all_tensors + + if use_gather_object: + data = gather_object(input_data) + else: + data = self.gather(input_data) + + try: + if self.gradient_state.end_of_dataloader: + # at the end of a dataloader, `gather_for_metrics` degrades to a plain + # `gather` unless the dataset has a known remainder, so log that case. + if self.gradient_state.remainder == -1: + logger.info( + "The used dataset had no length, returning gathered tensors. You should drop the remainder yourself." + ) + return data + elif self.gradient_state.remainder > 0: + # Last batch needs to be truncated on distributed systems as it contains additional samples + def _adjust_samples(tensor): + return tensor[: self.gradient_state.remainder] + + if use_gather_object: + # gather_object puts the objects in a list + return _adjust_samples(data) + else: + return recursively_apply(_adjust_samples, data) + else: # remainder is 0 + # no remainder even though at the end of the dataloader, so nothing to do. + return data + else: + # Not at the end of the dataloader, no need to adjust the tensors + return data + except Exception: + # Dataset had no length or raised an error + return data + + def reduce(self, tensor, reduction="sum", scale=1.0): + """ + Reduce the values in *tensor* across all processes based on *reduction*. + + Note: + All processes get the reduced value. + + Args: + tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): + The tensors to reduce across all processes. + reduction (`str`, *optional*, defaults to "sum"): + A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation. + scale (`float`, *optional*, defaults to 1.0): + A scaling value to be applied after the reduce; only valid on XLA. + + Returns: + `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: + The reduced tensor(s). 
+ + Example: + + ```python + >>> # Assuming two processes + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> process_tensor = torch.arange(accelerator.num_processes) + 1 + (2 * accelerator.process_index) + >>> process_tensor = process_tensor.to(accelerator.device) + >>> reduced_tensor = accelerator.reduce(process_tensor, reduction="sum") + >>> reduced_tensor + tensor([4, 6]) + ``` + """ + return reduce(tensor, reduction, scale) + + def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False): + """ + Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so + they can safely be gathered. + + Args: + tensor (nested list/tuple/dictionary of `torch.Tensor`): + The data to gather. + dim (`int`, *optional*, defaults to 0): + The dimension on which to pad. + pad_index (`int`, *optional*, defaults to 0): + The value with which to pad. + pad_first (`bool`, *optional*, defaults to `False`): + Whether to pad at the beginning or the end. + + Returns: + `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: + The padded tensor(s). + + Example: + + ```python + >>> # Assuming two processes, with the first process having a tensor of size 1 and the second of size 2 + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> process_tensor = torch.arange(accelerator.process_index + 1).to(accelerator.device) + >>> padded_tensor = accelerator.pad_across_processes(process_tensor) + >>> padded_tensor.shape + torch.Size([2]) + ``` + """ + return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first) + + def unwrap_model(self, model, keep_fp32_wrapper: bool = True): + """ + Unwraps the `model` from the additional layer possibly added by [`~Accelerator.prepare`]. Useful before saving + the model. + + Args: + model (`torch.nn.Module`): + The model to unwrap. + keep_fp32_wrapper (`bool`, *optional*, defaults to `True`): + Whether to keep the mixed precision hook, if one was added. + + Returns: + `torch.nn.Module`: The unwrapped model. + + Example: + + ```python + >>> # Assuming two GPU processes + >>> from torch.nn.parallel import DistributedDataParallel + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model = accelerator.prepare(MyModel()) + >>> print(model.__class__.__name__) + DistributedDataParallel + + >>> model = accelerator.unwrap_model(model) + >>> print(model.__class__.__name__) + MyModel + ``` + """ + return extract_model_from_parallel(model, keep_fp32_wrapper) + + def wait_for_everyone(self): + """ + Will stop the execution of the current process until every other process has reached that point (so this does + nothing when the script is only run in one process). Useful to do before saving a model. + + Example: + + ```python + >>> # Assuming two GPU processes + >>> import time + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> if accelerator.is_main_process: + ... time.sleep(2) + ... else: + ... 
print("I'm waiting for the main process to finish its sleep...") + >>> accelerator.wait_for_everyone() + >>> # Should print on every process at the same time + >>> print("Everyone is here") + ``` + """ + wait_for_everyone() + + @on_main_process + def init_trackers(self, project_name: str, config: dict | None = None, init_kwargs: dict | None = {}): + """ + Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations. + + Args: + project_name (`str`): + The name of the project. All trackers will save their data based on this. + config (`dict`, *optional*): + Optional starting configuration to be logged. + init_kwargs (`dict`, *optional*): + A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. Should be + formatted like so: + ```python + {"wandb": {"tags": ["tag_a", "tag_b"]}} + ``` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(log_with="tensorboard") + >>> accelerator.init_trackers( + ... project_name="my_project", + ... config={"learning_rate": 0.001, "batch_size": 32}, + ... init_kwargs={"tensorboard": {"flush_secs": 60}}, + ... ) + ``` + """ + for tracker in self.log_with: + if issubclass(type(tracker), GeneralTracker): + # Custom trackers are already initialized + self.trackers.append(tracker) + else: + tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)] + if tracker_init.requires_logging_directory: + # We can skip this check since it was done in `__init__` + self.trackers.append( + tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {})) + ) + else: + self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {}))) + if config is not None: + for tracker in self.trackers: + tracker.store_init_configuration(config) + + def get_tracker(self, name: str, unwrap: bool = False): + """ + Returns a `tracker` from `self.trackers` based on `name` on the main process only. + + Args: + name (`str`): + The name of a tracker, corresponding to the `.name` property. + unwrap (`bool`): + Whether to return the tracker's internal tracking mechanism (`True`) or the wrapped tracker itself + (`False`, recommended). + + Returns: + `GeneralTracker`: The tracker corresponding to `name` if it exists. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(log_with="tensorboard") + >>> accelerator.init_trackers("my_project") + >>> tensorboard_tracker = accelerator.get_tracker("tensorboard") + ``` + """ + if len(self.trackers) > 0: + for tracker in self.trackers: + if tracker.name == name: + return tracker.tracker if unwrap else tracker + raise ValueError(f"{name} is not an available tracker stored inside the `Accelerator`.") + # Handle tracker only made on main process + return GeneralTracker(_blank=True) + + @on_main_process + def log(self, values: dict, step: int | None = None, log_kwargs: dict | None = {}): + """ + Logs `values` to all stored trackers in `self.trackers` on the main process only. + + Args: + values (`dict`): + Values should be a dictionary-like object containing only types `int`, `float`, or `str`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + log_kwargs (`dict`, *optional*): + A nested dictionary of kwargs to be passed to a specific tracker's `log` function. 
Should be formatted + like so: + ```python + {"wandb": {"tags": ["tag_a", "tag_b"]}} + ``` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(log_with="tensorboard") + >>> accelerator.init_trackers("my_project") + >>> accelerator.log({"loss": 0.5, "accuracy": 0.9}) + ``` + """ + for tracker in self.trackers: + tracker.log(values, step=step, **log_kwargs.get(tracker.name, {})) + + @on_main_process + def end_training(self): + """ + Runs any special end training behaviors, such as stopping trackers on the main process only. Should always be + called at the end of your script if using experiment tracking. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(log_with="tensorboard") + >>> accelerator.init_trackers("my_project") + >>> # Do training + >>> accelerator.end_training() + ``` + """ + for tracker in self.trackers: + tracker.finish() + + def save(self, obj, f, safe_serialization=False): + """ + Save the object passed to disk once per machine. Use in place of `torch.save`. + + Args: + obj (`object`): The object to save. + f (`str` or `os.PathLike`): Where to save the content of `obj`. + safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`. + + Note: + If `save_on_each_node` was passed in as a `ProjectConfiguration`, will save the object once per node, + rather than only once on the main node. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> arr = [0, 1, 2, 3] + >>> accelerator.save(arr, "array.pkl") + ``` + """ + save( + obj, + f, + save_on_each_node=self.project_configuration.save_on_each_node, + safe_serialization=safe_serialization, + ) + + def save_model( + self, + model: torch.nn.Module, + save_directory: Union[str, os.PathLike], + max_shard_size: Union[int, str] = "10GB", + safe_serialization: bool = True, + ): + """ + Save a model so that it can be re-loaded using [`load_checkpoint_in_model`]. + + Arguments: + model (`torch.nn.Module`): + Model to be saved. The model can be wrapped or unwrapped. + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): + The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be smaller + than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). + + + + If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard + which will be bigger than `max_shard_size`. + + + + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model = ... 
+ >>> accelerator.save_model(model, save_directory) + ``` + """ + + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + os.makedirs(save_directory, exist_ok=True) + + # get the state_dict of the model + if any( + [ + module._hf_hook.offload + for module in model.modules() + if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) + ] + ): + state_dict = get_state_dict_offloaded_model(model) + else: + if any(param.device == torch.device("meta") for param in model.parameters()): + raise RuntimeError("You can't save the model since some parameters are on the meta device.") + state_dict = self.get_state_dict(model) + + if safe_serialization: + state_dict = clean_state_dict_for_safetensors(state_dict) + weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME + + # Shard the model if it is too big. + shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name) + + # Clean the folder from a previous save + for filename in os.listdir(save_directory): + full_filename = os.path.join(save_directory, filename) + # If we have a shard file that is not going to be replaced, we delete it, but only from the main process + # in distributed settings to avoid race conditions. + weights_no_suffix = weights_name.replace(".bin", "") + + # make sure that the file to be deleted matches the format of a sharded file, e.g. pytorch_model-00001-of-00005 + filename_no_suffix = filename.replace(".bin", "") + reg = re.compile(r"(.*?)-\d{5}-of-\d{5}") + + if ( + filename.startswith(weights_no_suffix) + and os.path.isfile(full_filename) + and filename not in shards.keys() + and reg.fullmatch(filename_no_suffix) is not None + and PartialState().is_main_process + ): + os.remove(full_filename) + + # Save the model + for shard_file, shard in shards.items(): + self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization) + + if index is None: + # use `weights_name` so the logged path also accounts for `safe_serialization` + path_to_weights = os.path.join(save_directory, weights_name) + logger.info(f"Model weights saved in {path_to_weights}") + else: + save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME + save_index_file = os.path.join(save_directory, save_index_file) + # Save the index as well + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + logger.info( + f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " + f"split into {len(shards)} checkpoint shards. You can find where each parameter has been saved in the " + f"index located at {save_index_file}." + ) + + def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle: + """ + Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`]. + + Args: + hook (`Callable`): + A function to be called in [`Accelerator.save_state`] before `save_checkpoint`. + + The hook should have the following signature: + + `hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None` + + The `models` argument is the list of models saved in the accelerator state under `accelerator._models`, the + `weights` argument is the list of state dicts of those models, and the `input_dir` argument is the `output_dir` + passed to [`Accelerator.save_state`]. 
+ + + + Should only be used in conjunction with [`Accelerator.register_load_state_pre_hook`]. Can be useful to save + configurations in addition to model weights. Can also be used to overwrite model saving with a customized + method. In this case, make sure to remove already saved weights from the weights list. + + + + Returns: + `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling + `handle.remove()` + """ + handle = hooks.RemovableHandle(self._save_model_state_pre_hook) + self._save_model_state_pre_hook[handle.id] = hook + return handle + + def save_state(self, output_dir: str = None, safe_serialization: bool = True, **save_model_func_kwargs): + """ + Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder. + + If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled + then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater + than `total_limit` then the oldest save is deleted. Each checkpoint is saved in a separate folder named + `checkpoint_{iteration}`. + + Otherwise they are just saved to `output_dir`. + + + + Should only be used when wanting to save a checkpoint during training and restoring the state in the same + environment. + + + + Args: + output_dir (`str` or `os.PathLike`): + The name of the folder to save all relevant weights and states. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + save_model_func_kwargs (`dict`, *optional*): + Additional keyword arguments for saving model which can be passed to the underlying save function, such + as optional arguments for DeepSpeed's `save_checkpoint` function. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer, lr_scheduler = ... + >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) + >>> accelerator.save_state(output_dir="my_checkpoint") + ``` + """ + if self.project_configuration.automatic_checkpoint_naming: + output_dir = os.path.join(self.project_dir, "checkpoints") + os.makedirs(output_dir, exist_ok=True) + if self.project_configuration.automatic_checkpoint_naming: + folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)] + if ( + self.project_configuration.total_limit is not None + and (len(folders) + 1 > self.project_configuration.total_limit) + and self.is_main_process + ): + + def _inner(folder): + return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0] + + folders.sort(key=_inner) + logger.warning( + f"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for a new checkpoint." + ) + for folder in folders[: len(folders) + 1 - self.project_configuration.total_limit]: + shutil.rmtree(folder) + output_dir = os.path.join(output_dir, f"checkpoint_{self.save_iteration}") + if os.path.exists(output_dir): + raise ValueError( + f"Checkpoint directory {output_dir} ({self.save_iteration}) already exists. Please manually override `self.save_iteration` with the iteration to start from." 
+ ) + self.wait_for_everyone() + os.makedirs(output_dir, exist_ok=True) + logger.info(f"Saving current state to {output_dir}") + + if self.distributed_type == DistributedType.XLA: + # Finish running the previous step before checkpointing + xm.mark_step() + + # Save the models taking care of FSDP and DeepSpeed nuances + weights = [] + for i, model in enumerate(self._models): + if self.distributed_type == DistributedType.FSDP: + logger.info("Saving FSDP model") + save_fsdp_model(self.state.fsdp_plugin, self, model, output_dir, i) + logger.info(f"FSDP Model saved to output dir {output_dir}") + elif self.distributed_type == DistributedType.DEEPSPEED: + logger.info("Saving DeepSpeed Model and Optimizer") + ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}" + model.save_checkpoint(output_dir, ckpt_id, **save_model_func_kwargs) + logger.info(f"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}") + elif self.distributed_type == DistributedType.MEGATRON_LM: + logger.info("Saving Megatron-LM Model, Optimizer and Scheduler") + model.save_checkpoint(output_dir) + logger.info(f"Megatron-LM Model, Optimizer and Scheduler saved to output dir {output_dir}") + else: + weights.append(self.get_state_dict(model, unwrap=False)) + + # Save the optimizers taking care of FSDP and DeepSpeed nuances + optimizers = [] + if self.distributed_type == DistributedType.FSDP: + for i, opt in enumerate(self._optimizers): + logger.info("Saving FSDP Optimizer") + save_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], output_dir, i) + logger.info(f"FSDP Optimizer saved to output dir {output_dir}") + elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: + optimizers = self._optimizers + + # Save the lr schedulers taking care of DeepSpeed nuances + schedulers = [] + if self.distributed_type == DistributedType.DEEPSPEED: + for i, scheduler in enumerate(self._schedulers): + if isinstance(scheduler, DeepSpeedSchedulerWrapper): + continue + schedulers.append(scheduler) + elif self.distributed_type not in [DistributedType.MEGATRON_LM]: + schedulers = self._schedulers + + # Save the samplers of the dataloaders + dataloaders = self._dataloaders + + # Call model saving hooks that might have been registered with + # accelerator.register_save_state_pre_hook + for hook in self._save_model_state_pre_hook.values(): + hook(self._models, weights, output_dir) + + save_location = save_accelerator_state( + output_dir, + weights, + optimizers, + schedulers, + dataloaders, + self.state.process_index, + self.scaler, + save_on_each_node=self.project_configuration.save_on_each_node, + safe_serialization=safe_serialization, + ) + for i, obj in enumerate(self._custom_objects): + save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node) + self.project_configuration.iteration += 1 + return save_location + + def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle: + """ + Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`]. + + Args: + hook (`Callable`): + A function to be called in [`Accelerator.load_state`] before `load_checkpoint`. 
+
+        The hook should have the following signature:
+
+        `hook(models: list[torch.nn.Module], input_dir: str) -> None`
+
+        The `models` argument is the list of models saved in the accelerator state under `accelerator._models`, and
+        the `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`].
+
+
+
+        Should only be used in conjunction with [`Accelerator.register_save_state_pre_hook`]. Can be useful to load
+        configurations in addition to model weights. Can also be used to overwrite model loading with a customized
+        method. In this case, make sure to remove already loaded models from the models list.
+
+
+
+        Returns:
+            `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
+            `handle.remove()`
+        """
+        handle = hooks.RemovableHandle(self._load_model_state_pre_hook)
+        self._load_model_state_pre_hook[handle.id] = hook
+        return handle
+
+    def load_state(self, input_dir: str = None, **load_model_func_kwargs):
+        """
+        Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.
+
+
+
+        Should only be used in conjunction with [`Accelerator.save_state`]. If a file is not registered for
+        checkpointing, it will not be loaded even if it is stored in the directory.
+
+
+
+        Args:
+            input_dir (`str` or `os.PathLike`):
+                The name of the folder all relevant weights and states were saved in. Can be `None` if
+                `automatic_checkpoint_naming` is used, and will pick up from the latest checkpoint.
+            load_model_func_kwargs (`dict`, *optional*):
+                Additional keyword arguments for loading the model which can be passed to the underlying load
+                function, such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location`
+                to load the model and optimizer on.
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> model, optimizer, lr_scheduler = ...
+        >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
+        >>> accelerator.load_state("my_checkpoint")
+        ```
+        """
+        if input_dir is not None:
+            # Check if folder exists
+            input_dir = os.path.expanduser(input_dir)
+            if not os.path.isdir(input_dir):
+                raise ValueError(f"Tried to find {input_dir} but folder does not exist")
+        elif self.project_configuration.automatic_checkpoint_naming:
+            # Pick up from automatic checkpoint naming
+            input_dir = os.path.join(self.project_dir, "checkpoints")
+            folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)]
+
+            def _inner(folder):
+                return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
+
+            folders.sort(key=_inner)
+            input_dir = folders[-1]
+        else:
+            raise ValueError("No input_dir provided and automatic checkpoint naming is disabled.")
+        logger.info(f"Loading states from {input_dir}")
+
+        # Load the models taking care of FSDP and DeepSpeed nuances
+        models = []
+        for i, model in enumerate(self._models):
+            if self.distributed_type == DistributedType.FSDP:
+                logger.info("Loading FSDP model")
+                load_fsdp_model(self.state.fsdp_plugin, self, model, input_dir, i)
+                logger.info(f"FSDP Model loaded from input dir {input_dir}")
+            elif self.distributed_type == DistributedType.DEEPSPEED:
+                logger.info("Loading DeepSpeed Model and Optimizer")
+                ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}"
+                model.load_checkpoint(input_dir, ckpt_id, **load_model_func_kwargs)
+                logger.info(f"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}")
+            elif self.distributed_type == DistributedType.MEGATRON_LM:
+                logger.info("Loading Megatron-LM Model, Optimizer and Scheduler")
+                model.load_checkpoint(input_dir)
+                logger.info(f"Megatron-LM Model, Optimizer and Scheduler loaded from input dir {input_dir}")
+            else:
+                models.append(model)
+
+        # Load the optimizers taking care of FSDP and DeepSpeed nuances
+        optimizers = []
+        if self.distributed_type == DistributedType.FSDP:
+            for i, opt in enumerate(self._optimizers):
+                logger.info("Loading FSDP Optimizer")
+                load_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], input_dir, i)
+                logger.info(f"FSDP Optimizer loaded from input dir {input_dir}")
+        elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+            optimizers = self._optimizers
+
+        # Load the lr schedulers taking care of DeepSpeed nuances
+        schedulers = []
+        if self.distributed_type == DistributedType.DEEPSPEED:
+            for i, scheduler in enumerate(self._schedulers):
+                if isinstance(scheduler, DeepSpeedSchedulerWrapper):
+                    continue
+                schedulers.append(scheduler)
+        elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
+            schedulers = self._schedulers
+
+        dataloaders = self._dataloaders
+
+        # Call model loading hooks that might have been registered with
+        # accelerator.register_load_state_pre_hook
+        for hook in self._load_model_state_pre_hook.values():
+            hook(models, input_dir)
+
+        map_location = load_model_func_kwargs.pop("map_location", None)
+        if map_location is None:
+            if self.num_processes > 1 and self.distributed_type in (
+                DistributedType.MULTI_GPU,
+                DistributedType.MULTI_MLU,
+                DistributedType.MULTI_NPU,
+            ):
+                map_location = "on_device"
+            else:
+                map_location = "cpu"
+
+        load_accelerator_state(
+            input_dir,
+            models,
+            optimizers,
+            schedulers,
+            dataloaders,
+            self.state.process_index,
+            self.scaler,
+            map_location,
+            **load_model_func_kwargs,
+        )
+        custom_checkpoints = [
+            f for f in os.listdir(input_dir) if re.search(r"^custom_checkpoint_\d+\.pkl$", f) is not None
+        ]
+        if len(custom_checkpoints) != len(self._custom_objects):
+            err = f"Number of custom checkpoints in folder {input_dir} does not match the number of registered objects:"
+            err += f"\n\tFound checkpoints: {len(custom_checkpoints)}"
+            err += f"\n\tRegistered objects: {len(self._custom_objects)}\n"
+            err += "Please make sure to only load checkpoints from folders that were created with the same set of registered objects, "
+            err += "or avoid using `custom_checkpoint` in the filename for files in that same directory and load them in manually."
+            raise RuntimeError(err)
+        else:
+            logger.info(f"Loading in {len(custom_checkpoints)} custom states")
+            for index, obj in enumerate(self._custom_objects):
+                load_custom_state(obj, input_dir, index)
+
+    def free_memory(self, *objects):
+        """
+        Will release all references to the internal objects stored and call the garbage collector. You should call
+        this method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0.
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> model, optimizer, scheduler = ...
+        >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
+        >>> model, optimizer, scheduler = accelerator.free_memory(model, optimizer, scheduler)
+        ```
+        """
+        # DeepSpeed needs a bit more prep that should be done first
+        if hasattr(self, "deepspeed_engine_wrapped"):
+            if self.deepspeed_engine_wrapped is not None:
+                self.deepspeed_engine_wrapped.engine.destroy()
+            self.deepspeed_engine_wrapped = None
+        objects = release_memory(*objects)
+        self._schedulers = []
+        self._optimizers = []
+        self._models = []
+        self._dataloaders = []
+        self.step = 0
+        return objects
+
+    def clear(self, *objects):
+        """
+        Alias for [`Accelerator.free_memory`]; releases all references to the internal objects stored and calls the
+        garbage collector. You should call this method between two trainings with different models/optimizers.
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> model, optimizer, scheduler = ...
+        >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
+        >>> model, optimizer, scheduler = accelerator.clear(model, optimizer, scheduler)
+        ```
+        """
+        return self.free_memory(*objects)
+
+    def _get_named_parameters(self, *args):
+        named_parameters = {}
+        for obj in args:
+            if isinstance(obj, torch.nn.Module):
+                obj = extract_model_from_parallel(obj)
+                named_parameters.update({n: p for n, p in obj.named_parameters()})
+        return named_parameters
+
+    def _get_devices(self, *args):
+        model_device = None
+        optimizer_device = None
+        for obj in args:
+            # Loop through model parameters and stop at the first one once we have its device.
+            if isinstance(obj, torch.nn.Module):
+                for param in obj.parameters():
+                    model_device = param.device
+                    break
+            # Loop through optimizer parameter groups and stop at the first one once we have its device.
+            if isinstance(obj, torch.optim.Optimizer):
+                for param_group in obj.param_groups:
+                    if len(param_group["params"]) > 0:
+                        optimizer_device = param_group["params"][0].device
+                        break
+        return (model_device, optimizer_device)
+
+    def get_state_dict(self, model, unwrap=True):
+        """
+        Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full
+        precision.
+ + Args: + model (`torch.nn.Module`): + A PyTorch model sent through [`Accelerator.prepare`] + unwrap (`bool`, *optional*, defaults to `True`): + Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict + + Returns: + `dict`: The state dictionary of the model potentially without full precision. + + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> net = torch.nn.Linear(2, 2) + >>> net = accelerator.prepare(net) + >>> state_dict = accelerator.get_state_dict(net) + ``` + """ + + if self.distributed_type == DistributedType.DEEPSPEED: + if self.deepspeed_config["zero_optimization"]["stage"] == 3: + if model.zero_gather_16bit_weights_on_model_save(): + state_dict = model._zero3_consolidated_16bit_state_dict() + else: + raise ValueError( + "Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. " + "To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or " + "set `zero3_save_16bit_model` to True when using `accelerate config`. " + "To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights." + ) + else: + from deepspeed.checkpoint.utils import clone_tensors_for_torch_save + + state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict()) + elif self.distributed_type == DistributedType.FSDP: + from torch.distributed.fsdp import FullStateDictConfig, StateDictType + from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + + full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) + with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config): + state_dict = model.state_dict() + else: + if unwrap: + model = self.unwrap_model(model) + state_dict = model.state_dict() + + return state_dict + + def register_for_checkpointing(self, *objects): + """ + Makes note of `objects` and will save or load them in during `save_state` or `load_state`. + + These should be utilized when the state is being loaded or saved in the same script. It is not designed to be + used in different scripts. + + + + Every `object` must have a `load_state_dict` and `state_dict` function to be stored. + + + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume `CustomObject` has a `state_dict` and `load_state_dict` function. + >>> obj = CustomObject() + >>> accelerator.register_for_checkpointing(obj) + >>> accelerator.save_state("checkpoint.pt") + ``` + """ + invalid_objects = [] + for obj in objects: + if not hasattr(obj, "state_dict") or not hasattr(obj, "load_state_dict"): + invalid_objects.append(obj) + if len(invalid_objects) > 0: + err = "All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:" + for index, obj in enumerate(invalid_objects): + err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`" + raise ValueError(err) + self._custom_objects.extend(objects) + + @contextmanager + def autocast(self, cache_enabled: bool = False, autocast_handler: AutocastKwargs = None): + """ + Will apply automatic mixed-precision inside the block inside this context manager, if it is enabled. Nothing + different will happen otherwise. + + A different `autocast_handler` can be passed in to override the one set in the `Accelerator` object. 
This is + useful in blocks under `autocast` where you want to revert to fp32. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(mixed_precision="fp16") + >>> with accelerator.autocast(): + ... train() + ``` + """ + if cache_enabled: + warnings.warn( + "Passing `cache_enabled=True` to `accelerator.autocast` is deprecated and will be removed in v0.23.0. " + "Please use the `AutocastKwargs` class instead and pass it to the `Accelerator` as a `kwarg_handler`.", + FutureWarning, + ) + if self.autocast_handler is not None: + self.autocast_handler.cache_enabled = True + else: + self.autocast_handler = AutocastKwargs(cache_enabled=True) + if autocast_handler is None: + autocast_handler = self.autocast_handler + autocast_context = get_mixed_precision_context_manager(self.native_amp, autocast_handler) + autocast_context.__enter__() + # TODO: should the `yield` be in a try/finally block? + yield + autocast_context.__exit__(*sys.exc_info()) + + @property + def optimizer_step_was_skipped(self): + """ + Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which + case the learning rate should not be changed. + """ + for optimizer in self._optimizers: + if optimizer.step_was_skipped: + return True + return False + + def skip_first_batches(self, dataloader, num_batches: int = 0): + """ + Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. + + Args: + dataloader (`torch.utils.data.DataLoader`): The data loader in which to skip batches. + num_batches (`int`, *optional*, defaults to 0): The number of batches to skip + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + >>> skipped_dataloader = accelerator.skip_first_batches(dataloader, num_batches=2) + >>> # for the first epoch only + >>> for input, target in skipped_dataloader: + ... optimizer.zero_grad() + ... output = model(input) + ... loss = loss_func(output, target) + ... accelerator.backward(loss) + ... optimizer.step() + + >>> # subsequent epochs + >>> for input, target in dataloader: + ... optimizer.zero_grad() + ... ... + ``` + """ + return skip_first_batches(dataloader, num_batches=num_batches) + + def __deepcopy__(self, memo): + logger.info("Deep copying the `Accelerator` object, note that this will point to the same original object.") + return self + + def verify_device_map(self, model: torch.nn.Module) -> bool: + """ + Verifies that `model` has not been prepared with big model inference with a device-map resembling `auto`. + """ + # Checks if any of the child modules has the attribute `hf_device_map` and this map has more than one entry. + for m in model.modules(): + if hasattr(m, "hf_device_map") and len(m.hf_device_map) > 1: + return True + + return False + + def lomo_backward(self, loss: torch.Tensor, learning_rate: float) -> None: + """ + Runs backward pass on LOMO optimizers. 
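+
+        Example (a minimal sketch, not taken from the library docs; it assumes a `lomo_optim.Lomo` or
+        `lomo_optim.AdaLomo` optimizer was passed to `accelerator.prepare`, and that `model`,
+        `dataloader` and `loss_func` are defined elsewhere):
+
+        ```python
+        >>> for batch, targets in dataloader:
+        ...     loss = loss_func(model(batch), targets)
+        ...     # LOMO fuses the backward pass with the parameter update, so the learning
+        ...     # rate is passed here rather than to a separate `optimizer.step()` call.
+        ...     accelerator.lomo_backward(loss, learning_rate=3e-4)
+        ```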
+ """ + if is_lomo_available(): + # We need to import locally to avoid circular imports since lomo imports stuff from + # transformers & accelerate + from lomo_optim import AdaLomo, Lomo + + if learning_rate is None: + raise ValueError("A learning rate must be passed in order to call backward pass with LOMO optimizers.") + + _backward_called = False + + for optimizer in self._optimizers: + if isinstance(optimizer.optimizer, (Lomo, AdaLomo)): + optimizer.optimizer.fused_backward(loss, learning_rate) + _backward_called = True + + if not _backward_called: + raise ValueError( + "Backward pass not properly called on LOMO optimizers. Are you sure you passed a LOMO optimizer in accelerator.prepare()?" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/big_modeling.py b/llmeval-env/lib/python3.10/site-packages/accelerate/big_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..94febb5d3dde35689d99ebbf26c1c78346f0ab17 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/big_modeling.py @@ -0,0 +1,627 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +from contextlib import contextmanager +from functools import wraps +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn + +from .hooks import ( + AlignDevicesHook, + CpuOffload, + UserCpuOffloadHook, + add_hook_to_module, + attach_align_device_hook, + attach_align_device_hook_on_blocks, +) +from .utils import ( + OffloadedWeightsLoader, + check_cuda_p2p_ib_support, + check_device_map, + extract_submodules_state_dict, + find_tied_parameters, + get_balanced_memory, + infer_auto_device_map, + is_mlu_available, + is_npu_available, + is_torch_version, + is_xpu_available, + load_checkpoint_in_model, + offload_state_dict, + parse_flag_from_env, + retie_parameters, +) +from .utils.other import recursive_getattr + + +logger = logging.getLogger(__name__) + + +@contextmanager +def init_empty_weights(include_buffers: bool = None): + """ + A context manager under which models are initialized with all parameters on the meta device, therefore creating an + empty model. Useful when just initializing the model would blow the available RAM. + + Args: + include_buffers (`bool`, *optional*): + Whether or not to also put all buffers on the meta device while initializing. + + Example: + + ```python + import torch.nn as nn + from accelerate import init_empty_weights + + # Initialize a model with 100 billions parameters in no time and without using any RAM. + with init_empty_weights(): + tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) + ``` + + + + Any model created under this context manager has no weights. As such you can't do something like + `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`]. 
+    Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not
+    called.
+
+
+    """
+    if include_buffers is None:
+        include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
+    with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
+        yield f
+
+
+@contextmanager
+def init_on_device(device: torch.device, include_buffers: bool = None):
+    """
+    A context manager under which models are initialized with all parameters on the specified device.
+
+    Args:
+        device (`torch.device`):
+            Device to initialize all parameters on.
+        include_buffers (`bool`, *optional*):
+            Whether or not to also put all buffers on the specified device while initializing.
+
+    Example:
+
+    ```python
+    import torch.nn as nn
+    from accelerate import init_on_device
+
+    with init_on_device(device=torch.device("cuda")):
+        tst = nn.Linear(100, 100)  # on `cuda` device
+    ```
+    """
+    if include_buffers is None:
+        include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
+
+    # TODO(shingjan): remove the torch version check once older versions are deprecated
+    if is_torch_version(">=", "2.0") and include_buffers:
+        with device:
+            yield
+        return
+
+    old_register_parameter = nn.Module.register_parameter
+    if include_buffers:
+        old_register_buffer = nn.Module.register_buffer
+
+    def register_empty_parameter(module, name, param):
+        old_register_parameter(module, name, param)
+        if param is not None:
+            param_cls = type(module._parameters[name])
+            kwargs = module._parameters[name].__dict__
+            kwargs["requires_grad"] = param.requires_grad
+            module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
+
+    def register_empty_buffer(module, name, buffer, persistent=True):
+        old_register_buffer(module, name, buffer, persistent=persistent)
+        if buffer is not None:
+            module._buffers[name] = module._buffers[name].to(device)
+
+    # Patch tensor creation
+    if include_buffers:
+        tensor_constructors_to_patch = {
+            torch_function_name: getattr(torch, torch_function_name)
+            for torch_function_name in ["empty", "zeros", "ones", "full"]
+        }
+    else:
+        tensor_constructors_to_patch = {}
+
+    def patch_tensor_constructor(fn):
+        def wrapper(*args, **kwargs):
+            kwargs["device"] = device
+            return fn(*args, **kwargs)
+
+        return wrapper
+
+    try:
+        nn.Module.register_parameter = register_empty_parameter
+        if include_buffers:
+            nn.Module.register_buffer = register_empty_buffer
+        for torch_function_name in tensor_constructors_to_patch.keys():
+            setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
+        yield
+    finally:
+        nn.Module.register_parameter = old_register_parameter
+        if include_buffers:
+            nn.Module.register_buffer = old_register_buffer
+        for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
+            setattr(torch, torch_function_name, old_torch_function)
+
+
+def cpu_offload(
+    model: nn.Module,
+    execution_device: Optional[torch.device] = None,
+    offload_buffers: bool = False,
+    state_dict: Optional[Dict[str, torch.Tensor]] = None,
+    preload_module_classes: Optional[List[str]] = None,
+):
+    """
+    Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
+    copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
+    state dict and put on the execution device passed as they are needed, then offloaded again.
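+
+    For instance (a minimal sketch, not taken from the library docs; `MyModel` stands in for any
+    `torch.nn.Module`):
+
+    ```python
+    import torch
+    from accelerate import cpu_offload
+
+    model = cpu_offload(MyModel(), execution_device=torch.device("cuda:0"))
+    # All parameters now live in a CPU-side state dict; each layer's weights are moved
+    # to cuda:0 right before that layer's forward and offloaded again afterwards.
+    output = model(torch.randn(1, 16))
+    ```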
+
+    Args:
+        model (`torch.nn.Module`):
+            The model to offload.
+        execution_device (`torch.device`, *optional*):
+            The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
+            model's first parameter device.
+        offload_buffers (`bool`, *optional*, defaults to `False`):
+            Whether or not to offload the buffers with the model parameters.
+        state_dict (`Dict[str, torch.Tensor]`, *optional*):
+            The state dict of the model that will be kept on CPU.
+        preload_module_classes (`List[str]`, *optional*):
+            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+            of the forward. This should only be used for classes that have submodules which are registered but not
+            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+    """
+    if execution_device is None:
+        execution_device = next(iter(model.parameters())).device
+    if state_dict is None:
+        state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()}
+
+    add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+    attach_align_device_hook(
+        model,
+        execution_device=execution_device,
+        offload=True,
+        offload_buffers=offload_buffers,
+        weights_map=state_dict,
+        preload_module_classes=preload_module_classes,
+    )
+
+    return model
+
+
+def cpu_offload_with_hook(
+    model: torch.nn.Module,
+    execution_device: Optional[Union[int, str, torch.device]] = None,
+    prev_module_hook: Optional[UserCpuOffloadHook] = None,
+):
+    """
+    Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
+    [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
+    the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.
+
+    Args:
+        model (`torch.nn.Module`):
+            The model to offload.
+        execution_device(`str`, `int` or `torch.device`, *optional*):
+            The device on which the model should be executed. Will default to the MPS device if it's available, then
+            GPU 0 if there is a GPU, and finally to the CPU.
+        prev_module_hook (`UserCpuOffloadHook`, *optional*):
+            The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
+            offload method will be called just before the forward of the model to which this hook is attached.
+
+    Example:
+
+    ```py
+    model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
+    model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
+    model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)
+
+    hid_1 = model_1(input)
+    for i in range(50):
+        # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
+        hid_2 = model_2(hid_1)
+        # model2 is offloaded to the CPU just before this forward.
+        hid_3 = model_3(hid_2)
+
+    # For model3, you need to manually call the hook offload method.
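+    # (hook_1 and hook_2 fire automatically because they were chained via `prev_module_hook`.)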
+ hook_3.offload() + ``` + """ + hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook) + add_hook_to_module(model, hook, append=True) + user_hook = UserCpuOffloadHook(model, hook) + return model, user_hook + + +def disk_offload( + model: nn.Module, + offload_dir: Union[str, os.PathLike], + execution_device: Optional[torch.device] = None, + offload_buffers: bool = False, + preload_module_classes: Optional[List[str]] = None, +): + """ + Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as + memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and + put on the execution device passed as they are needed, then offloaded again. + + Args: + model (`torch.nn.Module`): The model to offload. + offload_dir (`str` or `os.PathLike`): + The folder in which to offload the model weights (or where the model weights are already offloaded). + execution_device (`torch.device`, *optional*): + The device on which the forward pass of the model will be executed (should be a GPU). Will default to the + model's first parameter device. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to offload the buffers with the model parameters. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + """ + if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")): + offload_state_dict(offload_dir, model.state_dict()) + if execution_device is None: + execution_device = next(iter(model.parameters())).device + weights_map = OffloadedWeightsLoader(save_folder=offload_dir) + + add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True) + attach_align_device_hook( + model, + execution_device=execution_device, + offload=True, + offload_buffers=offload_buffers, + weights_map=weights_map, + preload_module_classes=preload_module_classes, + ) + + return model + + +def dispatch_model( + model: nn.Module, + device_map: Dict[str, Union[str, int, torch.device]], + main_device: Optional[torch.device] = None, + state_dict: Optional[Dict[str, torch.Tensor]] = None, + offload_dir: Optional[Union[str, os.PathLike]] = None, + offload_index: Optional[Dict[str, str]] = None, + offload_buffers: bool = False, + skip_keys: Optional[Union[str, List[str]]] = None, + preload_module_classes: Optional[List[str]] = None, + force_hooks: bool = False, +): + """ + Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on + the CPU or even the disk. + + Args: + model (`torch.nn.Module`): + The model to dispatch. + device_map (`Dict[str, Union[str, int, torch.device]]`): + A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that + `"disk"` is accepted even if it's not a proper value for `torch.device`. + main_device (`str`, `int` or `torch.device`, *optional*): + The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or + `"disk"`. 
+        state_dict (`Dict[str, torch.Tensor]`, *optional*):
+            The state dict of the part of the model that will be kept on CPU.
+        offload_dir (`str` or `os.PathLike`):
+            The folder in which to offload the model weights (or where the model weights are already offloaded).
+        offload_index (`Dict`, *optional*):
+            A dictionary from weight name to their information (`dtype`/`shape` or safetensors filename). Will
+            default to the index saved in `save_folder`.
+        offload_buffers (`bool`, *optional*, defaults to `False`):
+            Whether or not to offload the buffers with the model parameters.
+        skip_keys (`str` or `List[str]`, *optional*):
+            A list of keys to ignore when moving inputs or outputs between devices.
+        preload_module_classes (`List[str]`, *optional*):
+            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+            of the forward. This should only be used for classes that have submodules which are registered but not
+            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+        force_hooks (`bool`, *optional*, defaults to `False`):
+            Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
+            single device.
+    """
+    # Error early if the device map is incomplete.
+    check_device_map(model, device_map)
+
+    # for backward compatibility
+    is_bnb_quantized = (
+        getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
+    ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
+
+    # We attach hooks if the device_map has at least 2 different devices or if
+    # force_hooks is set to `True`. Otherwise, the model is already loaded
+    # on a single device and the user can decide where to dispatch the model.
+    # If the model is quantized, we always force-dispatch the model.
+    if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks:
+        if main_device is None:
+            if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
+                main_device = "cpu"
+            else:
+                main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]
+
+        if main_device != "cpu":
+            cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
+            if state_dict is None and len(cpu_modules) > 0:
+                state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
+
+        disk_modules = [name for name, device in device_map.items() if device == "disk"]
+        if offload_dir is None and offload_index is None and len(disk_modules) > 0:
+            raise ValueError(
+                "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules "
+                f"need to be offloaded: {', '.join(disk_modules)}."
+            )
+        if (
+            len(disk_modules) > 0
+            and offload_index is None
+            and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
+        ):
+            disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
+            offload_state_dict(offload_dir, disk_state_dict)
+
+        execution_device = {
+            name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
+        }
+        execution_device[""] = main_device
+        offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"]
+        offload = {name: device in offloaded_devices for name, device in device_map.items()}
+        save_folder = offload_dir if len(disk_modules) > 0 else None
+        if state_dict is not None or save_folder is not None or offload_index is not None:
+            device = main_device if offload_index is not None else None
+            weights_map = OffloadedWeightsLoader(
+                state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
+            )
+        else:
+            weights_map = None
+
+        # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the
+        # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its
+        # original pointer) on each device.
+        tied_params = find_tied_parameters(model)
+
+        tied_params_map = {}
+        for group in tied_params:
+            for param_name in group:
+                # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need
+                # to care about views of tensors through storage_offset.
+                data_ptr = recursive_getattr(model, param_name).data_ptr()
+                tied_params_map[data_ptr] = {}
+
+        # Note: To handle the disk offloading case, we cannot simply use weights_map[param_name].data_ptr() as the reference pointer,
+        # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
+
+        attach_align_device_hook_on_blocks(
+            model,
+            execution_device=execution_device,
+            offload=offload,
+            offload_buffers=offload_buffers,
+            weights_map=weights_map,
+            skip_keys=skip_keys,
+            preload_module_classes=preload_module_classes,
+            tied_params_map=tied_params_map,
+        )
+
+        # Warn if there are any params on the meta device.
+        offloaded_devices_str = " and ".join(
+            [device for device in set(device_map.values()) if device in ("cpu", "disk")]
+        )
+        if len(offloaded_devices_str) > 0:
+            logging.warning(
+                f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}."
+            )
+
+        # Attaching the hook may break tied weights, so we retie them.
+        retie_parameters(model, tied_params)
+
+        # Add a warning to `.to()` and the backend-specific move methods such as `.cuda()`.
+        def add_warning(fn, model):
+            @wraps(fn)
+            def wrapper(*args, **kwargs):
+                warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
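+                # `.to(...)` may only change the dtype, so the warning is limited to calls that
+                # actually request a target device; the backend-specific movers always warn.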
+                if str(fn.__name__) == "to":
+                    to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
+                    if to_device is not None:
+                        logger.warning(warning_msg)
+                else:
+                    logger.warning(warning_msg)
+                for param in model.parameters():
+                    if param.device == torch.device("meta"):
+                        raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
+                return fn(*args, **kwargs)
+
+            return wrapper
+
+        model.to = add_warning(model.to, model)
+        if is_npu_available():
+            model.npu = add_warning(model.npu, model)
+        elif is_mlu_available():
+            model.mlu = add_warning(model.mlu, model)
+        elif is_xpu_available():
+            model.xpu = add_warning(model.xpu, model)
+        else:
+            model.cuda = add_warning(model.cuda, model)
+
+        # Check if we are using multi-gpus with RTX 4000 series
+        use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
+        if use_multi_gpu and not check_cuda_p2p_ib_support():
+            logger.warning(
+                "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
+                "This can affect the multi-gpu inference when using accelerate device_map. "
+                "Please make sure to update your driver to the latest version which resolves this."
+            )
+    else:
+        device = list(device_map.values())[0]
+        # `torch.Tensor.to()` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
+        if is_npu_available() and isinstance(device, int):
+            device = f"npu:{device}"
+        elif is_mlu_available() and isinstance(device, int):
+            device = f"mlu:{device}"
+        elif is_xpu_available() and isinstance(device, int):
+            device = f"xpu:{device}"
+        if device != "disk":
+            model.to(device)
+        else:
+            raise ValueError(
+                "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
+            )
+    # Convert OrderedDict back to dict for easier usage
+    model.hf_device_map = dict(device_map)
+    return model
+
+
+def load_checkpoint_and_dispatch(
+    model: nn.Module,
+    checkpoint: Union[str, os.PathLike],
+    device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None,
+    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
+    no_split_module_classes: Optional[List[str]] = None,
+    offload_folder: Optional[Union[str, os.PathLike]] = None,
+    offload_buffers: bool = False,
+    dtype: Optional[Union[str, torch.dtype]] = None,
+    offload_state_dict: Optional[bool] = None,
+    skip_keys: Optional[Union[str, List[str]]] = None,
+    preload_module_classes: Optional[List[str]] = None,
+    force_hooks: bool = False,
+    strict: bool = False,
+):
+    """
+    Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
+    loaded and adds the various hooks that will make this model run properly (even if split across devices).
+
+    Args:
+        model (`torch.nn.Module`): The model in which we want to load a checkpoint.
+        checkpoint (`str` or `os.PathLike`):
+            The folder checkpoint to load. It can be:
+            - a path to a file containing a whole model state dict
+            - a path to a `.json` file containing the index to a sharded checkpoint
+            - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
+        device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
+            A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
+            name, once a given module name is inside, every submodule of it will be sent to the same device.
+ + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more + information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map). + Defaults to None, which means [`dispatch_model`] will not be called. + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU + and the available CPU RAM if unset. + no_split_module_classes (`List[str]`, *optional*): + A list of layer class names that should never be split across device (for instance any layer that has a + residual connection). + offload_folder (`str` or `os.PathLike`, *optional*): + If the `device_map` contains any value `"disk"`, the folder where we will offload weights. + offload_buffers (`bool`, *optional*, defaults to `False`): + In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as + well as the parameters. + dtype (`str` or `torch.dtype`, *optional*): + If provided, the weights will be converted to that type when loaded. + offload_state_dict (`bool`, *optional*): + If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if + the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map + picked contains `"disk"` values. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + force_hooks (`bool`, *optional*, defaults to `False`): + Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a + single device. + strict (`bool`, *optional*, defaults to `False`): + Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's + state_dict. + + Example: + + ```python + >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch + >>> from huggingface_hub import hf_hub_download + >>> from transformers import AutoConfig, AutoModelForCausalLM + + >>> # Download the Weights + >>> checkpoint = "EleutherAI/gpt-j-6B" + >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin") + + >>> # Create a model and initialize it with empty weights + >>> config = AutoConfig.from_pretrained(checkpoint) + >>> with init_empty_weights(): + ... model = AutoModelForCausalLM.from_config(config) + + >>> # Load the checkpoint and dispatch it to the right devices + >>> model = load_checkpoint_and_dispatch( + ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"] + ... ) + ``` + """ + if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: + raise ValueError( + "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " + "'sequential'." 
+ ) + if isinstance(device_map, str): + if device_map != "sequential": + max_memory = get_balanced_memory( + model, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + dtype=dtype, + low_zero=(device_map == "balanced_low_0"), + ) + device_map = infer_auto_device_map( + model, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + dtype=dtype, + offload_buffers=offload_buffers, + ) + if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): + offload_state_dict = True + load_checkpoint_in_model( + model, + checkpoint, + device_map=device_map, + offload_folder=offload_folder, + dtype=dtype, + offload_state_dict=offload_state_dict, + offload_buffers=offload_buffers, + strict=strict, + ) + if device_map is None: + return model + return dispatch_model( + model, + device_map=device_map, + offload_dir=offload_folder, + offload_buffers=offload_buffers, + skip_keys=skip_keys, + preload_module_classes=preload_module_classes, + force_hooks=force_hooks, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/checkpointing.py b/llmeval-env/lib/python3.10/site-packages/accelerate/checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..d76881990417d4e453c0e95b244497d24f730236 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/checkpointing.py @@ -0,0 +1,273 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +from pathlib import Path +from typing import List + +import numpy as np +import torch +from safetensors.torch import load_file +from torch.cuda.amp import GradScaler + +from .utils import ( + MODEL_NAME, + OPTIMIZER_NAME, + RNG_STATE_NAME, + SAFE_MODEL_NAME, + SAFE_WEIGHTS_NAME, + SAMPLER_NAME, + SCALER_NAME, + SCHEDULER_NAME, + WEIGHTS_NAME, + get_pretty_name, + is_torch_xla_available, + is_xpu_available, + save, +) + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + +from .logging import get_logger +from .state import PartialState + + +logger = get_logger(__name__) + + +def save_accelerator_state( + output_dir: str, + model_states: List[dict], + optimizers: list, + schedulers: list, + dataloaders: list, + process_index: int, + scaler: GradScaler = None, + save_on_each_node: bool = False, + safe_serialization: bool = True, +): + """ + Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory. + + + + If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native + `pickle`. + + + + Args: + output_dir (`str` or `os.PathLike`): + The name of the folder to save all relevant weights and states. 
+ model_states (`List[torch.nn.Module]`): + A list of model states + optimizers (`List[torch.optim.Optimizer]`): + A list of optimizer instances + schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): + A list of learning rate schedulers + dataloaders (`List[torch.utils.data.DataLoader]`): + A list of dataloader instances to save their sampler states + process_index (`int`): + The current process index in the Accelerator state + scaler (`torch.cuda.amp.GradScaler`, *optional*): + An optional gradient scaler instance to save + save_on_each_node (`bool`, *optional*): + Whether to save on every node, or only the main node. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + """ + output_dir = Path(output_dir) + # Model states + for i, state in enumerate(model_states): + weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME + if i > 0: + weights_name = weights_name.replace(".", f"_{i}.") + output_model_file = output_dir.joinpath(weights_name) + save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization) + logger.info(f"Model weights saved in {output_model_file}") + # Optimizer states + for i, opt in enumerate(optimizers): + state = opt.state_dict() + optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" + output_optimizer_file = output_dir.joinpath(optimizer_name) + save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False) + logger.info(f"Optimizer state saved in {output_optimizer_file}") + # Scheduler states + for i, scheduler in enumerate(schedulers): + state = scheduler.state_dict() + scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" + output_scheduler_file = output_dir.joinpath(scheduler_name) + save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False) + logger.info(f"Scheduler state saved in {output_scheduler_file}") + # DataLoader states + for i, dataloader in enumerate(dataloaders): + sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" + output_sampler_file = output_dir.joinpath(sampler_name) + # Only save if we have our custom sampler + from .data_loader import IterableDatasetShard, SeedableRandomSampler + + if isinstance(dataloader.dataset, IterableDatasetShard): + sampler = dataloader.get_sampler() + if isinstance(sampler, SeedableRandomSampler): + save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False) + logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}") + + # GradScaler state + if scaler is not None: + state = scaler.state_dict() + output_scaler_file = output_dir.joinpath(SCALER_NAME) + torch.save(state, output_scaler_file) + logger.info(f"Gradient scaler state saved in {output_scaler_file}") + # Random number generator states + states = {} + states_name = f"{RNG_STATE_NAME}_{process_index}.pkl" + states["random_state"] = random.getstate() + states["numpy_random_seed"] = np.random.get_state() + states["torch_manual_seed"] = torch.get_rng_state() + if is_xpu_available(): + states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all() + else: + states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all() + if is_torch_xla_available(): + states["xm_seed"] = xm.get_rng_state() + output_states_file = output_dir.joinpath(states_name) + torch.save(states, 
output_states_file) + logger.info(f"Random states saved in {output_states_file}") + return output_dir + + +def load_accelerator_state( + input_dir, + models, + optimizers, + schedulers, + dataloaders, + process_index, + scaler=None, + map_location=None, + **load_model_func_kwargs, +): + """ + Loads states of the models, optimizers, scaler, and RNG generators from a given directory. + + Args: + input_dir (`str` or `os.PathLike`): + The name of the folder to load all relevant weights and states. + models (`List[torch.nn.Module]`): + A list of model instances + optimizers (`List[torch.optim.Optimizer]`): + A list of optimizer instances + schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): + A list of learning rate schedulers + process_index (`int`): + The current process index in the Accelerator state + scaler (`torch.cuda.amp.GradScaler`, *optional*): + An optional *GradScaler* instance to load + map_location (`str`, *optional*): + What device to load the optimizer state onto. Should be one of either "cpu" or "on_device". + load_model_func_kwargs (`dict`, *optional*): + Additional arguments that can be passed to the model's `load_state_dict` method. + """ + if map_location not in [None, "cpu", "on_device"]: + raise TypeError( + "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`" + ) + if map_location is None: + map_location = "cpu" + elif map_location == "on_device": + map_location = PartialState().device + + input_dir = Path(input_dir) + # Model states + for i, model in enumerate(models): + ending = f"_{i}" if i > 0 else "" + input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors") + if input_model_file.exists(): + state_dict = load_file(input_model_file, device=str(map_location)) + else: + # Load with torch + input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin") + state_dict = torch.load(input_model_file, map_location=map_location) + models[i].load_state_dict(state_dict, **load_model_func_kwargs) + logger.info("All model weights loaded successfully") + + # Optimizer states + for i, opt in enumerate(optimizers): + optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" + input_optimizer_file = input_dir.joinpath(optimizer_name) + optimizer_state = torch.load(input_optimizer_file, map_location=map_location) + optimizers[i].load_state_dict(optimizer_state) + logger.info("All optimizer states loaded successfully") + + # Scheduler states + for i, scheduler in enumerate(schedulers): + scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" + input_scheduler_file = input_dir.joinpath(scheduler_name) + scheduler.load_state_dict(torch.load(input_scheduler_file)) + logger.info("All scheduler states loaded successfully") + + for i, dataloader in enumerate(dataloaders): + sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" + input_sampler_file = input_dir.joinpath(sampler_name) + # Only load if we have our custom sampler + from .data_loader import IterableDatasetShard, SeedableRandomSampler + + if isinstance(dataloader.dataset, IterableDatasetShard): + sampler = dataloader.get_sampler() + if isinstance(sampler, SeedableRandomSampler): + sampler = dataloader.set_sampler(torch.load(input_sampler_file)) + logger.info("All dataloader sampler states loaded successfully") + + # GradScaler state + if scaler is not None: + input_scaler_file = input_dir.joinpath(SCALER_NAME) + scaler.load_state_dict(torch.load(input_scaler_file)) + 
logger.info("GradScaler state loaded successfully") + + # Random states + try: + states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl")) + random.setstate(states["random_state"]) + np.random.set_state(states["numpy_random_seed"]) + torch.set_rng_state(states["torch_manual_seed"]) + if is_xpu_available(): + torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"]) + else: + torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"]) + if is_torch_xla_available(): + xm.set_rng_state(states["xm_seed"]) + logger.info("All random states loaded successfully") + except Exception: + logger.info("Could not load random states") + + +def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False): + """ + Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl` + """ + # Should this be the right way to get a qual_name type value from `obj`? + save_location = Path(path) / f"custom_checkpoint_{index}.pkl" + logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}") + save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node) + + +def load_custom_state(obj, path, index: int = 0): + """ + Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl` + """ + load_location = f"{path}/custom_checkpoint_{index}.pkl" + logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}") + obj.load_state_dict(torch.load(load_location, map_location="cpu")) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/__init__.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cbe26c257b515f657c05e1996d517e69613972 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..ea5a08abf51a83ca048524ea0b8758f9d52b7edc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from accelerate.commands.config import get_config_parser
+from accelerate.commands.env import env_command_parser
+from accelerate.commands.estimate import estimate_command_parser
+from accelerate.commands.launch import launch_command_parser
+from accelerate.commands.test import test_command_parser
+from accelerate.commands.tpu import tpu_command_parser
+from accelerate.commands.utils import CustomArgumentParser
+
+
+def main():
+    parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
+    subparsers = parser.add_subparsers(help="accelerate command helpers")
+
+    # Register commands
+    get_config_parser(subparsers=subparsers)
+    estimate_command_parser(subparsers=subparsers)
+    env_command_parser(subparsers=subparsers)
+    launch_command_parser(subparsers=subparsers)
+    tpu_command_parser(subparsers=subparsers)
+    test_command_parser(subparsers=subparsers)
+
+    # Let's go
+    args = parser.parse_args()
+
+    if not hasattr(args, "func"):
+        parser.print_help()
+        exit(1)
+
+    # Run
+    args.func(args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e3491fee0ad28df82683a89d128bbc097053c2f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
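+
+# This module implements the interactive SageMaker branch of `accelerate config`: it
+# collects AWS credentials, an IAM role, and training options from the user (see
+# `get_sagemaker_input` below) and assembles them into a `SageMakerConfig`.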
+import json +import os + +from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES +from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType +from ...utils.imports import is_boto3_available +from .config_args import SageMakerConfig +from .config_utils import ( + DYNAMO_BACKENDS, + _ask_field, + _ask_options, + _convert_dynamo_backend, + _convert_mixed_precision, + _convert_sagemaker_distributed_mode, + _convert_yes_no_to_bool, +) + + +if is_boto3_available(): + import boto3 # noqa: F401 + + +def _create_iam_role_for_sagemaker(role_name): + iam_client = boto3.client("iam") + + sagemaker_trust_policy = { + "Version": "2012-10-17", + "Statement": [ + {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"} + ], + } + try: + # create the role, associated with the chosen trust policy + iam_client.create_role( + RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2) + ) + policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "sagemaker:*", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:GetAuthorizationToken", + "cloudwatch:PutMetricData", + "cloudwatch:GetMetricData", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "logs:GetLogEvents", + "s3:CreateBucket", + "s3:ListBucket", + "s3:GetBucketLocation", + "s3:GetObject", + "s3:PutObject", + ], + "Resource": "*", + } + ], + } + # attach policy to role + iam_client.put_role_policy( + RoleName=role_name, + PolicyName=f"{role_name}_policy_permission", + PolicyDocument=json.dumps(policy_document, indent=2), + ) + except iam_client.exceptions.EntityAlreadyExistsException: + print(f"role {role_name} already exists. 
Using existing one") + + +def _get_iam_role_arn(role_name): + iam_client = boto3.client("iam") + return iam_client.get_role(RoleName=role_name)["Role"]["Arn"] + + +def get_sagemaker_input(): + credentials_configuration = _ask_options( + "How do you want to authorize?", + ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], + int, + ) + aws_profile = None + if credentials_configuration == 0: + aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default") + os.environ["AWS_PROFILE"] = aws_profile + else: + print( + "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with," + "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" + ) + aws_access_key_id = _ask_field("AWS Access Key ID: ") + os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id + + aws_secret_access_key = _ask_field("AWS Secret Access Key: ") + os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key + + aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1") + os.environ["AWS_DEFAULT_REGION"] = aws_region + + role_management = _ask_options( + "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", + ["Provide IAM Role name", "Create new IAM role using credentials"], + int, + ) + if role_management == 0: + iam_role_name = _ask_field("Enter your IAM role name: ") + else: + iam_role_name = "accelerate_sagemaker_execution_role" + print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials') + _create_iam_role_for_sagemaker(iam_role_name) + + is_custom_docker_image = _ask_field( + "Do you want to use custom Docker image? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + docker_image = None + if is_custom_docker_image: + docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower()) + + is_sagemaker_inputs_enabled = _ask_field( + "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + sagemaker_inputs_file = None + if is_sagemaker_inputs_enabled: + sagemaker_inputs_file = _ask_field( + "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", + lambda x: str(x).lower(), + ) + + is_sagemaker_metrics_enabled = _ask_field( + "Do you want to enable SageMaker metrics? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + sagemaker_metrics_file = None + if is_sagemaker_metrics_enabled: + sagemaker_metrics_file = _ask_field( + "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", + lambda x: str(x).lower(), + ) + + distributed_type = _ask_options( + "What is the distributed mode?", + ["No distributed training", "Data parallelism"], + _convert_sagemaker_distributed_mode, + ) + dynamo_config = {} + use_dynamo = _ask_field( + "Do you wish to optimize your script with torch dynamo?[yes/NO]:", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + if use_dynamo: + prefix = "dynamo_" + dynamo_config[prefix + "backend"] = _ask_options( + "Which dynamo backend would you like to use?", + [x.lower() for x in DYNAMO_BACKENDS], + _convert_dynamo_backend, + default=2, + ) + use_custom_options = _ask_field( + "Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + if use_custom_options: + dynamo_config[prefix + "mode"] = _ask_options( + "Which mode do you want to use?", + TORCH_DYNAMO_MODES, + lambda x: TORCH_DYNAMO_MODES[int(x)], + default="default", + ) + dynamo_config[prefix + "use_fullgraph"] = _ask_field( + "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + dynamo_config[prefix + "use_dynamic"] = _ask_field( + "Do you want to enable dynamic shape tracing? [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + ec2_instance_query = "Which EC2 instance type you want to use for your training?" + if distributed_type != SageMakerDistributedType.NO: + ec2_instance_type = _ask_options( + ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)] + ) + else: + ec2_instance_query += "? [ml.p3.2xlarge]:" + ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge") + + debug = False + if distributed_type != SageMakerDistributedType.NO: + debug = _ask_field( + "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ", + _convert_yes_no_to_bool, + default=False, + error_message="Please enter yes or no.", + ) + + num_machines = 1 + if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): + num_machines = _ask_field( + "How many machines do you want use? [1]: ", + int, + default=1, + ) + + mixed_precision = _ask_options( + "Do you wish to use FP16 or BF16 (mixed precision)?", + ["no", "fp16", "bf16", "fp8"], + _convert_mixed_precision, + ) + + if use_dynamo and mixed_precision == "no": + print( + "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." + ) + + return SageMakerConfig( + image_uri=docker_image, + compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, + distributed_type=distributed_type, + use_cpu=False, + dynamo_config=dynamo_config, + ec2_instance_type=ec2_instance_type, + profile=aws_profile, + region=aws_region, + iam_role_name=iam_role_name, + mixed_precision=mixed_precision, + num_machines=num_machines, + sagemaker_inputs_file=sagemaker_inputs_file, + sagemaker_metrics_file=sagemaker_metrics_file, + debug=debug, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/env.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/env.py new file mode 100644 index 0000000000000000000000000000000000000000..d9345326158c9dd2cb7a4f410a93d0e59f0f89eb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/env.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os +import platform +import subprocess + +import numpy as np +import psutil +import torch + +from accelerate import __version__ as version +from accelerate.commands.config import default_config_file, load_config_from_file + +from ..utils import is_mlu_available, is_npu_available, is_xpu_available + + +def env_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("env") + else: + parser = argparse.ArgumentParser("Accelerate env command") + + parser.add_argument( + "--config_file", default=None, help="The config file to use for the default values in the launching script." + ) + + if subparsers is not None: + parser.set_defaults(func=env_command) + return parser + + +def env_command(args): + pt_version = torch.__version__ + pt_cuda_available = torch.cuda.is_available() + pt_xpu_available = is_xpu_available() + pt_mlu_available = is_mlu_available() + pt_npu_available = is_npu_available() + + accelerate_config = "Not found" + # Get the default from the config file. + if args.config_file is not None or os.path.isfile(default_config_file): + accelerate_config = load_config_from_file(args.config_file).to_dict() + + # if we can run which, get it + command = None + bash_location = "Not found" + if os.name == "nt": + command = ["where", "accelerate"] + elif os.name == "posix": + command = ["which", "accelerate"] + if command is not None: + bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip() + info = { + "`Accelerate` version": version, + "Platform": platform.platform(), + "`accelerate` bash location": bash_location, + "Python version": platform.python_version(), + "Numpy version": np.__version__, + "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", + "PyTorch XPU available": str(pt_xpu_available), + "PyTorch NPU available": str(pt_npu_available), + "PyTorch MLU available": str(pt_mlu_available), + "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB", + } + if pt_cuda_available: + info["GPU type"] = torch.cuda.get_device_name() + if pt_npu_available: + info["CANN version"] = torch.version.cann + + print("\nCopy-and-paste the text below in your GitHub issue\n") + print("\n".join([f"- {prop}: {val}" for prop, val in info.items()])) + + print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:") + accelerate_config_str = ( + "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()]) + if isinstance(accelerate_config, dict) + else f"\t{accelerate_config}" + ) + print(accelerate_config_str) + + info["`Accelerate` configs"] = accelerate_config + + return info + + +def main() -> int: + parser = env_command_parser() + args = parser.parse_args() + env_command(args) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/estimate.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/estimate.py new file mode 100644 index 0000000000000000000000000000000000000000..56da3c5ad9e953687fab71dfc1fb0a878309d1d6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/estimate.py @@ -0,0 +1,309 @@ +#!/usr/bin/env python + +# Copyright 2023 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from huggingface_hub import model_info +from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError + +from accelerate import init_empty_weights +from accelerate.commands.utils import CustomArgumentParser +from accelerate.utils import ( + calculate_maximum_sizes, + convert_bytes, + is_timm_available, + is_transformers_available, +) + + +if is_transformers_available(): + import transformers + from transformers import AutoConfig, AutoModel + +if is_timm_available(): + import timm + + +def verify_on_hub(repo: str, token: str = None): + "Verifies that the model is on the hub and returns the model info." + try: + return model_info(repo, token=token) + except GatedRepoError: + return "gated" + except RepositoryNotFoundError: + return "repo" + + +def check_has_model(error): + """ + Checks what library spawned `error` when a model is not found + """ + if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]: + return "timm" + elif ( + is_transformers_available() + and isinstance(error, OSError) + and "does not appear to have a file named" in error.args[0] + ): + return "transformers" + else: + return "unknown" + + +def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None): + """ + Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption. + + Args: + model_name (`str`): + The model name on the Hub + library_name (`str`): + The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no + metadata on the Hub to determine the library. + trust_remote_code (`bool`, `optional`, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + access_token (`str`, `optional`, defaults to `None`): + The access token to use to access private or gated models on the Hub. (for use on the Gradio app) + + Returns: + `torch.nn.Module`: The torch model that has been initialized on the `meta` device. + + """ + model_info = verify_on_hub(model_name, access_token) + # Simplified errors + if model_info == "gated": + raise GatedRepoError( + f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`." + ) + elif model_info == "repo": + raise RepositoryNotFoundError( + f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo," + " make sure you are authenticated via `huggingface-cli login` and have access." 
+ ) + if library_name is None: + library_name = getattr(model_info, "library_name", False) + if not library_name: + raise ValueError( + f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)" + ) + if library_name == "transformers": + if not is_transformers_available(): + raise ImportError( + f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`" + ) + print(f"Loading pretrained config for `{model_name}` from `transformers`...") + if model_info.config is None: + raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.") + + auto_map = model_info.config.get("auto_map", False) + config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token) + with init_empty_weights(): + # remote code could specify a specific `AutoModel` class in the `auto_map` + constructor = AutoModel + if isinstance(auto_map, dict): + value = None + for key in auto_map.keys(): + if key.startswith("AutoModelFor"): + value = key + break + if value is not None: + constructor = getattr(transformers, value) + model = constructor.from_config(config, trust_remote_code=trust_remote_code) + elif library_name == "timm": + if not is_timm_available(): + raise ImportError( + f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`" + ) + print(f"Loading pretrained config for `{model_name}` from `timm`...") + with init_empty_weights(): + model = timm.create_model(model_name, pretrained=False) + else: + raise ValueError( + f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support." + ) + return model + + +def create_ascii_table(headers: list, rows: list, title: str): + "Creates a pretty table from a list of rows, minimal version of `tabulate`." 
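+    # Layout strategy: size each column to its widest cell (or header), build a
+    # printf-style row pattern from those widths, and widen the last column when
+    # the centered title would otherwise be wider than the separator row.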
sep_char, in_between = "│", "─" + column_widths = [] + for i in range(len(headers)): + column_values = [row[i] for row in rows] + [headers[i]] + max_column_width = max(len(value) for value in column_values) + column_widths.append(max_column_width) + + formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))] + + pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}" + diff = 0 + + def make_row(left_char, middle_char, right_char): + return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}" + + separator = make_row("├", "┼", "┤") + if len(title) > sum(column_widths): + diff = abs(len(title) - len(separator)) + column_widths[-1] += diff + + # Update with diff + separator = make_row("├", "┼", "┤") + initial_rows = [ + make_row("┌", in_between, "┐"), + f"{sep_char}{title.center(len(separator) - 2)}{sep_char}", + make_row("├", "┬", "┤"), + ] + table = "\n".join(initial_rows) + "\n" + column_widths[-1] += diff + centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)] + table += f"{pattern % tuple(centered_line)}\n{separator}\n" + for i, line in enumerate(rows): + centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)] + table += f"{pattern % tuple(centered_line)}\n" + table += f'└{"┴".join([in_between * n for n in column_widths])}┘' + + return table + + +def estimate_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("estimate-memory") + else: + parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.") + + parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.") + parser.add_argument( + "--library_name", + type=str, + help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.", + choices=["timm", "transformers"], + ) + parser.add_argument( + "--dtypes", + type=str, + nargs="+", + default=["float32", "float16", "int8", "int4"], + help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`", + choices=["float32", "float16", "int8", "int4"], + ) + parser.add_argument( + "--trust_remote_code", + action="store_true", + help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag + should only be used for repositories you trust and in which you have read the code, as it will execute + code present on the Hub on your local machine.""", + default=False, + ) + + if subparsers is not None: + parser.set_defaults(func=estimate_command) + return parser + + +def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict: + """ + Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of + 1. + + Args: + bytes (`int`): + The size of the model being trained. + mixed_precision (`str`): + The mixed precision that training would be run with. + msamp_config (`str`): + The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.
+ """ + memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1} + fp32_size = bytes + fp16_size = bytes // 2 + + if mixed_precision == "float32": + memory_sizes["model"] = fp32_size + memory_sizes["gradients"] = fp32_size + memory_sizes["optimizer"] = fp32_size * 2 + memory_sizes["step"] = fp32_size * 4 + elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None): + # With native `TransformersEngine`, there is no memory savings with FP8 + # With mixed precision training, the model has weights stored + # in FP16 and FP32 + memory_sizes["model"] = fp32_size + # 1.5 from weight gradient + computation (GEMM) + memory_sizes["gradients"] = fp32_size + fp16_size + # 2x from optimizer states + memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states + memory_sizes["step"] = memory_sizes["optimizer"] + return memory_sizes + + +def gather_data(args): + "Creates an empty model and gathers the data for the sizes" + try: + model = create_empty_model( + args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code + ) + except (RuntimeError, OSError) as e: + library = check_has_model(e) + if library != "unknown": + raise RuntimeError( + f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo." + ) + raise e + + total_size, largest_layer = calculate_maximum_sizes(model) + + data = [] + + for dtype in args.dtypes: + dtype_total_size = total_size + dtype_largest_layer = largest_layer[0] + dtype_training_size = estimate_training_usage(dtype_total_size, dtype) + if dtype == "float16": + dtype_total_size /= 2 + dtype_largest_layer /= 2 + elif dtype == "int8": + dtype_total_size /= 4 + dtype_largest_layer /= 4 + elif dtype == "int4": + dtype_total_size /= 8 + dtype_largest_layer /= 8 + data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size]) + return data + + +def estimate_command(args): + data = gather_data(args) + for row in data: + for i, item in enumerate(row): + if isinstance(item, (int, float)): + row[i] = convert_bytes(item) + elif isinstance(item, dict): + training_usage = max(item.values()) + row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A" + + headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"] + + title = f"Memory Usage for loading `{args.model_name}`" + table = create_ascii_table(headers, data, title) + print(table) + + +def main(): + parser = estimate_command_parser() + args = parser.parse_args() + estimate_command(args) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/launch.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..a35e0c422c8b998f06325117f79b7ee8cdc888c1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/launch.py @@ -0,0 +1,1092 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import importlib +import logging +import os +import subprocess +import sys +from pathlib import Path + +import psutil +import torch + +from accelerate.commands.config import default_config_file, load_config_from_file +from accelerate.commands.config.config_args import SageMakerConfig +from accelerate.commands.config.config_utils import DYNAMO_BACKENDS +from accelerate.commands.utils import CustomArgumentParser +from accelerate.state import get_int_from_env +from accelerate.utils import ( + ComputeEnvironment, + DistributedType, + PrepareForLaunch, + _filter_args, + check_cuda_p2p_ib_support, + convert_dict_to_env_variables, + is_bf16_available, + is_deepspeed_available, + is_mlu_available, + is_npu_available, + is_rich_available, + is_sagemaker_available, + is_torch_version, + is_torch_xla_available, + is_xpu_available, + patch_environment, + prepare_deepspeed_cmd_env, + prepare_multi_gpu_env, + prepare_sagemager_args_inputs, + prepare_simple_launcher_cmd_env, + prepare_tpu, +) +from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES + + +if is_rich_available(): + from rich import get_console + from rich.logging import RichHandler + + FORMAT = "%(message)s" + logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()]) + + +logger = logging.getLogger(__name__) + + +options_to_group = { + "multi_gpu": "Distributed GPUs", + "tpu": "TPU", + "use_deepspeed": "DeepSpeed Arguments", + "use_fsdp": "FSDP Arguments", + "use_megatron_lm": "Megatron-LM Arguments", +} + + +def clean_option(option): + "Finds all cases of - after the first two characters and changes them to _" + if option.startswith("--"): + return option[2:].replace("-", "_") + + +class CustomHelpFormatter(argparse.HelpFormatter): + """ + This is a custom help formatter that will hide all arguments that are not used in the command line when the help is + called. This is useful for the case where the user is using a specific platform and only wants to see the arguments + for that platform. 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.titles = [ + "Hardware Selection Arguments", + "Resource Selection Arguments", + "Training Paradigm Arguments", + "positional arguments", + "optional arguments", + ] + + def add_argument(self, action: argparse.Action): + if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]: + args = sys.argv[2:] + else: + args = sys.argv[1:] + + if len(args) > 1: + args = list(map(clean_option, args)) + used_platforms = [arg for arg in args if arg in options_to_group.keys()] + used_titles = [options_to_group[o] for o in used_platforms] + if action.container.title not in self.titles + used_titles: + action.help = argparse.SUPPRESS + elif action.container.title == "Hardware Selection Arguments": + if set(action.option_strings).isdisjoint(set(args)): + action.help = argparse.SUPPRESS + else: + action.help = action.help + " (currently selected)" + elif action.container.title == "Training Paradigm Arguments": + if set(action.option_strings).isdisjoint(set(args)): + action.help = argparse.SUPPRESS + else: + action.help = action.help + " (currently selected)" + + action.option_strings = [s for s in action.option_strings if "-" not in s[2:]] + super().add_argument(action) + + def end_section(self): + if len(self._current_section.items) < 2: + self._current_section.items = [] + self._current_section.heading = "" + super().end_section() + + +def launch_command_parser(subparsers=None): + description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)" + if subparsers is not None: + parser = subparsers.add_parser( + "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter + ) + else: + parser = CustomArgumentParser( + "Accelerate launch command", + description=description, + add_help=False, + allow_abbrev=False, + formatter_class=CustomHelpFormatter, + ) + + parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.") + + parser.add_argument( + "--config_file", + default=None, + help="The config file to use for the default values in the launching script.", + ) + parser.add_argument( + "--quiet", + "-q", + action="store_true", + help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)", + ) + # Hardware selection arguments + hardware_args = parser.add_argument_group( + "Hardware Selection Arguments", "Arguments for selecting the hardware to be used." + ) + hardware_args.add_argument( + "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU." + ) + hardware_args.add_argument( + "--multi_gpu", + default=False, + action="store_true", + help="Whether or not this should launch a distributed GPU training.", + ) + hardware_args.add_argument( + "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training." + ) + hardware_args.add_argument( + "--ipex", + default=False, + action="store_true", + help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.", + ) + + # Resource selection arguments + resource_args = parser.add_argument_group( + "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used." 
+ ) + resource_args.add_argument( + "--mixed_precision", + type=str, + choices=["no", "fp16", "bf16", "fp8"], + help="Whether or not to use mixed precision training. " + "Choose between FP16 and BF16 (bfloat16) training. " + "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", + ) + resource_args.add_argument( + "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel." + ) + resource_args.add_argument( + "--num_machines", type=int, default=None, help="The total number of machines used in this training." + ) + resource_args.add_argument( + "--num_cpu_threads_per_process", + type=int, + default=None, + help="The number of CPU threads per process. Can be tuned for optimal performance.", + ) + resource_args.add_argument( + "--enable_cpu_affinity", + default=False, + action="store_true", + help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.", + ) + + # Dynamo arguments + resource_args.add_argument( + "--dynamo_backend", + type=str, + choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS], + help="Choose a backend to optimize your training with dynamo, see more at " + "https://github.com/pytorch/torchdynamo.", + ) + resource_args.add_argument( + "--dynamo_mode", + type=str, + default="default", + choices=TORCH_DYNAMO_MODES, + help="Choose a mode to optimize your training with dynamo.", + ) + resource_args.add_argument( + "--dynamo_use_fullgraph", + default=False, + action="store_true", + help="Whether to use fullgraph mode for dynamo, or whether it is OK to break the model into several subgraphs", + ) + resource_args.add_argument( + "--dynamo_use_dynamic", + default=False, + action="store_true", + help="Whether to enable dynamic shape tracing.", + ) + + # Training Paradigm arguments + paradigm_args = parser.add_argument_group( + "Training Paradigm Arguments", "Arguments for selecting which training paradigm to use." + ) + paradigm_args.add_argument( + "--use_deepspeed", + default=False, + action="store_true", + help="Whether to use DeepSpeed.", + ) + paradigm_args.add_argument( + "--use_fsdp", + default=False, + action="store_true", + help="Whether to use FSDP.", + ) + paradigm_args.add_argument( + "--use_megatron_lm", + default=False, + action="store_true", + help="Whether to use Megatron-LM.", + ) + paradigm_args.add_argument( + "--use_xpu", + default=False, + action="store_true", + help="Whether to use the IPEX plugin to speed up training on XPU specifically.", + ) + + # distributed GPU training arguments + distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.") + distributed_args.add_argument( + "--gpu_ids", + default=None, + help="What GPUs (by id) should be used for training on this machine as a comma-separated list", + ) + distributed_args.add_argument( + "--same_network", + default=False, + action="store_true", + help="Whether all machines used for multinode training exist on the same local network.", + ) + distributed_args.add_argument( + "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched." + ) + distributed_args.add_argument( + "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0."
+ ) + distributed_args.add_argument( + "--main_process_port", + type=int, + default=None, + help="The port to use to communicate with the machine of rank 0.", + ) + distributed_args.add_argument( + "-t", + "--tee", + default="0", + type=str, + help="Tee std streams into a log file and also to console.", + ) + distributed_args.add_argument( + "--role", + type=str, + default="default", + help="User-defined role for the workers.", + ) + # Rendezvous related arguments + distributed_args.add_argument( + "--rdzv_backend", + type=str, + default="static", + help="The rendezvous method to use, such as 'static' (the default) or 'c10d'", + ) + distributed_args.add_argument( + "--rdzv_conf", + type=str, + default="", + help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).", + ) + distributed_args.add_argument( + "--max_restarts", + type=int, + default=0, + help="Maximum number of worker group restarts before failing.", + ) + distributed_args.add_argument( + "--monitor_interval", + type=float, + default=5, + help="Interval, in seconds, to monitor the state of workers.", + ) + parser.add_argument( + "-m", + "--module", + action="store_true", + help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.", + ) + parser.add_argument( + "--no_python", + action="store_true", + help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.", + ) + + # TPU arguments + tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.") + tpu_args.add_argument( + "--tpu_cluster", + action="store_true", + dest="tpu_use_cluster", + help="Whether to use a GCP TPU pod for training.", + ) + tpu_args.add_argument( + "--no_tpu_cluster", + action="store_false", + dest="tpu_use_cluster", + help="Should not be passed explicitly, this is for internal use only.", + ) + tpu_args.add_argument( + "--tpu_use_sudo", + action="store_true", + help="Whether to use `sudo` when running the TPU training script in each pod.", + ) + tpu_args.add_argument( + "--vm", + type=str, + action="append", + help=( + "List of single Compute VM instance names. " + "If not provided we assume usage of instance groups. For TPU pods." + ), + ) + tpu_args.add_argument( + "--env", + type=str, + action="append", + help="List of environment variables to set on the Compute VM instances. For TPU pods.", + ) + tpu_args.add_argument( + "--main_training_function", + type=str, + default=None, + help="The name of the main function to be executed in your script (only for TPU training).", + ) + tpu_args.add_argument( + "--downcast_bf16", + action="store_true", + help="Whether, when using bf16 precision on TPUs, both float and double tensors are cast to bfloat16, or whether double tensors remain as float32.", + ) + + # DeepSpeed arguments + deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.") + deepspeed_args.add_argument( + "--deepspeed_config_file", + default=None, + type=str, + help="DeepSpeed config file.", + ) + deepspeed_args.add_argument( + "--zero_stage", + default=None, + type=int, + help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to `2`.", + ) + deepspeed_args.add_argument( + "--offload_optimizer_device", + default=None, + type=str, + help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed).
" + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--offload_param_device", + default=None, + type=str, + help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--offload_optimizer_nvme_path", + default=None, + type=str, + help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--offload_param_nvme_path", + default=None, + type=str, + help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to 'none'.", + ) + deepspeed_args.add_argument( + "--gradient_accumulation_steps", + default=None, + type=int, + help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to `1`.", + ) + deepspeed_args.add_argument( + "--gradient_clipping", + default=None, + type=float, + help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). " + "If unspecified, will default to `1.0`.", + ) + deepspeed_args.add_argument( + "--zero3_init_flag", + default=None, + type=str, + help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. " + "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.", + ) + deepspeed_args.add_argument( + "--zero3_save_16bit_model", + default=None, + type=str, + help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. " + "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.", + ) + deepspeed_args.add_argument( + "--deepspeed_hostfile", + default=None, + type=str, + help="DeepSpeed hostfile for configuring multi-node compute resources.", + ) + deepspeed_args.add_argument( + "--deepspeed_exclusion_filter", + default=None, + type=str, + help="DeepSpeed exclusion filter string when using mutli-node setup.", + ) + deepspeed_args.add_argument( + "--deepspeed_inclusion_filter", + default=None, + type=str, + help="DeepSpeed inclusion filter string when using mutli-node setup.", + ) + deepspeed_args.add_argument( + "--deepspeed_multinode_launcher", + default=None, + type=str, + help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.", + ) + deepspeed_args.add_argument( + "--deepspeed_moe_layer_cls_names", + default=None, + type=str, + help="comma-separated list of transformer MoE layer class names (case-sensitive) to wrap ,e.g, `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..." + " (useful only when `use_deepspeed` flag is passed).", + ) + + # fsdp arguments + fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.") + fsdp_args.add_argument( + "--fsdp_offload_params", + default="false", + type=str, + help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_min_num_params", + type=int, + default=1e8, + help="FSDP's minimum number of parameters for Default Auto Wrapping. 
(useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_sharding_strategy", + type=str, + default="FULL_SHARD", + help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_auto_wrap_policy", + type=str, + default=None, + help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_transformer_layer_cls_to_wrap", + default=None, + type=str, + help="Transformer layer class name (case-sensitive) to wrap, e.g. `BertLayer`, `GPTJBlock`, `T5Block` .... " + "(useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_backward_prefetch_policy", + default=None, + type=str, + help="This argument is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use `fsdp_backward_prefetch` instead.", + ) + fsdp_args.add_argument( + "--fsdp_backward_prefetch", + default=None, + type=str, + help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_state_dict_type", + default=None, + type=str, + help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_forward_prefetch", + default="false", + type=str, + help="If True, then FSDP explicitly prefetches the next upcoming " + "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_use_orig_params", + default="true", + type=str, + help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters." + " (useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_cpu_ram_efficient_loading", + default="true", + type=str, + help="If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. " + "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to be True. " + "(useful only when `use_fsdp` flag is passed).", + ) + fsdp_args.add_argument( + "--fsdp_sync_module_states", + default="true", + type=str, + help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0." + " (useful only when `use_fsdp` flag is passed).", + ) + + # megatron_lm args + megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.") + megatron_lm_args.add_argument( + "--megatron_lm_tp_degree", + type=int, + default=1, + help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_pp_degree", + type=int, + default=1, + help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_num_micro_batches", + type=int, + default=None, + help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_sequence_parallelism", + default=None, + type=str, + help="Decides whether (true|false) to enable Sequence Parallelism when TP degree > 1.
" + "(useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_recompute_activations", + default=None, + type=str, + help="Decides Whether (true|false) to enable Selective Activation Recomputation. " + "(useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_use_distributed_optimizer", + default=None, + type=str, + help="Decides Whether (true|false) to use distributed optimizer " + "which shards optimizer state and gradients across Data Pralellel (DP) ranks. " + "(useful only when `use_megatron_lm` flag is passed).", + ) + megatron_lm_args.add_argument( + "--megatron_lm_gradient_clipping", + default=1.0, + type=float, + help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). " + "(useful only when `use_megatron_lm` flag is passed).", + ) + + # AWS arguments + aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.") + aws_args.add_argument( + "--aws_access_key_id", + type=str, + default=None, + help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job", + ) + aws_args.add_argument( + "--aws_secret_access_key", + type=str, + default=None, + help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.", + ) + parser.add_argument( + "--debug", + action="store_true", + help="Whether to print out the torch.distributed stack trace when something fails.", + ) + parser.add_argument( + "training_script", + type=str, + help=( + "The full path to the script to be launched in parallel, followed by all the arguments for the training " + "script." + ), + ) + + # MPI arguments + mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU") + mpirun_args.add_argument( + "--mpirun_hostfile", + type=str, + default=None, + help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will " + "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.", + ) + mpirun_args.add_argument( + "--mpirun_ccl", + type=int, + default=1, + help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.", + ) + + # Other arguments of the training scripts + parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.") + + if subparsers is not None: + parser.set_defaults(func=launch_command) + return parser + + +def simple_launcher(args): + cmd, current_env = prepare_simple_launcher_cmd_env(args) + + process = subprocess.Popen(cmd, env=current_env) + process.wait() + if process.returncode != 0: + if not args.quiet: + raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) + else: + sys.exit(1) + + +def multi_gpu_launcher(args): + import torch.distributed.run as distrib_run + + current_env = prepare_multi_gpu_env(args) + if not check_cuda_p2p_ib_support(): + message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." 
+ warn = False + if "NCCL_P2P_DISABLE" not in current_env: + current_env["NCCL_P2P_DISABLE"] = "1" + warn = True + if "NCCL_IB_DISABLE" not in current_env: + current_env["NCCL_IB_DISABLE"] = "1" + warn = True + if warn: + logger.warning(message) + + debug = getattr(args, "debug", False) + args = _filter_args( + args, + distrib_run.get_args_parser(), + ["--training_script", args.training_script, "--training_script_args", args.training_script_args], + ) + + with patch_environment(**current_env): + try: + distrib_run.run(args) + except Exception: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + else: + raise + + +def deepspeed_launcher(args): + import torch.distributed.run as distrib_run + + if not is_deepspeed_available(): + raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.") + else: + from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME + + cmd, current_env = prepare_deepspeed_cmd_env(args) + if not check_cuda_p2p_ib_support(): + message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled." + warn = False + if "NCCL_P2P_DISABLE" not in current_env: + current_env["NCCL_P2P_DISABLE"] = "1" + warn = True + if "NCCL_IB_DISABLE" not in current_env: + current_env["NCCL_IB_DISABLE"] = "1" + warn = True + if warn: + logger.warning(message) + + if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: + with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f: + valid_env_items = convert_dict_to_env_variables(current_env) + if len(valid_env_items) > 1: + f.writelines(valid_env_items) + + process = subprocess.Popen(cmd, env=current_env) + process.wait() + if process.returncode != 0: + if not args.quiet: + raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) + else: + sys.exit(1) + else: + debug = getattr(args, "debug", False) + args = _filter_args( + args, + distrib_run.get_args_parser(), + ["--training_script", args.training_script, "--training_script_args", args.training_script_args], + ) + with patch_environment(**current_env): + try: + distrib_run.run(args) + except Exception: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + else: + raise + + +def tpu_launcher(args): + import torch_xla.distributed.xla_multiprocessing as xmp + + if args.no_python: + raise ValueError("--no_python cannot be used with TPU launcher") + + args, current_env = prepare_tpu(args, {}) + + if args.module: + mod_name = args.training_script + else: + # Import training_script as a module + script_path = Path(args.training_script) + sys.path.append(str(script_path.parent.resolve())) + mod_name = script_path.stem + + mod = importlib.import_module(mod_name) + if not hasattr(mod, args.main_training_function): + raise ValueError( + f"Your training script should have a function named {args.main_training_function}, or you should pass a " + "different value to `--main_training_function`." 
+ ) + + # Patch sys.argv + sys.argv = [mod.__file__] + args.training_script_args + + main_function = getattr(mod, args.main_training_function) + with patch_environment(**current_env): + xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes) + + +def tpu_pod_launcher(args): + from torch_xla.distributed import xla_dist + + current_env = {} + args, current_env = prepare_tpu(args, current_env, True) + debug = getattr(args, "debug", False) + + training_script = args.training_script + training_script_args = args.training_script_args + new_args = _filter_args( + args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"] + ) + + if args.tpu_use_sudo: + new_cmd = ["sudo"] + else: + new_cmd = [] + + new_cmd += [ + "accelerate-launch", + "--tpu", + "--no_tpu_cluster", + "--num_machines", + "1", + "--mixed_precision", + "no", + "--dynamo_backend", + "no", + "--num_processes", + str(args.num_processes), + "--main_training_function", + str(args.main_training_function), + training_script, + ] + training_script_args + + new_args.positional = new_cmd + bad_flags = "" + for arg in vars(new_args): + if arg.startswith("docker_"): + value = getattr(new_args, arg) + if value != "" and value is not None: + bad_flags += f'{arg}="{value}"\n' + if bad_flags != "": + raise ValueError( + f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}" + ) + new_args.env = [f"{k}={v}" for k, v in current_env.items()] + new_args.env.append("ACCELERATE_IN_TPU_POD=1") + try: + xla_dist.resolve_and_execute(new_args) + except Exception: + if is_rich_available() and debug: + console = get_console() + console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]") + console.print_exception(suppress=[__file__], show_locals=False) + else: + raise + + +def sagemaker_launcher(sagemaker_config: SageMakerConfig, args): + if not is_sagemaker_available(): + raise ImportError( + "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`" + ) + if args.module or args.no_python: + raise ValueError( + "SageMaker requires a python training script file and cannot be used with --module or --no_python" + ) + + from sagemaker.huggingface import HuggingFace + + args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args) + + huggingface_estimator = HuggingFace(**args) + + huggingface_estimator.fit(inputs=sagemaker_inputs) + print(f"You can find your model data at: {huggingface_estimator.model_data}") + + +def _validate_launch_command(args): + # Sanity checks + if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1: + raise ValueError( + "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time." + ) + if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2): + raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.") + + defaults = None + warned = [] + mp_from_config_flag = False + # Get the default from the config file. 
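+    # Values from the config file are merged in below; most scalar options are
+    # only filled in when the corresponding CLI flag was left unset.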
+ if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu: + defaults = load_config_from_file(args.config_file) + if ( + not args.multi_gpu + and not args.tpu + and not args.tpu_use_cluster + and not args.use_deepspeed + and not args.use_fsdp + and not args.use_megatron_lm + ): + args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED + args.multi_gpu = ( + True + if defaults.distributed_type + in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_XPU, + ) + else False + ) + args.tpu = defaults.distributed_type == DistributedType.XLA + args.use_fsdp = defaults.distributed_type == DistributedType.FSDP + args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM + args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False + if args.gpu_ids is None: + if defaults.gpu_ids is not None: + args.gpu_ids = defaults.gpu_ids + else: + args.gpu_ids = "all" + + if args.multi_gpu and args.num_machines is None: + args.num_machines = defaults.num_machines + + if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1: + raise ValueError( + "Less than two GPU ids were configured and tried to run on multiple GPUs. " + "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`." + ) + if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE: + # Update args with the defaults + for name, attr in defaults.__dict__.items(): + if isinstance(attr, dict): + for k in defaults.deepspeed_config: + setattr(args, k, defaults.deepspeed_config[k]) + for k in defaults.fsdp_config: + arg_to_set = k + if "fsdp" not in arg_to_set: + arg_to_set = "fsdp_" + arg_to_set + setattr(args, arg_to_set, defaults.fsdp_config[k]) + for k in defaults.megatron_lm_config: + setattr(args, k, defaults.megatron_lm_config[k]) + for k in defaults.dynamo_config: + setattr(args, k, defaults.dynamo_config[k]) + for k in defaults.ipex_config: + setattr(args, k, defaults.ipex_config[k]) + for k in defaults.mpirun_config: + setattr(args, k, defaults.mpirun_config[k]) + continue + + # Those args are handled separately + if ( + name not in ["compute_environment", "mixed_precision", "distributed_type"] + and getattr(args, name, None) is None + ): + setattr(args, name, attr) + if not args.debug: + args.debug = defaults.debug + + if not args.mixed_precision: + if defaults.mixed_precision is None: + args.mixed_precision = "no" + else: + args.mixed_precision = defaults.mixed_precision + mp_from_config_flag = True + else: + if args.use_cpu or (args.use_xpu and torch.xpu.is_available()): + native_amp = is_torch_version(">=", "1.10") + else: + native_amp = is_bf16_available(True) + if ( + args.mixed_precision == "bf16" + and not native_amp + and not (args.tpu and is_torch_xla_available(check_is_tpu=True)) + ): + raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.") + + # Silently set the default here + if args.dynamo_backend is None: + args.dynamo_backend = "no" + else: + if args.num_processes is None: + if args.use_xpu and is_xpu_available(): + args.num_processes = torch.xpu.device_count() + elif is_mlu_available(): + args.num_processes = torch.mlu.device_count() + elif is_npu_available(): + args.num_processes = torch.npu.device_count() + else: + args.num_processes = torch.cuda.device_count() + warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`") + if args.debug
is None: + args.debug = False + if not args.multi_gpu and ( + (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1) + or (is_mlu_available() and torch.mlu.device_count() > 1) + or (is_npu_available() and torch.npu.device_count() > 1) + or (torch.cuda.device_count() > 1) + ): + warned.append( + "\t\tMore than one GPU was found, enabling multi-GPU training.\n" + "\t\tIf this was unintended please pass in `--num_processes=1`." + ) + args.multi_gpu = True + if args.num_machines is None: + warned.append("\t`--num_machines` was set to a value of `1`") + args.num_machines = 1 + if args.mixed_precision is None: + warned.append("\t`--mixed_precision` was set to a value of `'no'`") + args.mixed_precision = "no" + if not hasattr(args, "use_cpu"): + args.use_cpu = args.cpu + if args.dynamo_backend is None: + warned.append("\t`--dynamo_backend` was set to a value of `'no'`") + args.dynamo_backend = "no" + if args.debug: + logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.") + + is_aws_env_disabled = defaults is None or ( + defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER + ) + if is_aws_env_disabled and args.num_cpu_threads_per_process is None: + args.num_cpu_threads_per_process = 1 + if args.use_cpu and args.num_processes >= 1: + local_size = get_int_from_env( + ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1 + ) + threads_per_process = int(psutil.cpu_count(logical=False) / local_size) + if threads_per_process > 1: + args.num_cpu_threads_per_process = threads_per_process + warned.append( + f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs" + ) + + if any(warned): + message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n" + message += "\n".join(warned) + message += ( + "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`." 
+ ) + logger.warning(message) + return args, defaults, mp_from_config_flag + + +def launch_command(args): + args, defaults, mp_from_config_flag = _validate_launch_command(args) + # Use the proper launcher + if args.use_deepspeed and not args.cpu: + args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else [] + if mp_from_config_flag: + args.deepspeed_fields_from_accelerate_config.append("mixed_precision") + args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config) + deepspeed_launcher(args) + elif args.use_fsdp and not args.cpu: + multi_gpu_launcher(args) + elif args.use_megatron_lm and not args.cpu: + multi_gpu_launcher(args) + elif args.multi_gpu and not args.cpu: + multi_gpu_launcher(args) + elif args.tpu and not args.cpu: + if args.tpu_use_cluster: + tpu_pod_launcher(args) + else: + tpu_launcher(args) + elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: + sagemaker_launcher(defaults, args) + else: + simple_launcher(args) + + +def main(): + parser = launch_command_parser() + args = parser.parse_args() + launch_command(args) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c2c851cc0b192ab8207d3fa68d7409868c84354c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
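# ----------------------------------------------------------------------------
# [Editor's note: illustrative sketch, not part of the diff.] `launch_command`
# above selects exactly one launcher, checked in a fixed priority order:
# DeepSpeed first, then FSDP / Megatron-LM / plain multi-GPU (all three routed
# through `multi_gpu_launcher`), then TPU (pod or single), then SageMaker, with
# `simple_launcher` as the single-process fallback. A minimal mental model,
# assuming `args` was already filled in by `_validate_launch_command`:
#
#   >>> args.use_deepspeed, args.multi_gpu, args.tpu = False, True, False
#   >>> # -> multi_gpu_launcher(args) is the branch that runs
# ----------------------------------------------------------------------------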
+from .selection_menu import BulletMenu diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ad095e78cec41160e8b844cdbecc42a456fc256 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..975d1b667cdb3231f9d72f0f1bad26431de73ef1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1430b721b5997dc819d4b336e43d257d90151fe Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33d1eb1884383df404c7349b55570483dd5b28f3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53cbb8275837cec2039aaba2d60b52a63b2a03c7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10e9490c450bf007f8413825029a3d4a3a369e6a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f0bb7b68025ae4fe0c2c76c095eb36b4e64f2c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py @@ -0,0 +1,65 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet +""" + +import os +import sys +from contextlib import contextmanager + + +# Windows only +if os.name == "nt": + import ctypes + import msvcrt # noqa + + class CursorInfo(ctypes.Structure): + # _fields is a specific attr expected by ctypes + _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] + + +def hide_cursor(): + if os.name == "nt": + ci = CursorInfo() + handle = ctypes.windll.kernel32.GetStdHandle(-11) + ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) + ci.visible = False + ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) + elif os.name == "posix": + sys.stdout.write("\033[?25l") + sys.stdout.flush() + + +def show_cursor(): + if os.name == "nt": + ci = CursorInfo() + handle = ctypes.windll.kernel32.GetStdHandle(-11) + ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) + ci.visible = True + ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) + elif os.name == "posix": + sys.stdout.write("\033[?25h") + sys.stdout.flush() + + +@contextmanager +def hide(): + "Context manager to hide the terminal cursor" + try: + hide_cursor() + yield + finally: + show_cursor() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..de46f37ddcf4591167e3e01791391e4b1729034f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py @@ -0,0 +1,59 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
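# [Editor's note: usage sketch for the cursor utilities above, assuming the
# package is importable as laid out in this diff.]
#
#   >>> from accelerate.commands.menu import cursor
#   >>> with cursor.hide():
#   ...     pass  # cursor hidden here; show_cursor() runs on exit, even on error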
+ +""" +A variety of helper functions and constants when dealing with terminal menu choices, based on +https://github.com/bchao1/bullet +""" + +import enum +import shutil +import sys + + +TERMINAL_WIDTH, _ = shutil.get_terminal_size() + +CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"} + + +class Direction(enum.Enum): + UP = 0 + DOWN = 1 + + +def forceWrite(content, end=""): + sys.stdout.write(str(content) + end) + sys.stdout.flush() + + +def writeColor(content, color, end=""): + forceWrite(f"\u001b[{color}m{content}\u001b[0m", end) + + +def reset_cursor(): + forceWrite("\r") + + +def move_cursor(num_lines: int, direction: str): + forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}") + + +def clear_line(): + forceWrite(" " * TERMINAL_WIDTH) + reset_cursor() + + +def linebreak(): + reset_cursor() + forceWrite("-" * TERMINAL_WIDTH) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/input.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/input.py new file mode 100644 index 0000000000000000000000000000000000000000..2690f86aa61f7ac648f4a9c2040a34ee35147201 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/input.py @@ -0,0 +1,86 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
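# [Editor's note: a sketch of the ANSI escape conventions used by helpers.py
# above; the emitted sequences are standard VT100 codes.]
#
#   >>> from accelerate.commands.menu.helpers import writeColor, move_cursor
#   >>> writeColor("ok", 32)   # emits "\u001b[32mok\u001b[0m" (green text)
#   >>> move_cursor(2, "UP")   # emits "\033[2A", i.e. move the cursor up two lines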
+ +""" +This file contains utilities for handling input from the user and registering specific keys to specific functions, +based on https://github.com/bchao1/bullet +""" + +from typing import List + +from .keymap import KEYMAP, get_character + + +def mark(key: str): + """ + Mark the function with the key code so it can be handled in the register + """ + + def decorator(func): + handle = getattr(func, "handle_key", []) + handle += [key] + func.handle_key = handle + return func + + return decorator + + +def mark_multiple(*keys: List[str]): + """ + Mark the function with the key codes so it can be handled in the register + """ + + def decorator(func): + handle = getattr(func, "handle_key", []) + handle += keys + func.handle_key = handle + return func + + return decorator + + +class KeyHandler(type): + """ + Metaclass that adds the key handlers to the class + """ + + def __new__(cls, name, bases, attrs): + new_cls = super().__new__(cls, name, bases, attrs) + if not hasattr(new_cls, "key_handler"): + new_cls.key_handler = {} + new_cls.handle_input = KeyHandler.handle_input + + for value in attrs.values(): + handled_keys = getattr(value, "handle_key", []) + for key in handled_keys: + new_cls.key_handler[key] = value + return new_cls + + @staticmethod + def handle_input(cls): + "Finds and returns the selected character if it exists in the handler" + char = get_character() + if char != KEYMAP["undefined"]: + char = ord(char) + handler = cls.key_handler.get(char) + if handler: + cls.current_selection = char + return handler(cls) + else: + return None + + +def register(cls): + """Adds KeyHandler metaclass to the class""" + return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy()) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py new file mode 100644 index 0000000000000000000000000000000000000000..787db12860fe21c6786dda69c34fcccab114f2f8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py @@ -0,0 +1,133 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
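# [Editor's note: minimal sketch of the key-registration pattern defined in
# input.py above; `Demo` and `on_enter` are hypothetical names.]
#
#   >>> from accelerate.commands.menu import input as menu_input
#   >>> from accelerate.commands.menu.keymap import KEYMAP
#   >>> @menu_input.register
#   ... class Demo:
#   ...     @menu_input.mark(KEYMAP["newline"])
#   ...     def on_enter(self):
#   ...         return "enter pressed"
#   >>> # Demo.key_handler now maps KEYMAP["newline"] to on_enter;
#   >>> # Demo().handle_input() blocks for a key press and dispatches to it.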
+ +""" +Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet +""" + +import os +import string +import sys + + +ARROW_KEY_FLAG = 1 << 8 + +KEYMAP = { + "tab": ord("\t"), + "newline": ord("\r"), + "esc": 27, + "up": 65 + ARROW_KEY_FLAG, + "down": 66 + ARROW_KEY_FLAG, + "right": 67 + ARROW_KEY_FLAG, + "left": 68 + ARROW_KEY_FLAG, + "mod_int": 91, + "undefined": sys.maxsize, + "interrupt": 3, + "insert": 50, + "delete": 51, + "pg_up": 53, + "pg_down": 54, +} + +KEYMAP["arrow_begin"] = KEYMAP["up"] +KEYMAP["arrow_end"] = KEYMAP["left"] + +if sys.platform == "win32": + WIN_CH_BUFFER = [] + WIN_KEYMAP = { + b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG, + b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG, + b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG, + b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG, + b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG, + b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG, + b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG, + b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG, + } + +for i in range(10): + KEYMAP[str(i)] = ord(str(i)) + + +def get_raw_chars(): + "Gets raw characters from inputs" + if os.name == "nt": + import msvcrt + + encoding = "mbcs" + # Flush the keyboard buffer + while msvcrt.kbhit(): + msvcrt.getch() + if len(WIN_CH_BUFFER) == 0: + # Read the keystroke + ch = msvcrt.getch() + + # If it is a prefix char, get second part + if ch in (b"\x00", b"\xe0"): + ch2 = ch + msvcrt.getch() + # Translate actual Win chars to bullet char types + try: + chx = chr(WIN_KEYMAP[ch2]) + WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"])) + WIN_CH_BUFFER.append(chx) + if ord(chx) in ( + KEYMAP["insert"] - 1 << 9, + KEYMAP["delete"] - 1 << 9, + KEYMAP["pg_up"] - 1 << 9, + KEYMAP["pg_down"] - 1 << 9, + ): + WIN_CH_BUFFER.append(chr(126)) + ch = chr(KEYMAP["esc"]) + except KeyError: + ch = ch2[1] + else: + ch = ch.decode(encoding) + else: + ch = WIN_CH_BUFFER.pop(0) + elif os.name == "posix": + import termios + import tty + + fd = sys.stdin.fileno() + old_settings = termios.tcgetattr(fd) + try: + tty.setraw(fd) + ch = sys.stdin.read(1) + finally: + termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) + return ch + + +def get_character(): + "Gets a character from the keyboard and returns the key code" + char = get_raw_chars() + if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]: + return char + + elif ord(char) == KEYMAP["esc"]: + combo = get_raw_chars() + if ord(combo) == KEYMAP["mod_int"]: + key = get_raw_chars() + if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: + return chr(ord(key) + ARROW_KEY_FLAG) + else: + return KEYMAP["undefined"] + else: + return get_raw_chars() + + else: + if char in string.printable: + return char + else: + return KEYMAP["undefined"] diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py new file mode 100644 index 0000000000000000000000000000000000000000..ee9a771a54ef666ee46b67ae6c75fb957d49efdd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py @@ -0,0 +1,144 @@ +# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
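# [Editor's note: the arrow-key encoding from keymap.py above, worked through
# as a sketch. Arrow keys are offset by ARROW_KEY_FLAG (1 << 8 == 256) so they
# can never collide with single-byte printable characters.]
#
#   >>> from accelerate.commands.menu.keymap import KEYMAP, ARROW_KEY_FLAG
#   >>> KEYMAP["up"] == ord("A") + ARROW_KEY_FLAG == 321
#   True
#   >>> # get_character() re-adds the flag after seeing the "\x1b[" escape prefix.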
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Main driver for the selection menu, based on https://github.com/bchao1/bullet +""" + +import builtins +import sys + +from ...utils.imports import _is_package_available +from . import cursor, input +from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor +from .keymap import KEYMAP + + +in_colab = False +try: + in_colab = _is_package_available("google.colab") +except ModuleNotFoundError: + pass + + +@input.register +class BulletMenu: + """ + A CLI menu to select a choice from a list of choices using the keyboard. + """ + + def __init__(self, prompt: str = None, choices: list = []): + self.position = 0 + self.choices = choices + self.prompt = prompt + if sys.platform == "win32": + self.arrow_char = "*" + else: + self.arrow_char = "➔ " + + def write_choice(self, index, end: str = ""): + if sys.platform != "win32": + writeColor(self.choices[index], 32, end) + else: + forceWrite(self.choices[index], end) + + def print_choice(self, index: int): + "Prints the choice at the given index" + if index == self.position: + forceWrite(f" {self.arrow_char} ") + self.write_choice(index) + else: + forceWrite(f" {self.choices[index]}") + reset_cursor() + + def move_direction(self, direction: Direction, num_spaces: int = 1): + "Should not be directly called, used to move a direction of either up or down" + old_position = self.position + if direction == Direction.DOWN: + if self.position + 1 >= len(self.choices): + return + self.position += num_spaces + else: + if self.position - 1 < 0: + return + self.position -= num_spaces + clear_line() + self.print_choice(old_position) + move_cursor(num_spaces, direction.name) + self.print_choice(self.position) + + @input.mark(KEYMAP["up"]) + def move_up(self): + self.move_direction(Direction.UP) + + @input.mark(KEYMAP["down"]) + def move_down(self): + self.move_direction(Direction.DOWN) + + @input.mark(KEYMAP["newline"]) + def select(self): + move_cursor(len(self.choices) - self.position, "DOWN") + return self.position + + @input.mark(KEYMAP["interrupt"]) + def interrupt(self): + move_cursor(len(self.choices) - self.position, "DOWN") + raise KeyboardInterrupt + + @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)]) + def select_row(self): + index = int(chr(self.current_selection)) + movement = index - self.position + if index == self.position: + return + if index < len(self.choices): + if self.position > index: + self.move_direction(Direction.UP, -movement) + elif self.position < index: + self.move_direction(Direction.DOWN, movement) + else: + return + else: + return + + def run(self, default_choice: int = 0): + "Start the menu and return the selected choice" + if self.prompt: + linebreak() + forceWrite(self.prompt, "\n") + if in_colab: + forceWrite("Please input a choice index (starting from 0), and press enter", "\n") + else: + forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n") + self.position = default_choice + for i in range(len(self.choices)): + self.print_choice(i) + forceWrite("\n") + move_cursor(len(self.choices) - self.position, "UP") 
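# [Editor's note: what follows is the interactive loop: hide the cursor, poll
# handle_input() (or fall back to builtins.input() on Colab), then rewind and
# re-print only the chosen row. A hedged usage sketch:
#   >>> BulletMenu("Mixed precision?", ["no", "fp16", "bf16"]).run()
#   returns the selected index, e.g. 2 if "bf16" is chosen.]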
+ with cursor.hide(): + while True: + if in_colab: + try: + choice = int(builtins.input()) + except ValueError: + choice = default_choice + else: + choice = self.handle_input() + if choice is not None: + reset_cursor() + for _ in range(len(self.choices) + 1): + move_cursor(1, "UP") + clear_line() + self.write_choice(choice, "\n") + return choice diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/test.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/test.py new file mode 100644 index 0000000000000000000000000000000000000000..a0d2f7bcf14727aa13e3438f4cd6e6f140f5bb2f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/test.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package + + +def test_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("test") + else: + parser = argparse.ArgumentParser("Accelerate test command") + + parser.add_argument( + "--config_file", + default=None, + help=( + "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " + "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " + "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " + "with 'huggingface'." + ), + ) + + if subparsers is not None: + parser.set_defaults(func=test_command) + return parser + + +def test_command(args): + script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py") + + if args.config_file is None: + test_args = [script_name] + else: + test_args = f"--config_file={args.config_file} {script_name}".split() + + cmd = ["accelerate-launch"] + test_args + result = execute_subprocess_async(cmd) + if result.returncode == 0: + print("Test is a success! You are ready for your distributed training!") + + +def main(): + parser = test_command_parser() + args = parser.parse_args() + test_command(args) + + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/tpu.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/tpu.py new file mode 100644 index 0000000000000000000000000000000000000000..fc0f07bf8697bfdb6484d3bf817f2e18b1313b00 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/tpu.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os +import subprocess + +from packaging.version import Version, parse + +from accelerate.commands.config.config_args import default_config_file, load_config_from_file + + +_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`." + + +def tpu_command_parser(subparsers=None): + if subparsers is not None: + parser = subparsers.add_parser("tpu-config", description=_description) + else: + parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description) + # Core arguments + config_args = parser.add_argument_group( + "Config Arguments", "Arguments that can be configured through `accelerate config`." + ) + config_args.add_argument( + "--config_file", + type=str, + default=None, + help="Path to the config file to use for accelerate.", + ) + config_args.add_argument( + "--tpu_name", + default=None, + help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", + ) + config_args.add_argument( + "--tpu_zone", + default=None, + help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", + ) + pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.") + pod_args.add_argument( + "--use_alpha", + action="store_true", + help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", + ) + pod_args.add_argument( + "--command_file", + default=None, + help="The path to the file containing the commands to run on the pod on startup.", + ) + pod_args.add_argument( + "--command", + action="append", + nargs="+", + help="A command to run on the pod. Can be passed multiple times.", + ) + pod_args.add_argument( + "--install_accelerate", + action="store_true", + help="Whether to install accelerate on the pod. Defaults to False.", + ) + pod_args.add_argument( + "--accelerate_version", + default="latest", + help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", + ) + pod_args.add_argument( + "--debug", action="store_true", help="If set, will print the command that would be run instead of running it." + ) + + if subparsers is not None: + parser.set_defaults(func=tpu_command_launcher) + return parser + + +def tpu_command_launcher(args): + defaults = None + + # Get the default from the config file if it exists. 
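# [Editor's note: end-to-end sketch with hypothetical values. Running
# `accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-b
# --command "python train.py"` makes the function below ultimately invoke
# roughly:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-b \
#       --command "cd /usr/share; python train.py" --worker all
# ]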
+ if args.config_file is not None or os.path.isfile(default_config_file): + defaults = load_config_from_file(args.config_file) + if not args.command_file and defaults.command_file is not None and not args.command: + args.command_file = defaults.command_file + if not args.command and defaults.commands is not None: + args.command = defaults.commands + if not args.tpu_name: + args.tpu_name = defaults.tpu_name + if not args.tpu_zone: + args.tpu_zone = defaults.tpu_zone + if args.accelerate_version == "dev": + args.accelerate_version = "git+https://github.com/huggingface/accelerate.git" + elif args.accelerate_version == "latest": + args.accelerate_version = "accelerate -U" + elif isinstance(parse(args.accelerate_version), Version): + args.accelerate_version = f"accelerate=={args.accelerate_version}" + + if not args.command_file and not args.command: + raise ValueError("You must specify either a command file or a command to run on the pod.") + + if args.command_file: + with open(args.command_file) as f: + args.command = [f.read().splitlines()] + + # To turn list of lists into list of strings + if isinstance(args.command[0], list): + args.command = [line for cmd in args.command for line in cmd] + # Default to the shared folder and install accelerate + new_cmd = ["cd /usr/share"] + if args.install_accelerate: + new_cmd += [f"pip install {args.accelerate_version}"] + new_cmd += args.command + args.command = "; ".join(new_cmd) + + # Then send it to gcloud + # Eventually try to use google-api-core to do this instead of subprocess + cmd = ["gcloud"] + if args.use_alpha: + cmd += ["alpha"] + cmd += [ + "compute", + "tpus", + "tpu-vm", + "ssh", + args.tpu_name, + "--zone", + args.tpu_zone, + "--command", + args.command, + "--worker", + "all", + ] + if args.debug: + print(f"Running {' '.join(cmd)}") + return + subprocess.run(cmd) + print("Successfully setup pod.") + + +def main(): + parser = tpu_command_parser() + args = parser.parse_args() + + tpu_command_launcher(args) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/commands/utils.py b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b65215fac7666b475af98b17e264ef6701239bc1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/commands/utils.py @@ -0,0 +1,120 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + + +class _StoreAction(argparse.Action): + """ + Custom action that allows for `-` or `_` to be passed in for an argument. 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + new_option_strings = [] + for option_string in self.option_strings: + new_option_strings.append(option_string) + if "_" in option_string[2:]: + # Add `-` version to the option string + new_option_strings.append(option_string.replace("_", "-")) + self.option_strings = new_option_strings + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, values) + + +class _StoreConstAction(_StoreAction): + """ + Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`. + """ + + def __init__(self, option_strings, dest, const, default=None, required=False, help=None): + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + const=const, + default=default, + required=required, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + +class _StoreTrueAction(_StoreConstAction): + """ + Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`. + """ + + def __init__( + self, + option_strings, + dest, + default=None, + required=False, + help=None, + ): + super().__init__( + option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help + ) + + +class CustomArgumentGroup(argparse._ArgumentGroup): + """ + Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each + when applicable. + """ + + def _add_action(self, action): + args = vars(action) + if isinstance(action, argparse._StoreTrueAction): + action = _StoreTrueAction( + args["option_strings"], args["dest"], args["default"], args["required"], args["help"] + ) + elif isinstance(action, argparse._StoreConstAction): + action = _StoreConstAction( + args["option_strings"], + args["dest"], + args["const"], + args["default"], + args["required"], + args["help"], + ) + elif isinstance(action, argparse._StoreAction): + action = _StoreAction(**args) + action = super()._add_action(action) + return action + + +class CustomArgumentParser(argparse.ArgumentParser): + """ + Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each + when applicable. + """ + + def add_argument(self, *args, **kwargs): + if "action" in kwargs: + # Translate action -> class + if kwargs["action"] == "store_true": + kwargs["action"] = _StoreTrueAction + else: + kwargs["action"] = _StoreAction + super().add_argument(*args, **kwargs) + + def add_argument_group(self, *args, **kwargs): + group = CustomArgumentGroup(self, *args, **kwargs) + self._action_groups.append(group) + return group diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/data_loader.py b/llmeval-env/lib/python3.10/site-packages/accelerate/data_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..fcf6631f1622bb7845efcac0ba1acf09e108dc5c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/data_loader.py @@ -0,0 +1,1149 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from contextlib import suppress +from typing import Callable, List, Optional, Union + +import torch +from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler + +from .logging import get_logger +from .state import AcceleratorState, DistributedType, GradientState, is_torch_xla_available +from .utils import ( + RNGType, + broadcast, + broadcast_object_list, + concatenate, + find_batch_size, + get_data_structure, + initialize_tensors, + is_torch_version, + send_to_device, + slice_tensors, + synchronize_rng_states, +) + + +logger = get_logger(__name__) + +# kwargs of the DataLoader in min version 1.4.0. +_PYTORCH_DATALOADER_KWARGS = { + "batch_size": 1, + "shuffle": False, + "sampler": None, + "batch_sampler": None, + "num_workers": 0, + "collate_fn": None, + "pin_memory": False, + "drop_last": False, + "timeout": 0, + "worker_init_fn": None, + "multiprocessing_context": None, + "generator": None, + "prefetch_factor": 2, + "persistent_workers": False, +} + +# kwargs added in later versions +_PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {} + +for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items(): + if is_torch_version(">=", v): + _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs) + + +class SeedableRandomSampler(RandomSampler): + """ + Same as a random sampler, except that in `__iter__` a seed can be used. + + Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed + and be fully reproducible across multiple iterations. + + If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on + (stored in `self.epoch`). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.epoch = 0 + self.initial_seed = torch.random.initial_seed() + + def __iter__(self): + if self.generator is None: + self.generator = torch.Generator() + self.generator.manual_seed(self.initial_seed) + + # Allow `self.epoch` to modify the seed of the generator + seed = self.epoch + self.initial_seed + self.generator.manual_seed(seed) + yield from super().__iter__() + self.set_epoch(self.epoch + 1) + + def set_epoch(self, epoch: int): + "Sets the current iteration of the sampler." + self.epoch = epoch + + +class BatchSamplerShard(BatchSampler): + """ + Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will + always yield a number of batches that is a round multiple of `num_processes` and that all have the same size. + Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration + at the first batch that would be too small / not present on all processes or loop with indices from the beginning. + + Args: + batch_sampler (`torch.utils.data.sampler.BatchSampler`): + The batch sampler to split in several shards. + num_processes (`int`, *optional*, defaults to 1): + The number of processes running concurrently.
+ process_index (`int`, *optional*, defaults to 0): + The index of the current process. + split_batches (`bool`, *optional*, defaults to `False`): + Whether the shards should be created by splitting a batch to give a piece of it on each process, or by + yielding different full batches on each process. + + On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in: + + - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if + this argument is set to `False`. + - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]` + then `[6, 7]` if this argument is set to `True`. + even_batches (`bool`, *optional*, defaults to `True`): + Whether or not to loop back at the beginning of the sampler when the number of samples is not a round + multiple of (original batch size / number of processes). + + + + `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` + equal to `False`. + + """ + + def __init__( + self, + batch_sampler: BatchSampler, + num_processes: int = 1, + process_index: int = 0, + split_batches: bool = False, + even_batches: bool = True, + ): + if split_batches and batch_sampler.batch_size % num_processes != 0: + raise ValueError( + f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) " + f"needs to be a round multiple of the number of processes ({num_processes})." + ) + self.batch_sampler = batch_sampler + self.num_processes = num_processes + self.process_index = process_index + self.split_batches = split_batches + self.even_batches = even_batches + self.batch_size = getattr(batch_sampler, "batch_size", None) + self.drop_last = getattr(batch_sampler, "drop_last", False) + if self.batch_size is None and self.even_batches: + raise ValueError( + "You need to use `even_batches=False` when the batch sampler has no batch size. If you " + "are not calling this method directly, set `accelerator.even_batches=False` instead." + ) + + @property + def total_length(self): + return len(self.batch_sampler) + + def __len__(self): + if self.split_batches: + # Split batches does not change the length of the batch sampler + return len(self.batch_sampler) + if len(self.batch_sampler) % self.num_processes == 0: + # If the length is a round multiple of the number of processes, it's easy. + return len(self.batch_sampler) // self.num_processes + length = len(self.batch_sampler) // self.num_processes + if self.drop_last: + # Same if we drop the remainder. + return length + elif self.even_batches: + # When `even_batches` is enabled we always get +1 + return length + 1 + else: + # Otherwise it depends on the process index. + return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length + + def __iter__(self): + return self._iter_with_split() if self.split_batches else self._iter_with_no_split() + + def _iter_with_split(self): + initial_data = [] + batch_length = self.batch_sampler.batch_size // self.num_processes + for idx, batch in enumerate(self.batch_sampler): + if idx == 0: + initial_data = batch + if len(batch) == self.batch_size: + # If the batch is full, we yield the part of it this process is responsible for. + yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] + + # If drop_last is True or the last batch was full, iteration is over, otherwise...
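# [Editor's note: a worked sketch of split mode, assuming batch_size=4 and
# num_processes=2: each full batch [0, 1, 2, 3] is cut into pieces of
# batch_length=2, so process 0 yields [0, 1] and process 1 yields [2, 3].
# The branch below only handles a short final batch when drop_last=False.]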
+ if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size: + if not self.even_batches: + if len(batch) > batch_length * self.process_index: + yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] + else: + # For degenerate cases where the dataset has less than num_process * batch_size samples + while len(initial_data) < self.batch_size: + initial_data += initial_data + batch = batch + initial_data + yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)] + + def _iter_with_no_split(self): + initial_data = [] + batch_to_yield = [] + for idx, batch in enumerate(self.batch_sampler): + # We gather the initial indices in case we need to circle back at the end. + if not self.drop_last and idx < self.num_processes: + initial_data += batch + # We identify the batch to yield but wait until we are sure every process gets a full batch before actually + # yielding it. + if idx % self.num_processes == self.process_index: + batch_to_yield = batch + if idx % self.num_processes == self.num_processes - 1 and ( + self.batch_size is None or len(batch) == self.batch_size + ): + yield batch_to_yield + batch_to_yield = [] + + # If drop_last is True, iteration is over, otherwise... + if not self.drop_last and len(initial_data) > 0: + if not self.even_batches: + if len(batch_to_yield) > 0: + yield batch_to_yield + else: + # ... we yield the complete batch we had saved before if it has the proper length + if len(batch_to_yield) == self.batch_size: + yield batch_to_yield + + # For degenerate cases where the dataset has less than num_process * batch_size samples + while len(initial_data) < self.num_processes * self.batch_size: + initial_data += initial_data + + # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next + if len(batch) == self.batch_size: + batch = [] + idx += 1 + + # Make sure we yield a multiple of self.num_processes batches + cycle_index = 0 + while idx % self.num_processes != 0 or len(batch) > 0: + end_index = cycle_index + self.batch_size - len(batch) + batch += initial_data[cycle_index:end_index] + if idx % self.num_processes == self.process_index: + yield batch + cycle_index = end_index + batch = [] + idx += 1 + + +class IterableDatasetShard(IterableDataset): + """ + Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will + always yield a number of samples that is a round multiple of the actual batch size (depending on the value of + `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the + `drop_last` argument passed, it will either stop the iteration at the first batch that would + be too small or loop with indices from the beginning. + + Args: + dataset (`torch.utils.data.dataset.IterableDataset`): + The iterable dataset to split in several shards. + batch_size (`int`, *optional*, defaults to 1): + The size of the batches per shard (if `split_batches=False`) or the size of the batches (if + `split_batches=True`). + drop_last (`bool`, *optional*, defaults to `False`): + Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the + beginning. + num_processes (`int`, *optional*, defaults to 1): + The number of processes running concurrently. + process_index (`int`, *optional*, defaults to 0): + The index of the current process.
+ split_batches (`bool`, *optional*, defaults to `False`): + Whether the shards should be created by splitting a batch to give a piece of it on each process, or by + yielding different full batches on each process. + + On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in: + + - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this + argument is set to `False`. + - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if + this argument is set to `True`. + """ + + def __init__( + self, + dataset: IterableDataset, + batch_size: int = 1, + drop_last: bool = False, + num_processes: int = 1, + process_index: int = 0, + split_batches: bool = False, + ): + if split_batches and batch_size > 1 and batch_size % num_processes != 0: + raise ValueError( + f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) " + f"needs to be a round multiple of the number of processes ({num_processes})." + ) + self.dataset = dataset + self.batch_size = batch_size + self.drop_last = drop_last + self.num_processes = num_processes + self.process_index = process_index + self.split_batches = split_batches + + def set_epoch(self, epoch): + self.epoch = epoch + if hasattr(self.dataset, "set_epoch"): + self.dataset.set_epoch(epoch) + + def __len__(self): + # We will just raise the downstream error if the underlying dataset is not sized + if self.drop_last: + return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size + else: + return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size + + def __iter__(self): + if ( + not hasattr(self.dataset, "set_epoch") + and hasattr(self.dataset, "generator") + and isinstance(self.dataset.generator, torch.Generator) + ): + self.dataset.generator.manual_seed(self.epoch) + real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes) + process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size + process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size) + + first_batch = None + current_batch = [] + for element in self.dataset: + current_batch.append(element) + # Wait to have a full batch before yielding elements. + if len(current_batch) == real_batch_size: + for i in process_slice: + yield current_batch[i] + if first_batch is None: + first_batch = current_batch.copy() + current_batch = [] + + # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning. + if not self.drop_last and len(current_batch) > 0: + if first_batch is None: + first_batch = current_batch.copy() + while len(current_batch) < real_batch_size: + current_batch += first_batch + for i in process_slice: + yield current_batch[i] + + +class DataLoaderStateMixin: + """ + Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader such as at the + end of the iteration, the number of items in the dataset in the last batch relative to the batch size, and other + useful information that might be needed. 
+ + **Available attributes:** + + - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch + - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total + batch size + + """ + + def __init_subclass__(cls, **kwargs): + cls.end_of_dataloader = False + cls.remainder = -1 + + def reset(self): + self.end_of_dataloader = False + self.remainder = -1 + + def begin(self): + "Prepares the gradient state for the current dataloader" + self.reset() + with suppress(Exception): + if not self._drop_last: + length = getattr(self.dataset, "total_dataset_length", len(self.dataset)) + self.remainder = length % self.total_batch_size + self.gradient_state._add_dataloader(self) + + def end(self): + "Cleans up the gradient state after exiting the dataloader" + self.gradient_state._remove_dataloader(self) + + +class DataLoaderShard(DataLoader, DataLoaderStateMixin): + """ + Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup. + + Args: + dataset (`torch.utils.data.dataset.Dataset`): + The dataset to use to build this dataloader. + device (`torch.device`, *optional*): + If passed, the device to put all batches on. + rng_types (list of `str` or [`~utils.RNGType`]): + The list of random number generators to synchronize at the beginning of each iteration. Should be one or + several of: + + - `"torch"`: the base torch random number generator + - `"cuda"`: the CUDA random number generator (GPU only) + - `"xla"`: the XLA random number generator (TPU only) + - `"generator"`: an optional `torch.Generator` + synchronized_generator (`torch.Generator`, *optional*): + A random number generator to keep synchronized across processes. + skip_batches (`int`, *optional*, defaults to 0): + The number of batches to skip at the beginning. + **kwargs (additional keyword arguments, *optional*): + All other keyword arguments to pass to the regular `DataLoader` initialization. + + **Available attributes:** + + - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. + Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total + number of processes + + - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
+ """ + + def __init__( + self, + dataset, + device=None, + rng_types=None, + synchronized_generator=None, + skip_batches=0, + _drop_last: bool = False, + _non_blocking: bool = False, + **kwargs, + ): + super().__init__(dataset, **kwargs) + self.device = device + self.rng_types = rng_types + self.synchronized_generator = synchronized_generator + self.skip_batches = skip_batches + self.gradient_state = GradientState() + self._drop_last = _drop_last + self._non_blocking = _non_blocking + self.iteration = 0 + + def __iter__(self): + if self.rng_types is not None: + synchronize_rng_states(self.rng_types, self.synchronized_generator) + self.begin() + + self.set_epoch(self.iteration) + dataloader_iter = super().__iter__() + # We iterate one batch ahead to check when we are at the end + try: + current_batch = next(dataloader_iter) + except StopIteration: + yield + + batch_index = 0 + while True: + try: + # But we still move it to the device so it is done before `StopIteration` is reached + if self.device is not None: + current_batch = send_to_device(current_batch, self.device, non_blocking=self._non_blocking) + next_batch = next(dataloader_iter) + if batch_index >= self.skip_batches: + yield current_batch + batch_index += 1 + current_batch = next_batch + except StopIteration: + self.end_of_dataloader = True + if batch_index >= self.skip_batches: + yield current_batch + break + + self.iteration += 1 + self.end() + + def set_epoch(self, epoch: int): + # In case it is manually passed in, the user can set it to what they like + if self.iteration != epoch: + self.iteration = epoch + if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"): + self.batch_sampler.sampler.set_epoch(epoch) + # We support if a custom `Dataset` implementation has `set_epoch` + # or in general HF datasets `Datasets` + elif hasattr(self.dataset, "set_epoch"): + self.dataset.set_epoch(epoch) + + @property + def total_batch_size(self): + batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler + return ( + batch_sampler.batch_size + if getattr(batch_sampler, "split_batches", False) + else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1)) + ) + + @property + def total_dataset_length(self): + if hasattr(self.dataset, "total_length"): + return self.dataset.total_length + else: + return len(self.dataset) + + def get_sampler(self): + return get_sampler(self) + + def set_sampler(self, sampler): + sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler) + if sampler_is_batch_sampler: + self.sampler.sampler = sampler + else: + self.batch_sampler.sampler = sampler + if hasattr(self.batch_sampler, "batch_sampler"): + self.batch_sampler.batch_sampler.sampler = sampler + + +if is_torch_xla_available(): + import torch_xla.distributed.parallel_loader as xpl + + class MpDeviceLoaderWrapper(xpl.MpDeviceLoader): + """ + Wrapper for the xpl.MpDeviceLoader class that knows the total batch size. + + XLA preloading threads will all call DataLoaderShard's __iter__(). Remove rng_types from DataLoaderShard to + prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main + thread only. + + **Available attributes:** + + - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. 
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total + number of processes + + - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. + """ + + def __init__(self, dataloader: DataLoaderShard, device: torch.device): + super().__init__(dataloader, device) + self._rng_types = self._loader.rng_types + self._loader.rng_types = None + + def __iter__(self): + if self._rng_types is not None: + synchronize_rng_states(self._rng_types, self._loader.synchronized_generator) + + return super().__iter__() + + @property + def total_batch_size(self): + return self._loader.total_batch_size + + @property + def total_dataset_length(self): + return self._loader.total_dataset_length + + @property + def batch_sampler(self): + return self._loader.batch_sampler + + +class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin): + """ + Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each + process their part of the batch. + + Args: + split_batches (`bool`, *optional*, defaults to `False`): + Whether the resulting `DataLoader` should split the batches of the original data loader across devices or + yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of + `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be + the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial + `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch + size of the `dataloader` is a round multiple of `batch_size`. + skip_batches (`int`, *optional*, defaults to 0): + The number of batches to skip at the beginning of an iteration. + + **Available attributes:** + + - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes. + Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total + number of processes + + - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes. + """ + + def __init__( + self, + dataset, + split_batches: bool = False, + skip_batches=0, + _drop_last: bool = False, + _non_blocking: bool = False, + slice_fn=None, + **kwargs, + ): + shuffle = False + if is_torch_version(">=", "1.11.0"): + from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe + + # We need to save the shuffling state of the DataPipe + if isinstance(dataset, ShufflerIterDataPipe): + shuffle = dataset._shuffle_enabled + super().__init__(dataset, **kwargs) + self.split_batches = split_batches + if shuffle: + torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle) + + self.gradient_state = GradientState() + self.state = AcceleratorState() + self._drop_last = _drop_last + self._non_blocking = _non_blocking + self.skip_batches = skip_batches + + self.slice_fn = slice_tensors if slice_fn is None else slice_fn + self.iteration = 0 + + def _fetch_batches(self, iterator): + batches, batch = None, None + # On process 0, we gather the batch to dispatch. + if self.state.process_index == 0: + try: + if self.split_batches: + # One batch of the main iterator is dispatched and split. + batch = next(iterator) + else: + # num_processes batches of the main iterator are concatenated then dispatched and split. 
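# [Editor's note: sketch with num_processes=2 and per-process batch size B:
# process 0 fetches two source batches, concatenates them into one batch of
# size 2B, and shares only its structure via broadcast_object_list(); the
# tensor payload itself is broadcast() later in __iter__, where each rank
# slices out its own B-sized piece.]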
+ # We add the batches one by one so we have the remainder available when drop_last=False. + batches = [] + for _ in range(self.state.num_processes): + batches.append(next(iterator)) + try: + batch = concatenate(batches, dim=0) + except RuntimeError as e: + raise RuntimeError( + "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`. " + "Either pass `dispatch_batches=False` and have each process fetch its own batch, " + "or pass `split_batches=True`. By doing so, the main process will fetch a full batch and " + "slice it into `num_processes` batches for each process." + ) from e + # In both cases, we need to get the structure of the batch that we will broadcast on other + # processes to initialize the tensors with the right shape. + # data_structure, stop_iteration + batch_info = [get_data_structure(batch), False] + except StopIteration: + batch_info = [None, True] + else: + batch_info = [None, self._stop_iteration] + # This is inplace, so after this instruction, every process has the same `batch_info` as process 0. + broadcast_object_list(batch_info) + self._stop_iteration = batch_info[1] + if self._stop_iteration: + # If drop_last is False and split_batches is False, we may have a remainder to take care of. + if not self.split_batches and not self._drop_last: + if self.state.process_index == 0 and len(batches) > 0: + batch = concatenate(batches, dim=0) + batch_info = [get_data_structure(batch), False] + else: + batch_info = [None, True] + broadcast_object_list(batch_info) + return batch, batch_info + + def __iter__(self): + self.begin() + self.set_epoch(self.iteration) + main_iterator = None + if is_torch_version(">=", "2.0.1"): + # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts + # shared seed to all dist processes. Thus, we need to create the iterator on all dist processes. + # But, we only iterate through the DataLoader on process 0. + main_iterator = super().__iter__() + elif self.state.process_index == 0: + main_iterator = super().__iter__() + stop_iteration = False + self._stop_iteration = False + first_batch = None + next_batch, next_batch_info = self._fetch_batches(main_iterator) + batch_index = 0 + while not stop_iteration: + batch, batch_info = next_batch, next_batch_info + + if self.state.process_index != 0: + # Initialize tensors on other processes than process 0. + batch = initialize_tensors(batch_info[0]) + batch = send_to_device(batch, self.state.device, non_blocking=self._non_blocking) + # Broadcast the batch before splitting it. + batch = broadcast(batch, from_process=0) + + if not self._drop_last and first_batch is None: + # We keep at least num processes elements of the first batch to be able to complete the last batch + first_batch = self.slice_fn( + batch, + slice(0, self.state.num_processes), + process_index=self.state.process_index, + num_processes=self.state.num_processes, + ) + + if batch is None: + raise ValueError( + f"Batch does not contain any data (`{batch}`). The end of the iterable data was reached before the expected stop iteration." + ) + + observed_batch_size = find_batch_size(batch) + batch_size = observed_batch_size // self.state.num_processes + + stop_iteration = self._stop_iteration + if not stop_iteration: + # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in + # the dataloader since the number of batches is a round multiple of the number of processes.
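# [Editor's note: worked sketch: with 4 source batches and 2 processes in
# non-split mode, each step consumes 2 batches, so the second look-ahead
# fetch below hits StopIteration; this one-step look-ahead lets
# `end_of_dataloader` be set correctly before the final batch is yielded
# on every rank.]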
+ next_batch, next_batch_info = self._fetch_batches(main_iterator) + # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them. + if self._stop_iteration and next_batch_info[0] is None: + stop_iteration = True + + if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0: + # If the last batch is not complete, let's add the first batch to it. + batch = concatenate([batch, first_batch], dim=0) + # Batch size computation above is wrong, it's off by 1 so we fix it. + batch_size += 1 + + data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size) + batch = self.slice_fn( + batch, + data_slice, + process_index=self.state.process_index, + num_processes=self.state.num_processes, + ) + + if stop_iteration: + self.end_of_dataloader = True + self.remainder = observed_batch_size + if batch_index >= self.skip_batches: + yield batch + batch_index += 1 + self.iteration += 1 + self.end() + + def set_epoch(self, epoch: int): + # In case it is manually passed in, the user can set it to what they like + if self.iteration != epoch: + self.iteration = epoch + if hasattr(self.batch_sampler.sampler, "set_epoch"): + self.batch_sampler.sampler.set_epoch(epoch) + elif hasattr(self.dataset, "set_epoch"): + self.dataset.set_epoch(epoch) + + def __len__(self): + whole_length = super().__len__() + if self.split_batches: + return whole_length + elif self._drop_last: + return whole_length // self.state.num_processes + else: + return math.ceil(whole_length / self.state.num_processes) + + @property + def total_batch_size(self): + return ( + self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes) + ) + + @property + def total_dataset_length(self): + return len(self.dataset) + + def get_sampler(self): + return get_sampler(self) + + def set_sampler(self, sampler): + sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler) + if sampler_is_batch_sampler: + self.sampler.sampler = sampler + else: + self.batch_sampler.sampler = sampler + if hasattr(self.batch_sampler, "batch_sampler"): + self.batch_sampler.batch_sampler.sampler = sampler + + +def get_sampler(dataloader): + """ + Get the sampler associated to the dataloader + + Args: + dataloader (`torch.utils.data.dataloader.DataLoader`): + The data loader to split across several devices. + Returns: + `torch.utils.data.Sampler`: The sampler associated to the dataloader + """ + sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) + if sampler_is_batch_sampler: + sampler = getattr(dataloader.sampler, "sampler", None) + else: + sampler = getattr(dataloader.batch_sampler, "sampler", None) + return sampler + + +def prepare_data_loader( + dataloader: DataLoader, + device: Optional[torch.device] = None, + num_processes: Optional[int] = None, + process_index: Optional[int] = None, + split_batches: bool = False, + put_on_device: bool = False, + rng_types: Optional[List[Union[str, RNGType]]] = None, + dispatch_batches: Optional[bool] = None, + even_batches: bool = True, + slice_fn_for_dispatch: Optional[Callable] = None, + use_seedable_sampler: bool = False, + non_blocking: bool = False, +) -> DataLoader: + """ + Wraps a PyTorch `DataLoader` to generate batches for one of the processes only. 
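+
+    Example (an illustrative sketch; this is normally called for you by `Accelerator.prepare`, and
+    `num_processes` / `process_index` would come from the current distributed state):
+
+    ```python
+    import torch
+    from torch.utils.data import DataLoader, TensorDataset
+
+    from accelerate.data_loader import prepare_data_loader
+
+    dataset = TensorDataset(torch.arange(64).float())
+    dataloader = DataLoader(dataset, batch_size=8)
+    # On process 0 of 2, this yields every other batch of 8; the two processes cover the dataset together.
+    dataloader = prepare_data_loader(dataloader, num_processes=2, process_index=0)
+    ```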
+
+    Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration
+    at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
+
+    Args:
+        dataloader (`torch.utils.data.dataloader.DataLoader`):
+            The data loader to split across several devices.
+        device (`torch.device`):
+            The target device for the returned `DataLoader`.
+        num_processes (`int`, *optional*):
+            The number of processes running concurrently. Will default to the value given by
+            [`~state.AcceleratorState`].
+        process_index (`int`, *optional*):
+            The index of the current process. Will default to the value given by [`~state.AcceleratorState`].
+        split_batches (`bool`, *optional*, defaults to `False`):
+            Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
+            yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
+            `num_processes` batches at each iteration).
+
+            Another way to see this is that the observed batch size will be the same as the initial `dataloader` if
+            this option is set to `True`, and the batch size of the initial `dataloader` multiplied by `num_processes`
+            otherwise.
+
+            Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of
+            `num_processes`.
+        put_on_device (`bool`, *optional*, defaults to `False`):
+            Whether or not to put the batches on `device` (only works if the batches are nested lists, tuples or
+            dictionaries of tensors).
+        rng_types (list of `str` or [`~utils.RNGType`]):
+            The list of random number generators to synchronize at the beginning of each iteration. Should be one or
+            several of:
+
+            - `"torch"`: the base torch random number generator
+            - `"cuda"`: the CUDA random number generator (GPU only)
+            - `"xla"`: the XLA random number generator (TPU only)
+            - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
+              dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
+
+        dispatch_batches (`bool`, *optional*):
+            If set to `True`, the prepared dataloader is only iterated through on the main process and then the batches
+            are split and broadcast to each process. Will default to `True` when the underlying dataset is an
+            `IterableDataset`, `False` otherwise.
+        even_batches (`bool`, *optional*, defaults to `True`):
+            If set to `True`, in cases where the total batch size across all processes does not exactly divide the
+            dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
+            all workers.
+        slice_fn_for_dispatch (`Callable`, *optional*):
+            If passed, this function will be used to slice tensors across `num_processes`. Will default to
+            [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be
+            ignored otherwise.
+        use_seedable_sampler (`bool`, *optional*, defaults to `False`):
+            Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better
+            reproducibility. Comes at a cost of potentially different performance due to the different shuffling
+            algorithm, but ensures results will be the *exact* same. Should be paired with `set_seed()` at every
+            `self.set_epoch` call.
+        non_blocking (`bool`, *optional*, defaults to `False`):
+            If set to `True`, the dataloader will utilize non-blocking host-to-device transfers.
If the dataloader has + `pin_memory` set to `True`, this will help to increase overlap between data transfer and computations. + + + Returns: + `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches + + + + `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches` + equal to `False` + + + """ + if dispatch_batches is None: + if not put_on_device: + dispatch_batches = False + else: + dispatch_batches = isinstance(dataloader.dataset, IterableDataset) + + if dispatch_batches and not put_on_device: + raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.") + # Grab defaults from AcceleratorState + state = AcceleratorState() + if num_processes is None: + num_processes = state.num_processes + if process_index is None: + process_index = state.process_index + + # Sanity check + if split_batches: + if dataloader.batch_size is not None: + batch_size_for_check = dataloader.batch_size + else: + # For custom batch_sampler + if hasattr(dataloader.batch_sampler, "batch_size"): + batch_size_for_check = dataloader.batch_sampler.batch_size + else: + raise ValueError( + "In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed " + "`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. " + "Your `dataloader.batch_size` is None and `dataloader.batch_sampler` " + f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set." + ) + + if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0: + raise ValueError( + f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) " + f"needs to be a round multiple of the number of processes ({num_processes})." + ) + + new_dataset = dataloader.dataset + # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it + new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None + sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) + synchronized_generator = None + + sampler = get_sampler(dataloader) + if isinstance(sampler, RandomSampler) and use_seedable_sampler: + # When iterating through the dataloader during distributed processes + # we want to ensure that on each process we are iterating through the same + # samples in the same order if a seed is set. This requires a tweak + # to the `torch.utils.data.RandomSampler` class (if used). + sampler = SeedableRandomSampler( + data_source=sampler.data_source, + replacement=sampler.replacement, + num_samples=sampler._num_samples, + generator=getattr(sampler, "generator", torch.Generator()), + ) + + if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA: + # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled. 
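+        # Seeding an identical generator on every process keeps the shuffle order the same across all XLA
+        # processes, so the shards drawn by each process stay disjoint and consistent.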
+ generator = torch.Generator().manual_seed(42) + dataloader.generator = generator + dataloader.sampler.generator = generator + # No change if no multiprocess + if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches: + if isinstance(new_dataset, IterableDataset): + if getattr(dataloader.dataset, "generator", None) is not None: + synchronized_generator = dataloader.dataset.generator + new_dataset = IterableDatasetShard( + new_dataset, + batch_size=dataloader.batch_size, + drop_last=dataloader.drop_last, + num_processes=num_processes, + process_index=process_index, + split_batches=split_batches, + ) + else: + if not use_seedable_sampler and hasattr(sampler, "generator"): + if sampler.generator is None: + sampler.generator = torch.Generator() + synchronized_generator = sampler.generator + batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler + new_batch_sampler = BatchSamplerShard( + batch_sampler, + num_processes=num_processes, + process_index=process_index, + split_batches=split_batches, + even_batches=even_batches, + ) + + # We ignore all of those since they are all dealt with by our new_batch_sampler + ignore_kwargs = [ + "batch_size", + "shuffle", + "sampler", + "batch_sampler", + "drop_last", + ] + + if rng_types is not None and synchronized_generator is None and "generator" in rng_types: + rng_types.remove("generator") + + kwargs = { + k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) + for k in _PYTORCH_DATALOADER_KWARGS + if k not in ignore_kwargs + } + + # Need to provide batch_size as batch_sampler is None for Iterable dataset + if new_batch_sampler is None: + kwargs["drop_last"] = dataloader.drop_last + kwargs["batch_size"] = ( + dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size + ) + if dispatch_batches: + kwargs.pop("generator") + dataloader = DataLoaderDispatcher( + new_dataset, + split_batches=split_batches, + batch_sampler=new_batch_sampler, + _drop_last=dataloader.drop_last, + _non_blocking=non_blocking, + slice_fn=slice_fn_for_dispatch, + **kwargs, + ) + elif sampler_is_batch_sampler: + dataloader = DataLoaderShard( + new_dataset, + device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, + sampler=new_batch_sampler, + batch_size=dataloader.batch_size, + rng_types=rng_types, + _drop_last=dataloader.drop_last, + _non_blocking=non_blocking, + synchronized_generator=synchronized_generator, + **kwargs, + ) + else: + dataloader = DataLoaderShard( + new_dataset, + device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, + batch_sampler=new_batch_sampler, + rng_types=rng_types, + synchronized_generator=synchronized_generator, + _drop_last=dataloader.drop_last, + _non_blocking=non_blocking, + **kwargs, + ) + + if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler: + dataloader.set_sampler(sampler) + if state.distributed_type == DistributedType.XLA: + return MpDeviceLoaderWrapper(dataloader, device) + return dataloader + + +class SkipBatchSampler(BatchSampler): + """ + A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`. 
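+
+    Example (a minimal sketch, not taken from the library's documentation):
+
+    ```python
+    from torch.utils.data import BatchSampler, SequentialSampler
+
+    from accelerate.data_loader import SkipBatchSampler
+
+    base = BatchSampler(SequentialSampler(range(10)), batch_size=2, drop_last=False)
+    list(SkipBatchSampler(base, skip_batches=2))
+    # [[4, 5], [6, 7], [8, 9]] -- the first two batches are skipped
+    ```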
+ """ + + def __init__(self, batch_sampler, skip_batches=0): + self.batch_sampler = batch_sampler + self.skip_batches = skip_batches + + def __iter__(self): + for index, samples in enumerate(self.batch_sampler): + if index >= self.skip_batches: + yield samples + + @property + def total_length(self): + return len(self.batch_sampler) + + def __len__(self): + return len(self.batch_sampler) - self.skip_batches + + +class SkipDataLoader(DataLoader): + """ + Subclass of a PyTorch `DataLoader` that will skip the first batches. + + Args: + dataset (`torch.utils.data.dataset.Dataset`): + The dataset to use to build this datalaoder. + skip_batches (`int`, *optional*, defaults to 0): + The number of batches to skip at the beginning. + kwargs: + All other keyword arguments to pass to the regular `DataLoader` initialization. + """ + + def __init__(self, dataset, skip_batches=0, **kwargs): + super().__init__(dataset, **kwargs) + self.skip_batches = skip_batches + + def __iter__(self): + for index, batch in enumerate(super().__iter__()): + if index >= self.skip_batches: + yield batch + + +def skip_first_batches(dataloader, num_batches=0): + """ + Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. + """ + dataset = dataloader.dataset + sampler_is_batch_sampler = False + if isinstance(dataset, IterableDataset): + new_batch_sampler = None + else: + sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler) + batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler + new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches) + + # We ignore all of those since they are all dealt with by our new_batch_sampler + ignore_kwargs = [ + "batch_size", + "shuffle", + "sampler", + "batch_sampler", + "drop_last", + ] + + kwargs = { + k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) + for k in _PYTORCH_DATALOADER_KWARGS + if k not in ignore_kwargs + } + + # Need to provide batch_size as batch_sampler is None for Iterable dataset + if new_batch_sampler is None: + kwargs["drop_last"] = dataloader.drop_last + kwargs["batch_size"] = dataloader.batch_size + + if isinstance(dataloader, DataLoaderDispatcher): + if new_batch_sampler is None: + # Need to manually skip batches in the dataloader + kwargs["skip_batches"] = num_batches + dataloader = DataLoaderDispatcher( + dataset, + split_batches=dataloader.split_batches, + batch_sampler=new_batch_sampler, + _drop_last=dataloader._drop_last, + **kwargs, + ) + elif isinstance(dataloader, DataLoaderShard): + if new_batch_sampler is None: + # Need to manually skip batches in the dataloader + kwargs["skip_batches"] = num_batches + elif sampler_is_batch_sampler: + kwargs["sampler"] = new_batch_sampler + kwargs["batch_size"] = dataloader.batch_size + else: + kwargs["batch_sampler"] = new_batch_sampler + dataloader = DataLoaderShard( + dataset, + device=dataloader.device, + rng_types=dataloader.rng_types, + synchronized_generator=dataloader.synchronized_generator, + **kwargs, + ) + else: + if new_batch_sampler is None: + # Need to manually skip batches in the dataloader + dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs) + else: + dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs) + + return dataloader diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/hooks.py b/llmeval-env/lib/python3.10/site-packages/accelerate/hooks.py new file mode 100644 index 
0000000000000000000000000000000000000000..e9a4b384f3cac39e7bedabb1f5e7c0320aae6a7f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/hooks.py @@ -0,0 +1,709 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +from typing import Dict, List, Mapping, Optional, Union + +import torch +import torch.nn as nn + +from .state import PartialState +from .utils import ( + PrefixedDataset, + find_device, + named_module_tensors, + send_to_device, + set_module_tensor_to_device, +) +from .utils.modeling import get_non_persistent_buffers +from .utils.other import recursive_getattr + + +class ModelHook: + """ + A hook that contains callbacks to be executed just before and after the forward method of a model. The difference + with PyTorch existing hooks is that they get passed along the kwargs. + + Class attribute: + - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under + the `torch.no_grad()` context manager. + """ + + no_grad = False + + def init_hook(self, module): + """ + To be executed when the hook is attached to the module. + + Args: + module (`torch.nn.Module`): The module attached to this hook. + """ + return module + + def pre_forward(self, module, *args, **kwargs): + """ + To be executed just before the forward method of the model. + + Args: + module (`torch.nn.Module`): The module whose forward pass will be executed just after this event. + args (`Tuple[Any]`): The positional arguments passed to the module. + kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module. + + Returns: + `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`. + """ + return args, kwargs + + def post_forward(self, module, output): + """ + To be executed just after the forward method of the model. + + Args: + module (`torch.nn.Module`): The module whose forward pass been executed just before this event. + output (`Any`): The output of the module. + + Returns: + `Any`: The processed `output`. + """ + return output + + def detach_hook(self, module): + """ + To be executed when the hook is detached from a module. + + Args: + module (`torch.nn.Module`): The module detached from this hook. + """ + return module + + +class SequentialHook(ModelHook): + """ + A hook that can contain several hooks and iterates through them at each event. 
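+
+    Example (an illustrative sketch; `LoggingHook` is a hypothetical subclass, not part of the library):
+
+    ```python
+    import torch
+    import torch.nn as nn
+
+    from accelerate.hooks import ModelHook, SequentialHook, add_hook_to_module
+
+
+    class LoggingHook(ModelHook):
+        def pre_forward(self, module, *args, **kwargs):
+            print(f"forward on {module.__class__.__name__}")
+            return args, kwargs
+
+
+    model = nn.Linear(2, 2)
+    # Both hooks fire, in order, at each event of the wrapped forward.
+    add_hook_to_module(model, SequentialHook(LoggingHook(), LoggingHook()))
+    model(torch.randn(3, 2))  # prints twice, once per chained hook
+    ```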
+ """ + + def __init__(self, *hooks): + self.hooks = hooks + + def init_hook(self, module): + for hook in self.hooks: + module = hook.init_hook(module) + return module + + def pre_forward(self, module, *args, **kwargs): + for hook in self.hooks: + args, kwargs = hook.pre_forward(module, *args, **kwargs) + return args, kwargs + + def post_forward(self, module, output): + for hook in self.hooks: + output = hook.post_forward(module, output) + return output + + def detach_hook(self, module): + for hook in self.hooks: + module = hook.detach_hook(module) + return module + + +def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False): + """ + Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove + this behavior and restore the original `forward` method, use `remove_hook_from_module`. + + + + If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks + together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class. + + + + Args: + module (`torch.nn.Module`): + The module to attach a hook to. + hook (`ModelHook`): + The hook to attach. + append (`bool`, *optional*, defaults to `False`): + Whether the hook should be chained with an existing one (if module already contains a hook) or not. + + Returns: + `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can + be discarded). + """ + + if append and (getattr(module, "_hf_hook", None) is not None): + old_hook = module._hf_hook + remove_hook_from_module(module) + hook = SequentialHook(old_hook, hook) + + if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"): + # If we already put some hook on this module, we replace it with the new one. + old_forward = module._old_forward + else: + old_forward = module.forward + module._old_forward = old_forward + + module = hook.init_hook(module) + module._hf_hook = hook + + def new_forward(module, *args, **kwargs): + args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs) + if module._hf_hook.no_grad: + with torch.no_grad(): + output = module._old_forward(*args, **kwargs) + else: + output = module._old_forward(*args, **kwargs) + return module._hf_hook.post_forward(module, output) + + # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. + # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 + if "GraphModuleImpl" in str(type(module)): + module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) + else: + module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) + + return module + + +def remove_hook_from_module(module: nn.Module, recurse=False): + """ + Removes any hook attached to a module via `add_hook_to_module`. + + Args: + module (`torch.nn.Module`): The module to attach a hook to. + recurse (`bool`, **optional**): Whether to remove the hooks recursively + + Returns: + `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can + be discarded). + """ + + if hasattr(module, "_hf_hook"): + module._hf_hook.detach_hook(module) + delattr(module, "_hf_hook") + + if hasattr(module, "_old_forward"): + # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. 
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 + if "GraphModuleImpl" in str(type(module)): + module.__class__.forward = module._old_forward + else: + module.forward = module._old_forward + delattr(module, "_old_forward") + + if recurse: + for child in module.children(): + remove_hook_from_module(child, recurse) + + return module + + +class AlignDevicesHook(ModelHook): + """ + A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the + associated module, potentially offloading the weights after the forward pass. + + Args: + execution_device (`torch.device`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. + io_same_device (`bool`, *optional*, defaults to `False`): + Whether or not the output should be placed on the same device as the input was. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + place_submodules (`bool`, *optional*, defaults to `False`): + Whether to place the submodules on `execution_device` during the `init_hook` event. + """ + + def __init__( + self, + execution_device: Optional[Union[int, str, torch.device]] = None, + offload: bool = False, + io_same_device: bool = False, + weights_map: Optional[Mapping] = None, + offload_buffers: bool = False, + place_submodules: bool = False, + skip_keys: Optional[Union[str, List[str]]] = None, + tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, + ): + self.execution_device = execution_device + self.offload = offload + self.io_same_device = io_same_device + self.weights_map = weights_map + self.offload_buffers = offload_buffers + self.place_submodules = place_submodules + self.skip_keys = skip_keys + + # Will contain the input device when `io_same_device=True`. + self.input_device = None + self.param_original_devices = {} + self.buffer_original_devices = {} + self.tied_params_names = set() + + # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory + # for tied weights already loaded on the target execution device. + self.tied_params_map = tied_params_map + + def __repr__(self): + return ( + f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, " + f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, " + f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})" + ) + + def init_hook(self, module): + # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero. 
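+        # On the meta device every tensor reports a null data pointer, so pointer-based identification of
+        # tied weights would wrongly group all parameters together.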
+ if self.execution_device == "meta" or self.execution_device == torch.device("meta"): + self.tied_params_map = None + + if not self.offload and self.execution_device is not None: + for name, _ in named_module_tensors(module, recurse=self.place_submodules): + set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map) + elif self.offload: + self.original_devices = { + name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules) + } + if self.weights_map is None: + self.weights_map = { + name: param.to("cpu") + for name, param in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules + ) + } + for name, _ in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True + ): + # When using disk offloading, we can not rely on `weights_map[name].data_ptr()` as the reference pointer, + # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer. + # As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str] + # to add on the fly pointers to `tied_params_map` in the pre_forward call. + if ( + self.tied_params_map is not None + and recursive_getattr(module, name).data_ptr() in self.tied_params_map + ): + self.tied_params_names.add(name) + + set_module_tensor_to_device(module, name, "meta") + + if not self.offload_buffers and self.execution_device is not None: + for name, _ in module.named_buffers(recurse=self.place_submodules): + set_module_tensor_to_device( + module, name, self.execution_device, tied_params_map=self.tied_params_map + ) + elif self.offload_buffers and self.execution_device is not None: + for name in get_non_persistent_buffers(module, recurse=self.place_submodules): + set_module_tensor_to_device( + module, name, self.execution_device, tied_params_map=self.tied_params_map + ) + + return module + + def pre_forward(self, module, *args, **kwargs): + if self.io_same_device: + self.input_device = find_device([args, kwargs]) + if self.offload: + self.tied_pointers_to_remove = set() + + for name, _ in named_module_tensors( + module, + include_buffers=self.offload_buffers, + recurse=self.place_submodules, + remove_non_persistent=True, + ): + fp16_statistics = None + value = self.weights_map[name] + if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys(): + if value.dtype == torch.int8: + fp16_statistics = self.weights_map[name.replace("weight", "SCB")] + + # In case we are using offloading with tied weights, we need to keep track of the offloaded weights + # that are loaded on device at this point, as we will need to remove them as well from the dictionary + # self.tied_params_map in order to allow to free memory. 
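+                # These entries are removed again in `post_forward` via `tied_pointers_to_remove`, so that
+                # the dispatched copies can be garbage-collected once the forward pass is done.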
+ if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map: + self.tied_params_map[value.data_ptr()] = {} + + if ( + value is not None + and self.tied_params_map is not None + and value.data_ptr() in self.tied_params_map + and self.execution_device not in self.tied_params_map[value.data_ptr()] + ): + self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device)) + + set_module_tensor_to_device( + module, + name, + self.execution_device, + value=value, + fp16_statistics=fp16_statistics, + tied_params_map=self.tied_params_map, + ) + + return send_to_device(args, self.execution_device), send_to_device( + kwargs, self.execution_device, skip_keys=self.skip_keys + ) + + def post_forward(self, module, output): + if self.offload: + for name, _ in named_module_tensors( + module, + include_buffers=self.offload_buffers, + recurse=self.place_submodules, + remove_non_persistent=True, + ): + set_module_tensor_to_device(module, name, "meta") + if type(module).__name__ == "Linear8bitLt": + module.state.SCB = None + module.state.CxB = None + + # We may have loaded tied weights into self.tied_params_map (avoiding to load them several times in e.g. submodules): remove them from + # this dictionary to allow the garbage collector to do its job. + for value_pointer, device in self.tied_pointers_to_remove: + del self.tied_params_map[value_pointer][device] + self.tied_pointers_to_remove = set() + + if self.io_same_device and self.input_device is not None: + output = send_to_device(output, self.input_device, skip_keys=self.skip_keys) + + return output + + def detach_hook(self, module): + if self.offload: + for name, device in self.original_devices.items(): + if device != torch.device("meta"): + set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None)) + return module + + +def attach_execution_device_hook( + module: torch.nn.Module, + execution_device: Union[int, str, torch.device], + skip_keys: Optional[Union[str, List[str]]] = None, + preload_module_classes: Optional[List[str]] = None, + tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, +): + """ + Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right + execution device + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. + execution_device (`int`, `str` or `torch.device`): + The device on which inputs and model weights should be placed before the forward pass. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): + A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution + device, this parameter is useful to reuse the first available pointer of a shared weight for all others, + instead of duplicating memory. 
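+
+    Example (a minimal sketch, assuming a CUDA device 0 is available):
+
+    ```python
+    import torch.nn as nn
+
+    from accelerate.hooks import attach_execution_device_hook
+
+    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4))
+    attach_execution_device_hook(model, execution_device=0)
+    # Weights are placed on GPU 0 when the hooks are attached; inputs follow on every forward pass.
+    ```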
+ """ + if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0: + add_hook_to_module( + module, + AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map), + ) + + # Break the recursion if we get to a preload module. + if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes: + return + + for child in module.children(): + attach_execution_device_hook(child, execution_device, tied_params_map=tied_params_map) + + +def attach_align_device_hook( + module: torch.nn.Module, + execution_device: Optional[torch.device] = None, + offload: bool = False, + weights_map: Optional[Mapping] = None, + offload_buffers: bool = False, + module_name: str = "", + skip_keys: Optional[Union[str, List[str]]] = None, + preload_module_classes: Optional[List[str]] = None, + tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, +): + """ + Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or + buffers. + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. + execution_device (`torch.device`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + module_name (`str`, *optional*, defaults to `""`): + The name of the module. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): + A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution + device, this parameter is useful to reuse the first available pointer of a shared weight for all others, + instead of duplicating memory. + """ + # Attach the hook on this module if it has any direct tensor. + directs = named_module_tensors(module) + full_offload = ( + offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes + ) + + if len(list(directs)) > 0 or full_offload: + if weights_map is not None: + prefix = f"{module_name}." 
if len(module_name) > 0 else "" + prefixed_weights_map = PrefixedDataset(weights_map, prefix) + else: + prefixed_weights_map = None + hook = AlignDevicesHook( + execution_device=execution_device, + offload=offload, + weights_map=prefixed_weights_map, + offload_buffers=offload_buffers, + place_submodules=full_offload, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + add_hook_to_module(module, hook, append=True) + + # We stop the recursion in case we hit the full offload. + if full_offload: + return + + # Recurse on all children of the module. + for child_name, child in module.named_children(): + child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name + attach_align_device_hook( + child, + execution_device=execution_device, + offload=offload, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=child_name, + preload_module_classes=preload_module_classes, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + + +def remove_hook_from_submodules(module: nn.Module): + """ + Recursively removes all hooks attached on the submodules of a given model. + + Args: + module (`torch.nn.Module`): The module on which to remove all hooks. + """ + remove_hook_from_module(module) + for child in module.children(): + remove_hook_from_submodules(child) + + +def attach_align_device_hook_on_blocks( + module: nn.Module, + execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None, + offload: Union[bool, Dict[str, bool]] = False, + weights_map: Mapping = None, + offload_buffers: bool = False, + module_name: str = "", + skip_keys: Optional[Union[str, List[str]]] = None, + preload_module_classes: Optional[List[str]] = None, + tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, +): + """ + Attaches `AlignDevicesHook` to all blocks of a given model as needed. + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. + execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. It can be one device + for the whole module, or a dictionary mapping module name to device. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole + module, or a dictionary mapping module name to boolean. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + module_name (`str`, *optional*, defaults to `""`): + The name of the module. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. 
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): + A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution + device, this parameter is useful to reuse the first available pointer of a shared weight for all others, + instead of duplicating memory. + """ + # If one device and one offload, we've got one hook. + if not isinstance(execution_device, Mapping) and not isinstance(offload, dict): + if not offload: + hook = AlignDevicesHook( + execution_device=execution_device, + io_same_device=True, + skip_keys=skip_keys, + place_submodules=True, + tied_params_map=tied_params_map, + ) + add_hook_to_module(module, hook) + else: + attach_align_device_hook( + module, + execution_device=execution_device, + offload=True, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=module_name, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + return + + if not isinstance(execution_device, Mapping): + execution_device = {key: execution_device for key in offload.keys()} + if not isinstance(offload, Mapping): + offload = {key: offload for key in execution_device.keys()} + + if module_name in execution_device and module_name in offload and not offload[module_name]: + hook = AlignDevicesHook( + execution_device=execution_device[module_name], + offload_buffers=offload_buffers, + io_same_device=(module_name == ""), + place_submodules=True, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + add_hook_to_module(module, hook) + attach_execution_device_hook(module, execution_device[module_name], tied_params_map=tied_params_map) + elif module_name in execution_device and module_name in offload: + attach_align_device_hook( + module, + execution_device=execution_device[module_name], + offload=True, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=module_name, + skip_keys=skip_keys, + preload_module_classes=preload_module_classes, + tied_params_map=tied_params_map, + ) + if not hasattr(module, "_hf_hook"): + hook = AlignDevicesHook( + execution_device=execution_device[module_name], + io_same_device=(module_name == ""), + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + add_hook_to_module(module, hook) + attach_execution_device_hook( + module, + execution_device[module_name], + preload_module_classes=preload_module_classes, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + elif module_name == "": + hook = AlignDevicesHook( + execution_device=execution_device.get(""), + io_same_device=True, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + add_hook_to_module(module, hook) + + for child_name, child in module.named_children(): + child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name + attach_align_device_hook_on_blocks( + child, + execution_device=execution_device, + offload=offload, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=child_name, + preload_module_classes=preload_module_classes, + skip_keys=skip_keys, + tied_params_map=tied_params_map, + ) + + +class CpuOffload(ModelHook): + """ + Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after + the forward, the user needs to call the `init_hook` method again for this. + + Args: + execution_device(`str`, `int` or `torch.device`, *optional*): + The device on which the model should be executed. 
Will default to the MPS device if it's available, then
+            GPU 0 if there is a GPU, and finally to the CPU.
+        prev_module_hook (`UserCpuOffloadHook`, *optional*):
+            The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If
+            passed, its offload method will be called just before the forward of the model to which this hook is
+            attached.
+    """
+
+    def __init__(
+        self,
+        execution_device: Optional[Union[str, int, torch.device]] = None,
+        prev_module_hook: Optional["UserCpuOffloadHook"] = None,
+    ):
+        self.prev_module_hook = prev_module_hook
+
+        self.execution_device = execution_device if execution_device is not None else PartialState().default_device
+
+    def init_hook(self, module):
+        return module.to("cpu")
+
+    def pre_forward(self, module, *args, **kwargs):
+        if self.prev_module_hook is not None:
+            self.prev_module_hook.offload()
+        module.to(self.execution_device)
+        return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)
+
+
+class UserCpuOffloadHook:
+    """
+    A simple hook grouping a model and a `ModelHook`, which provides easy APIs to call the init method of the hook or
+    remove it entirely.
+    """
+
+    def __init__(self, model, hook):
+        self.model = model
+        self.hook = hook
+
+    def offload(self):
+        self.hook.init_hook(self.model)
+
+    def remove(self):
+        remove_hook_from_module(self.model)
diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/inference.py b/llmeval-env/lib/python3.10/site-packages/accelerate/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf4cf15017938e34867d4eeaad120745051ab385
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/accelerate/inference.py
@@ -0,0 +1,188 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
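+
+# Pipeline-parallel inference utilities built on PiPPy: `generate_device_map` balances the model across
+# processes, `build_pipeline` splits it into pipeline stages, and `prepare_pippy` wraps `model.forward`
+# in a chunked, distributed forward pass.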
+import math +from types import MethodType +from typing import Any, Dict, List, Optional, Tuple, Union + +from .state import PartialState +from .utils import ( + calculate_maximum_sizes, + convert_bytes, + copy_tensor_to_devices, + ignorant_find_batch_size, + infer_auto_device_map, + is_pippy_available, + pad_input_tensors, + send_to_device, +) + + +if is_pippy_available(): + from pippy.IR import Pipe, PipeSplitWrapper, annotate_split_points + from pippy.PipelineStage import PipelineStage + + +def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None): + """ + Calculates the device map for `model` with an offset for PiPPy + """ + if num_processes == 1: + return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False) + if max_memory is None: + model_size, shared = calculate_maximum_sizes(model) + + # Split into `n` chunks for each GPU + memory = (model_size + shared[0]) / num_processes + memory = convert_bytes(memory) + value, ending = memory.split(" ") + + # Add a chunk to deal with potential extra shared memory instances + memory = math.ceil(float(value)) * 1.1 + memory = f"{memory} {ending}" + max_memory = {i: memory for i in range(num_processes)} + device_map = infer_auto_device_map( + model, + max_memory=max_memory, + no_split_module_classes=no_split_module_classes, + clean_result=False, + ) + return device_map + + +def find_pippy_batch_size(args, kwargs): + found_batch_size = None + if args is not None: + for arg in args: + found_batch_size = ignorant_find_batch_size(arg) + if found_batch_size is not None: + break + if kwargs is not None and found_batch_size is None: + for kwarg in kwargs.values(): + found_batch_size = ignorant_find_batch_size(kwarg) + if found_batch_size is not None: + break + return found_batch_size + + +def build_pipeline(model, split_points, args, kwargs, num_chunks): + """ + Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing + in needed `args` and `kwargs` as the model needs on the CPU. + + Users can pass in custom `num_chunks` as an optional hyper-parameter. 
By default will use + `AcceleratorState.num_processes` + """ + # We need to annotate the split points in the model for PiPPy + state = PartialState() + annotate_split_points(model, {split_point: PipeSplitWrapper.SplitPoint.BEGINNING for split_point in split_points}) + found_batch_size = find_pippy_batch_size(args, kwargs) + if found_batch_size != num_chunks: + if args is not None: + args = pad_input_tensors(args, found_batch_size, num_chunks) + if kwargs is not None: + kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks) + pipe = Pipe.from_tracing(model, num_chunks=num_chunks, example_args=args, example_kwargs=kwargs) + stage = PipelineStage(pipe, state.local_process_index, device=state.device) + + return stage + + +def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs): + state = PartialState() + output = None + + if state.num_processes == 1: + output = forward(*args, **kwargs) + elif state.is_local_main_process: + found_batch_size = find_pippy_batch_size(args, kwargs) + if found_batch_size is None: + raise ValueError("Could not find batch size from args or kwargs") + else: + if found_batch_size != num_chunks: + args = pad_input_tensors(args, found_batch_size, num_chunks) + kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks) + forward(*args, **kwargs) + elif state.is_last_process: + output = forward() + else: + forward() + if gather_output: + # Each node will get a copy of the full output which is only on the last GPU + output = copy_tensor_to_devices(output) + return output + + +def prepare_pippy( + model, + split_points: Optional[Union[str, List[str]]] = "auto", + no_split_module_classes: Optional[List[str]] = None, + example_args: Optional[Tuple[Any]] = (), + example_kwargs: Optional[Dict[str, Any]] = None, + num_chunks: Optional[int] = None, + gather_output: Optional[bool] = False, +): + """ + Wraps `model` for pipeline parallel inference. + + Args: + model (`torch.nn.Module`): + A model we want to split for pipeline-parallel inference + split_points (`str` or `List[str]`, defaults to 'auto'): + How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced + split given any model. Should be a list of layer names in the model to split by otherwise. + no_split_module_classes (`List[str]`): + A list of class names for layers we don't want to be split. + example_args (tuple of model inputs): + The expected inputs for the model that uses order-based inputs. Recommended to use this method if possible. + example_kwargs (dict of model inputs) + The expected inputs for the model that uses dictionary-based inputs. This is a *highly* limiting structure + that requires the same keys be present at *all* inference calls. Not recommended unless the prior condition + is true for all cases. + num_chunks (`int`, defaults to the number of available GPUs): + The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but + this can be tuned and played with. In general one should have num_chunks >= num_gpus. + gather_output (`bool`, defaults to `False`): + If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs. + """ + if not is_pippy_available(): + raise ImportError( + "`pippy` was not found to be installed on your system. 
Please " + "install using `pip install torchpippy` or ensure you have at least version 0.2.0" + ) + state = PartialState() + example_args = send_to_device(example_args, "cpu") + example_kwargs = send_to_device(example_kwargs, "cpu") + if num_chunks is None: + num_chunks = state.num_processes + if split_points == "auto": + device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes) + split_points = [] + for i in range(1, num_chunks): + split_points.append(next(k for k, v in device_map.items() if v == i)) + model.hf_split_points = split_points + stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks) + model._original_forward = model.forward + model._original_call = model.__call__ + model.pippy_stage = stage + model.hf_split_points = split_points + + def forward(*args, **kwargs): + return pippy_forward(stage.forward, num_chunks, gather_output, *args, **kwargs) + + # To act like a decorator so that it can be popped when doing `extract_model_from_parallel` + # Note: creates an infinite recursion loop with `generate` + model_forward = MethodType(forward, model) + forward.__wrapped__ = model_forward + model.forward = forward + return model diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/launchers.py b/llmeval-env/lib/python3.10/site-packages/accelerate/launchers.py new file mode 100644 index 0000000000000000000000000000000000000000..0265b25187f813356cfb49768097d6cf2599b0d3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/launchers.py @@ -0,0 +1,258 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import tempfile + +import torch + +from .state import AcceleratorState, PartialState +from .utils import ( + PrecisionType, + PrepareForLaunch, + are_libraries_initialized, + check_cuda_p2p_ib_support, + get_gpu_info, + is_mps_available, + patch_environment, +) + + +def test_launch(): + "Verify a `PartialState` can be initialized." + _ = PartialState() + + +def notebook_launcher( + function, + args=(), + num_processes=None, + mixed_precision="no", + use_port="29500", + master_addr="127.0.0.1", + node_rank=0, + num_nodes=1, +): + """ + Launches a training function, using several processes or multiple nodes if it's possible in the current environment + (TPU with multiple cores for instance). + + + + To use this function absolutely zero calls to a CUDA device must be made in the notebook session before calling. If + any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability. + + Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none + of those calls have been made. + + + + Args: + function (`Callable`): + The training function to execute. If it accepts arguments, the first argument should be the index of the + process run. 
+ args (`Tuple`): + Tuple of arguments to pass to the function (it will receive `*args`). + num_processes (`int`, *optional*): + The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to + the number of GPUs available otherwise. + mixed_precision (`str`, *optional*, defaults to `"no"`): + If `fp16` or `bf16`, will use mixed precision training on multi-GPU. + use_port (`str`, *optional*, defaults to `"29500"`): + The port to use to communicate between processes when launching a multi-GPU training. + master_addr (`str`, *optional*, defaults to `"127.0.0.1"`): + The address to use for communication between processes. + node_rank (`int`, *optional*, defaults to 0): + The rank of the current node. + num_nodes (`int`, *optional*, defaults to 1): + The number of nodes to use for training. + + Example: + + ```python + # Assume this is defined in a Jupyter Notebook on an instance with two GPUs + from accelerate import notebook_launcher + + + def train(*args): + # Your training function here + ... + + + notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16") + ``` + """ + # Are we in a google colab or a Kaggle Kernel? + in_colab = False + in_kaggle = False + if any(key.startswith("KAGGLE") for key in os.environ.keys()): + in_kaggle = True + elif "IPython" in sys.modules: + in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython()) + + try: + mixed_precision = PrecisionType(mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None): + # TPU launch + import torch_xla.distributed.xla_multiprocessing as xmp + + if len(AcceleratorState._shared_state) > 0: + raise ValueError( + "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " + "your training function. Restart your notebook and make sure no cells initializes an " + "`Accelerator`." + ) + if num_processes is None: + num_processes = 8 + + launcher = PrepareForLaunch(function, distributed_type="TPU") + print(f"Launching a training on {num_processes} TPU cores.") + xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork") + elif in_colab and get_gpu_info()[1] < 2: + # No need for a distributed launch otherwise as it's either CPU or one GPU. + if torch.cuda.is_available(): + print("Launching training on one GPU.") + else: + print("Launching training on one CPU.") + function(*args) + else: + if num_processes is None: + raise ValueError( + "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." + ) + if node_rank >= num_nodes: + raise ValueError("The node_rank must be less than the number of nodes.") + if num_processes > 1: + # Multi-GPU launch + from torch.multiprocessing import start_processes + from torch.multiprocessing.spawn import ProcessRaisedException + + if len(AcceleratorState._shared_state) > 0: + raise ValueError( + "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " + "inside your training function. Restart your notebook and make sure no cells initializes an " + "`Accelerator`." + ) + # Check for specific libraries known to initialize CUDA that users constantly use + problematic_imports = are_libraries_initialized("bitsandbytes") + if len(problematic_imports) > 0: + err = ( + "Could not start distributed process. 
+                    "Could not start distributed process. Libraries known to initialize CUDA upon import have been "
+                    "imported already. Please keep these imports inside your training function to try and help with this:"
+                )
+                for lib_name in problematic_imports:
+                    err += f"\n\t* `{lib_name}`"
+                raise RuntimeError(err)
+
+            patched_env = dict(
+                nproc=num_processes,
+                node_rank=node_rank,
+                world_size=num_nodes * num_processes,
+                master_addr=master_addr,
+                master_port=use_port,
+                mixed_precision=mixed_precision,
+            )
+
+            # Check for CUDA P2P and IB issues
+            if not check_cuda_p2p_ib_support():
+                patched_env["nccl_p2p_disable"] = "1"
+                patched_env["nccl_ib_disable"] = "1"
+
+            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
+            # process here (the other ones will be set by the launcher).
+            with patch_environment(**patched_env):
+                # First dummy launch
+                if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true":
+                    launcher = PrepareForLaunch(test_launch, distributed_type="MULTI_GPU")
+                    try:
+                        start_processes(launcher, args=(), nprocs=num_processes, start_method="fork")
+                    except ProcessRaisedException as e:
+                        err = "An issue was found when verifying a stable environment for the notebook launcher."
+                        if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
+                            raise RuntimeError(
+                                f"{err} "
+                                "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
+                                "Please review your imports and test them when running the `notebook_launcher()` to identify "
+                                "which one is problematic and causing CUDA to be initialized."
+                            ) from e
+                        else:
+                            raise RuntimeError(f"{err} The following error was raised: {e}") from e
+                # Now the actual launch
+                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
+                print(f"Launching training on {num_processes} GPUs.")
+                try:
+                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
+                except ProcessRaisedException as e:
+                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
+                        raise RuntimeError(
+                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
+                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
+                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
+                            "which one is problematic and causing CUDA to be initialized."
+                        ) from e
+                    else:
+                        raise RuntimeError(f"An issue was found when launching the training: {e}") from e
+
+        else:
+            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
+            if is_mps_available():
+                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+                print("Launching training on MPS.")
+            elif torch.cuda.is_available():
+                print("Launching training on one GPU.")
+            else:
+                print("Launching training on CPU.")
+            function(*args)
+
+
+def debug_launcher(function, args=(), num_processes=2):
+    """
+    Launches a training function using several processes on CPU for debugging purposes.
+
+    <Tip warning={true}>
+
+    This function is provided for internal testing and debugging, but it's not intended for real training runs. It
+    will only use the CPU.
+
+    </Tip>
+
+    Args:
+        function (`Callable`):
+            The training function to execute.
+        args (`Tuple`):
+            Tuple of arguments to pass to the function (it will receive `*args`).
+        num_processes (`int`, *optional*, defaults to 2):
+            The number of processes to use for training.
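A minimal sketch of driving `debug_launcher` from a script (its implementation follows just below); the `train` function here is a toy stand-in, not part of the library:

```python
# A minimal sketch, assuming `accelerate` is installed; `train` is a
# hypothetical toy function, not part of the library.
from accelerate import Accelerator, debug_launcher


def train():
    # `debug_launcher` forces CPU execution via the environment it patches
    accelerator = Accelerator()
    print(f"Process {accelerator.process_index} of {accelerator.num_processes}")


debug_launcher(train, num_processes=2)
```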
+ """ + from torch.multiprocessing import start_processes + + with tempfile.NamedTemporaryFile() as tmp_file: + # torch.distributed will expect a few environment variable to be here. We set the ones common to each + # process here (the other ones will be set be the launcher). + with patch_environment( + world_size=num_processes, + master_addr="127.0.0.1", + master_port="29500", + accelerate_mixed_precision="no", + accelerate_debug_rdv_file=tmp_file.name, + accelerate_use_cpu="yes", + ): + launcher = PrepareForLaunch(function, debug=True) + start_processes(launcher, args=args, nprocs=num_processes, start_method="fork") diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/local_sgd.py b/llmeval-env/lib/python3.10/site-packages/accelerate/local_sgd.py new file mode 100644 index 0000000000000000000000000000000000000000..7f2657fcc8b057b4396cf299e6cf681fa7b83aa8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/local_sgd.py @@ -0,0 +1,102 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch + +from accelerate import Accelerator, DistributedType + + +class LocalSGD: + """ + A helper class to support local SGD on top of Accelerator. It simply runs a given number of updates independently + on each device, and averages model weights every K synchronization step. + + It should be used only in the multi-GPU (or multi-CPU) setup without extensions such as DeepSpeed. In particular, + this is a simple implementation that cannot support scenarios such as model parallelism. + + + Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes + back to at least: + + Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint + arXiv:1606.07365.](https://arxiv.org/abs/1606.07365) + + We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of). + + Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on + Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767) + + """ + + def __enter__(self): + if self.enabled: + self.model_sync_obj = self.model.no_sync() + self.model_sync_obj.__enter__() + + return self + + def __exit__(self, type, value, tb): + if self.enabled: + # Average all models on exit + self._sync_and_avg_model_params() + self.model_sync_obj.__exit__(type, value, tb) + + def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True): + """ + Constructor. + + Args: + model (`torch.nn.Module): + The model whose parameters we need to average. + accelerator (`Accelerator`): + Accelerator object. + local_sgd_steps (`int`): + A number of local SGD steps (before model parameters are synchronized). + enabled (`bool): + Local SGD is disabled if this parameter set to `False`. 
+ """ + if accelerator.distributed_type not in [ + DistributedType.NO, + DistributedType.MULTI_CPU, + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + ]: + raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)") + self.enabled = enabled and accelerator.distributed_type != DistributedType.NO + self.num_steps = 0 + if self.enabled: + self.accelerator = accelerator + self.model = model + self.local_sgd_steps = local_sgd_steps + + def step(self): + """ + This function makes a "step" and synchronizes model parameters if necessary. + """ + self.num_steps += 1 + if not self.enabled: + return + + if self.num_steps % self.local_sgd_steps == 0: + self._sync_and_avg_model_params() + + def _sync_and_avg_model_params(self): + """ + Synchronize + Average model parameters across all GPUs + """ + + self.accelerator.wait_for_everyone() + with self.accelerator.autocast(): + for param in self.model.parameters(): + param.data = self.accelerator.reduce(param.data, reduction="mean") diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/logging.py b/llmeval-env/lib/python3.10/site-packages/accelerate/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..ebb8c1eb830e54e3f2870cb3a84afd33b7631ea6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/logging.py @@ -0,0 +1,123 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +import logging +import os + +from .state import PartialState + + +class MultiProcessAdapter(logging.LoggerAdapter): + """ + An adapter to assist with logging in multiprocess. + + `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes + or only the main executed one. Default is `main_process_only=True`. + + Does not require an `Accelerator` object to be created first. + """ + + @staticmethod + def _should_log(main_process_only): + "Check if log should be performed" + state = PartialState() + return not main_process_only or (main_process_only and state.is_main_process) + + def log(self, level, msg, *args, **kwargs): + """ + Delegates logger call after checking if we should log. + + Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes + or only the main executed one. Default is `True` if not passed + + Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to + read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not + break with the previous behavior. + + `in_order` is ignored if `main_process_only` is passed. + """ + if PartialState._shared_state == {}: + raise RuntimeError( + "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." 
+            )
+        main_process_only = kwargs.pop("main_process_only", True)
+        in_order = kwargs.pop("in_order", False)
+
+        if self.isEnabledFor(level):
+            if self._should_log(main_process_only):
+                msg, kwargs = self.process(msg, kwargs)
+                self.logger.log(level, msg, *args, **kwargs)
+
+            elif in_order:
+                state = PartialState()
+                for i in range(state.num_processes):
+                    if i == state.process_index:
+                        msg, kwargs = self.process(msg, kwargs)
+                        self.logger.log(level, msg, *args, **kwargs)
+                    state.wait_for_everyone()
+
+    @functools.lru_cache(None)
+    def warning_once(self, *args, **kwargs):
+        """
+        This method is identical to `logger.warning()`, but will emit the warning with the same message only once.
+
+        Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the
+        cache. The assumption here is that all warning messages are unique across the code. If they aren't, then we
+        need to switch to another type of cache that includes the caller frame information in the hashing function.
+        """
+        self.warning(*args, **kwargs)
+
+
+def get_logger(name: str, log_level: str = None):
+    """
+    Returns a `logging.Logger` for `name` that can handle multiprocessing.
+
+    If a log should be called on all processes, pass `main_process_only=False`. If a log should be called on all
+    processes and in order, also pass `in_order=True`.
+
+    Args:
+        name (`str`):
+            The name for the logger, such as `__file__`
+        log_level (`str`, *optional*):
+            The log level to use. If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, and
+            will leave the logger's level unchanged if that is not set either.
+
+    Example:
+
+    ```python
+    >>> from accelerate.logging import get_logger
+    >>> from accelerate import Accelerator
+
+    >>> logger = get_logger(__name__)
+
+    >>> accelerator = Accelerator()
+    >>> logger.info("My log", main_process_only=False)
+    >>> logger.debug("My log", main_process_only=True)
+
+    >>> logger = get_logger(__name__, log_level="DEBUG")
+    >>> logger.info("My log")
+    >>> logger.debug("My second log")
+
+    >>> array = ["a", "b", "c", "d"]
+    >>> letter_at_rank = array[accelerator.process_index]
+    >>> logger.info(letter_at_rank, in_order=True)
+    ```
+    """
+    if log_level is None:
+        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
+    logger = logging.getLogger(name)
+    if log_level is not None:
+        logger.setLevel(log_level.upper())
+        logger.root.setLevel(log_level.upper())
+    return MultiProcessAdapter(logger, {})
diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/memory_utils.py b/llmeval-env/lib/python3.10/site-packages/accelerate/memory_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa2e2c8b9d7d0064c3e5e282737a7ad6919bde29
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/accelerate/memory_utils.py
@@ -0,0 +1,22 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+
+
+warnings.warn(
+    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main "
+    "`__init__`: `from accelerate import find_executable_batch_size` to avoid this warning.",
+    FutureWarning,
+)
diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/optimizer.py b/llmeval-env/lib/python3.10/site-packages/accelerate/optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3230aa6606eb2f43b0ca72fdca64ce6d6aa01bdb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/accelerate/optimizer.py
@@ -0,0 +1,214 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import warnings
+
+import torch
+
+from .state import AcceleratorState, GradientState
+from .utils import DistributedType, honor_type, is_lomo_available, is_torch_xla_available
+
+
+if is_torch_xla_available():
+    import torch_xla.core.xla_model as xm
+
+
+def move_to_device(state, device):
+    if isinstance(state, (list, tuple)):
+        return honor_type(state, (move_to_device(t, device) for t in state))
+    elif isinstance(state, dict):
+        return type(state)({k: move_to_device(v, device) for k, v in state.items()})
+    elif isinstance(state, torch.Tensor):
+        return state.to(device)
+    return state
+
+
+class AcceleratedOptimizer(torch.optim.Optimizer):
+    """
+    Internal wrapper around a torch optimizer.
+
+    Will conditionally perform `step` and `zero_grad` if gradients should be synchronized when performing gradient
+    accumulation.
+
+    Args:
+        optimizer (`torch.optim.Optimizer`):
+            The optimizer to wrap.
+        device_placement (`bool`, *optional*, defaults to `True`):
+            Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
+            `optimizer` on the right device.
+        scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):
+            The scaler to use in the step function if training with mixed precision.
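A sketch of this wrapper in context (the class body continues below): `Accelerator.prepare` is what hands back an `AcceleratedOptimizer`; the toy model and data are assumptions:

```python
# A sketch on CPU; the model and data are hypothetical stand-ins.
import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
# `prepare` hands back an `AcceleratedOptimizer` wrapping the original one
model, optimizer = accelerator.prepare(model, optimizer)

loss = model(torch.randn(4, 2)).sum()
accelerator.backward(loss)
optimizer.step()
# Under fp16 gradient scaling, a step skipped due to overflow flips this to True
print(optimizer.step_was_skipped)
optimizer.zero_grad()
```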
+ """ + + def __init__(self, optimizer, device_placement=True, scaler=None): + self.optimizer = optimizer + self.scaler = scaler + self.accelerator_state = AcceleratorState() + self.gradient_state = GradientState() + self.device_placement = device_placement + self._is_overflow = False + + if self.scaler is not None: + self._accelerate_step_called = False + self._optimizer_original_step_method = self.optimizer.step + self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) + + # Handle device placement + if device_placement: + state_dict = self.optimizer.state_dict() + if self.accelerator_state.distributed_type == DistributedType.XLA: + xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) + else: + state_dict = move_to_device(state_dict, self.accelerator_state.device) + self.optimizer.load_state_dict(state_dict) + + @property + def state(self): + return self.optimizer.state + + @state.setter + def state(self, state): + self.optimizer.state = state + + @property + def param_groups(self): + return self.optimizer.param_groups + + @param_groups.setter + def param_groups(self, param_groups): + self.optimizer.param_groups = param_groups + + @property + def defaults(self): + return self.optimizer.defaults + + @defaults.setter + def defaults(self, defaults): + self.optimizer.defaults = defaults + + def add_param_group(self, param_group): + self.optimizer.add_param_group(param_group) + + def load_state_dict(self, state_dict): + if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement: + xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) + self.optimizer.load_state_dict(state_dict) + + def state_dict(self): + return self.optimizer.state_dict() + + def zero_grad(self, set_to_none=None): + if self.gradient_state.sync_gradients: + accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters + if accept_arg: + if set_to_none is None: + set_to_none = True + self.optimizer.zero_grad(set_to_none=set_to_none) + else: + if set_to_none is not None: + raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.") + self.optimizer.zero_grad() + + def train(self): + """ + Sets the optimizer to "train" mode. Useful for optimizers like `schedule_free` + """ + return self.optimizer.train() + + def eval(self): + """ + Sets the optimizer to "eval" mode. Useful for optimizers like `schedule_free` + """ + return self.optimizer.eval() + + def step(self, closure=None): + if is_lomo_available(): + from lomo_optim import AdaLomo, Lomo + + if ( + not self.gradient_state.is_xla_gradients_synced + and self.accelerator_state.distributed_type == DistributedType.XLA + ): + gradients = xm._fetch_gradients(self.optimizer) + xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) + self.gradient_state.is_xla_gradients_synced = True + + if is_lomo_available(): + # `step` should be a no-op for LOMO optimizers. + if isinstance(self.optimizer, (Lomo, AdaLomo)): + return + + if self.gradient_state.sync_gradients: + if self.scaler is not None: + self.optimizer.step = self._optimizer_patched_step_method + + self.scaler.step(self.optimizer, closure) + self.scaler.update() + + if not self._accelerate_step_called: + # If the optimizer step was skipped, gradient overflow was detected. 
+                    self._is_overflow = True
+                else:
+                    self._is_overflow = False
+                # Reset the step method to the original one
+                self.optimizer.step = self._optimizer_original_step_method
+                # Reset the indicator
+                self._accelerate_step_called = False
+            else:
+                self.optimizer.step(closure)
+            if self.accelerator_state.distributed_type == DistributedType.XLA:
+                self.gradient_state.is_xla_gradients_synced = False
+
+    def _switch_parameters(self, parameters_map):
+        for param_group in self.optimizer.param_groups:
+            param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
+
+    @property
+    def is_overflow(self):
+        """Whether or not the optimizer step was skipped because of gradient overflow."""
+        warnings.warn(
+            "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate. Use "
+            "`optimizer.step_was_skipped` instead.",
+            FutureWarning,
+        )
+        return self._is_overflow
+
+    @property
+    def step_was_skipped(self):
+        """Whether or not the optimizer step was skipped."""
+        return self._is_overflow
+
+    def __getstate__(self):
+        _ignored_keys = [
+            "_accelerate_step_called",
+            "_optimizer_original_step_method",
+            "_optimizer_patched_step_method",
+        ]
+        return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}
+
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+        if self.scaler is not None:
+            self._accelerate_step_called = False
+            self._optimizer_original_step_method = self.optimizer.step
+            self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
+
+
+def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
+    def patched_step(*args, **kwargs):
+        accelerated_optimizer._accelerate_step_called = True
+        return method(*args, **kwargs)
+
+    return patched_step
diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/scheduler.py b/llmeval-env/lib/python3.10/site-packages/accelerate/scheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fa8a13f238afd7b908ee8e8cb8e0620f48d4ff8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/accelerate/scheduler.py
@@ -0,0 +1,98 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
+
+import warnings
+
+from .state import AcceleratorState, GradientState
+
+
+warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
+
+
+class AcceleratedScheduler:
+    """
+    A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
+    to avoid making a scheduler step too fast when gradients overflowed and there was no training step (in mixed
+    precision training).
+
+    When performing gradient accumulation, scheduler lengths should not be changed accordingly; Accelerate will
+    always step the scheduler to account for it.
+ + Args: + scheduler (`torch.optim.lr_scheduler._LRScheduler`): + The scheduler to wrap. + optimizers (one or a list of `torch.optim.Optimizer`): + The optimizers used. + step_with_optimizer (`bool`, *optional*, defaults to `True`): + Whether or not the scheduler should be stepped at each optimizer step. + split_batches (`bool`, *optional*, defaults to `False`): + Whether or not the dataloaders split one batch across the different processes (so batch size is the same + regardless of the number of processes) or create batches on each process (so batch size is the original + batch size multiplied by the number of processes). + """ + + def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False): + self.scheduler = scheduler + self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers] + self.split_batches = split_batches + self.step_with_optimizer = step_with_optimizer + self.gradient_state = GradientState() + + def step(self, *args, **kwargs): + if not self.step_with_optimizer: + # No link between scheduler and optimizer -> just step + self.scheduler.step(*args, **kwargs) + return + + # Otherwise, first make sure the optimizer was stepped. + if not self.gradient_state.sync_gradients: + if self.gradient_state.adjust_scheduler: + self.scheduler._step_count += 1 + return + + for opt in self.optimizers: + if opt.step_was_skipped: + return + if self.split_batches: + # Split batches -> the training dataloader batch size is not changed so one step per training step + self.scheduler.step(*args, **kwargs) + else: + # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do + # num_processes steps per training step + num_processes = AcceleratorState().num_processes + for _ in range(num_processes): + # Special case when using OneCycle and `drop_last` was not used + if hasattr(self.scheduler, "total_steps"): + if self.scheduler._step_count <= self.scheduler.total_steps: + self.scheduler.step(*args, **kwargs) + else: + self.scheduler.step(*args, **kwargs) + + # Passthroughs + def get_last_lr(self): + return self.scheduler.get_last_lr() + + def state_dict(self): + return self.scheduler.state_dict() + + def load_state_dict(self, state_dict): + self.scheduler.load_state_dict(state_dict) + + def get_lr(self): + return self.scheduler.get_lr() + + def print_lr(self, *args, **kwargs): + return self.scheduler.print_lr(*args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/state.py b/llmeval-env/lib/python3.10/site-packages/accelerate/state.py new file mode 100644 index 0000000000000000000000000000000000000000..f39884c5eae482ae89caf07d85f9c8462b6b7d27 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/state.py @@ -0,0 +1,1208 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
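Before `state.py` starts, a sketch of the scheduler wrapper defined above paired with gradient accumulation; the model, optimizer and scheduler choices are toy assumptions:

```python
# A sketch on CPU with gradient accumulation; all concrete names are toys.
import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True, gradient_accumulation_steps=2)
model = torch.nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)

for _ in range(4):
    with accelerator.accumulate(model):
        loss = model(torch.randn(8, 2)).sum()
        accelerator.backward(loss)
        optimizer.step()
        # Only truly steps when the optimizer stepped (every 2nd iteration here)
        scheduler.step()
        optimizer.zero_grad()
print(scheduler.get_last_lr())
```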
+
+from __future__ import annotations
+
+import logging
+import math
+import os
+import threading
+import warnings
+from contextlib import contextmanager
+from functools import partial
+from typing import Any, Callable, Optional
+
+import torch
+
+from .utils import (
+    DistributedType,
+    DynamoBackend,
+    GradientAccumulationPlugin,
+    check_cuda_p2p_ib_support,
+    check_fp8_capability,
+    get_ccl_version,
+    get_cpu_distributed_information,
+    get_int_from_env,
+    is_ccl_available,
+    is_datasets_available,
+    is_deepspeed_available,
+    is_fp8_available,
+    is_ipex_available,
+    is_mlu_available,
+    is_mps_available,
+    is_npu_available,
+    is_torch_xla_available,
+    is_xpu_available,
+    parse_choice_from_env,
+    parse_flag_from_env,
+    set_numa_affinity,
+)
+from .utils.dataclasses import SageMakerDistributedType
+
+
+if is_torch_xla_available():
+    import torch_xla.core.xla_model as xm
+
+if is_mlu_available(check_device=False):
+    import torch_mlu  # noqa: F401
+
+if is_npu_available(check_device=False):
+    import torch_npu  # noqa: F401
+
+logger = logging.getLogger(__name__)
+
+
+def is_initialized() -> bool:
+    """
+    Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
+    but works as a module method.
+    """
+    return AcceleratorState._shared_state != {}
+
+
+# Function that does nothing
+def do_nothing(*args, **kwargs):
+    return None
+
+
+class ThreadLocalSharedDict(threading.local):
+    """
+    Descriptor that holds a dict shared between instances of a class in the same thread.
+
+    Note: Descriptors have slightly different semantics than just a dict field on its own.
+    `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the
+    underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside
+    the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor
+    object with a dict instead. Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`).
+
+    See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html
+
+    This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).
+
+    See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
+    """
+
+    def __init__(self, thread_local: bool = False):
+        self._storage = {}
+
+    def __get__(self, obj, objtype=None):
+        return self._storage
+
+    def __set__(self, obj, value):
+        self._storage = value
+
+
+# Prefer global shared dictionary, except when using TPU.
+SharedDict = dict if not is_torch_xla_available() else ThreadLocalSharedDict
+
+
+# Inspired by Alex Martelli's 'Borg'.
+class PartialState:
+    """
+    Singleton class that has information about the current training environment and functions to help with process
+    control. Designed to be used when only process control and device execution states are needed. Does *not* need to
+    be initialized from `Accelerator`.
+
+    Args:
+        cpu (`bool`, *optional*):
+            Whether or not to force the script to execute on CPU. Will ignore any accelerators available if set to
+            `True` and force the execution on the CPU.
+        kwargs (additional keyword arguments, *optional*):
+            Additional keyword arguments to pass to the relevant `init_process_group` function. Valid `kwargs` can be
+            found in [`utils.InitProcessGroupKwargs`]. See the example section for detailed usage.
+
+    **Available attributes:**
+
+    - **device** (`torch.device`) -- The device to use.
+    - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
+      in use.
+    - **local_process_index** (`int`) -- The index of the current process on the current server.
+    - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
+      of mixed precision being performed. (Choose from 'no', 'fp16', 'bf16' or 'fp8').
+    - **num_processes** (`int`) -- The number of processes currently launched in parallel.
+    - **process_index** (`int`) -- The index of the current process.
+    - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
+    - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
+    - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
+    - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
+
+    Example:
+    ```python
+    from accelerate.utils import InitProcessGroupKwargs
+
+    # To include `InitProcessGroupKwargs`, init then call `.to_kwargs()`
+    kwargs = InitProcessGroupKwargs(...).to_kwargs()
+    state = PartialState(**kwargs)
+    ```
+    """
+
+    _shared_state = SharedDict()
+    _known_attrs = [
+        "_cpu",
+        "_mixed_precision",
+        "_shared_state",
+        "backend",
+        "debug",
+        "device",
+        "distributed_type",
+        "fork_launched",
+        "local_process_index",
+        "num_processes",
+        "process_index",
+    ]
+
+    def __init__(self, cpu: bool = False, **kwargs):
+        self.__dict__ = self._shared_state
+        if not self.initialized:
+            self._cpu = cpu
+            self.backend = None
+            env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None)
+            self.device = torch.device(env_device) if env_device is not None else None
+            self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE")
+            use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None)
+            dist_information = None
+            if use_sagemaker_dp is None:
+                use_sagemaker_dp = (
+                    os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true"
+                    and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
+                )
+
+            # Sets up self.backend + imports
+            original_backend = kwargs.pop("backend", None)
+            backend, distributed_type = self._prepare_backend(cpu, use_sagemaker_dp, original_backend)
+            if original_backend is not None and backend != original_backend:
+                raise ValueError(f"Your assigned backend {original_backend} is not available, please use {backend}")
+            self.backend = backend
+            self.distributed_type = distributed_type
+            use_deepspeed = False
+            if not cpu and self.backend != "xla":
+                if int(os.environ.get("LOCAL_RANK", -1)) != -1:
+                    # Deal with spawning deepspeed
+                    if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true":
+                        if not is_deepspeed_available():
+                            raise ImportError(
+                                "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
+                            )
+                        from deepspeed import comm as dist
+
+                        if is_xpu_available() and is_ccl_available():
+                            os.environ["CCL_PROCESS_LAUNCHER"] = "none"
+                            os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1")
+                            os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0")
+
+                        if not dist.is_initialized():
+                            dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
+                        # We need to set `use_deepspeed` to True to override `distributed_type` later
+                        use_deepspeed = True
+                    # Deal with all other backends but XPU and CPU, which get handled specially later
+                    elif (
+                        self.distributed_type not in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU)
+                        and not torch.distributed.is_initialized()
+                    ):
+                        torch.distributed.init_process_group(backend=self.backend, **kwargs)
+            # XPU and CPU require special env configs to be set
+            if self.distributed_type in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU):
+                dist_information = get_cpu_distributed_information()
+                os.environ["RANK"] = str(dist_information.rank)
+                os.environ["WORLD_SIZE"] = str(dist_information.world_size)
+                os.environ["LOCAL_RANK"] = str(dist_information.local_rank)
+                os.environ["LOCAL_WORLD_SIZE"] = str(dist_information.local_world_size)
+                if self.backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU:
+                    os.environ["CCL_PROCESS_LAUNCHER"] = "none"
+                    os.environ["CCL_LOCAL_SIZE"] = os.environ["LOCAL_WORLD_SIZE"]
+                    os.environ["CCL_LOCAL_RANK"] = os.environ["LOCAL_RANK"]
+                if not os.environ.get("MASTER_PORT", None):
+                    os.environ["MASTER_PORT"] = "29500"
+                if (
+                    not os.environ.get("MASTER_ADDR", None)
+                    and dist_information.local_world_size != dist_information.world_size
+                    and self.backend != "mpi"
+                ):
+                    raise ValueError(
+                        "Tried to launch on distributed with multinode, but `MASTER_ADDR` env was not set, "
+                        "please try exporting rank 0's hostname as `MASTER_ADDR`"
+                    )
+                kwargs["rank"] = dist_information.rank
+                kwargs["world_size"] = dist_information.world_size
+
+                if (
+                    self.distributed_type == DistributedType.MULTI_CPU
+                    and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0
+                ):
+                    import psutil
+
+                    num_cpu_threads_per_process = int(
+                        psutil.cpu_count(logical=False) / dist_information.local_world_size
+                    )
+                    if num_cpu_threads_per_process == 0:
+                        num_cpu_threads_per_process = 1
+                    torch.set_num_threads(num_cpu_threads_per_process)
+                    warnings.warn(
+                        f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob"
+                        " performance."
+ ) + + if not torch.distributed.is_initialized(): + torch.distributed.init_process_group(backend=self.backend, **kwargs) + + # No backend == no distributed training + if self.backend is None: + self.distributed_type = DistributedType.NO + self.num_processes = 1 + self.process_index = 0 + self.local_process_index = 0 + elif self.backend == "xla": + # XLA needs device setting first for `set_replication` + self.set_device() + xm.set_replication(self.device, xm.get_xla_supported_devices()) + self.num_processes = xm.xrt_world_size() + self.process_index = xm.get_ordinal() + if is_torch_xla_available(check_is_tpu=True): + self.local_process_index = xm.get_local_ordinal() + else: + self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) + else: + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + self.local_process_index = ( + int(os.environ.get("LOCAL_RANK", -1)) if dist_information is None else dist_information.local_rank + ) + self.set_device() + # Now we can change to deepseed + if use_deepspeed: + self.distributed_type = DistributedType.DEEPSPEED + + # Set CPU affinity if enabled + if parse_flag_from_env("ACCELERATE_CPU_AFFINITY", False): + set_numa_affinity(self.local_process_index) + + # Check for old RTX 4000's that can't use P2P or IB and are on old drivers + if self.device.type == "cuda" and not check_cuda_p2p_ib_support(): + if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: + raise NotImplementedError( + "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. " + 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' + "will do this automatically." + ) + # Important: This should be the *only* code outside of `self.initialized!` + self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) + + def __repr__(self) -> str: + return ( + f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" + f"Num processes: {self.num_processes}\n" + f"Process index: {self.process_index}\n" + f"Local process index: {self.local_process_index}\n" + f"Device: {self.device}\n" + ) + + @staticmethod + def _reset_state(): + "Resets `_shared_state`, is used internally and should not be called" + PartialState._shared_state.clear() + + @property + def initialized(self) -> bool: + "Returns whether the `PartialState` has been initialized" + return self._shared_state != {} + + @property + def use_distributed(self): + """ + Whether the Accelerator is configured for distributed training + """ + return self.distributed_type != DistributedType.NO and self.num_processes > 1 + + @property + def is_last_process(self) -> bool: + "Returns whether the current process is the last one" + return self.process_index == self.num_processes - 1 + + @property + def is_main_process(self) -> bool: + "Returns whether the current process is the main process" + return ( + self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process + ) + + @property + def is_local_main_process(self) -> bool: + "Returns whether the current process is the main process on the local node" + return ( + self.local_process_index == 0 + if self.distributed_type != DistributedType.MEGATRON_LM + else self.is_last_process + ) + + def wait_for_everyone(self): + """ + Will stop the execution of the current process until every other process has reached that point (so this does + nothing when the script is 
only run in one process). Useful to do before saving a model. + + Example: + + ```python + >>> # Assuming two GPU processes + >>> import time + >>> from accelerate.state import PartialState + + >>> state = PartialState() + >>> if state.is_main_process: + ... time.sleep(2) + >>> else: + ... print("I'm waiting for the main process to finish its sleep...") + >>> state.wait_for_everyone() + >>> # Should print on every process at the same time + >>> print("Everyone is here") + ``` + """ + if self.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_CPU, + DistributedType.DEEPSPEED, + DistributedType.FSDP, + ): + torch.distributed.barrier() + elif self.distributed_type == DistributedType.XLA: + xm.rendezvous("accelerate.utils.wait_for_everyone") + + def _goes_first(self, is_main: bool): + if not is_main: + self.wait_for_everyone() + + yield + + if is_main: + self.wait_for_everyone() + + @contextmanager + def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): + """ + Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing + distributed inference, such as with different prompts. + + Note that when using a `dict`, all keys need to have the same number of elements. + + Args: + inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`): + The input to split between processes. + apply_padding (`bool`, `optional`, defaults to `False`): + Whether to apply padding by repeating the last element of the input so that all processes have the same + number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing + in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. 
+ + + Example: + + ```python + # Assume there are two processes + from accelerate import PartialState + + state = PartialState() + with state.split_between_processes(["A", "B", "C"]) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C"] + + with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C", "C"] + ``` + """ + if self.num_processes == 1: + yield inputs + return + length = len(inputs) + # Nested dictionary of any types + if isinstance(inputs, dict): + length = len(inputs[list(inputs.keys())[0]]) + if not all(len(v) == length for v in inputs.values()): + raise ValueError("All values in the dictionary must have the same length") + num_samples_per_process = math.ceil(length / self.num_processes) + start_index = self.process_index * num_samples_per_process + end_index = start_index + num_samples_per_process + if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): + end_index = length + + def _split_values(inputs, start_index, end_index): + if isinstance(inputs, (list, tuple, torch.Tensor)): + if start_index >= len(inputs): + result = inputs[-1:] + else: + result = inputs[start_index:end_index] + if apply_padding: + if isinstance(result, torch.Tensor): + from accelerate.utils import pad_across_processes, send_to_device + + # The tensor needs to be on the device before we can pad it + tensorized_result = send_to_device(result, self.device) + result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) + else: + result += [result[-1]] * (num_samples_per_process - len(result)) + return result + elif isinstance(inputs, dict): + for key in inputs.keys(): + inputs[key] = _split_values(inputs[key], start_index, end_index) + return inputs + else: + if is_datasets_available(): + from datasets import Dataset + + if isinstance(inputs, Dataset): + if start_index >= len(inputs): + start_index = len(inputs) - 1 + if end_index > len(inputs): + end_index = len(inputs) + result_idcs = list(range(start_index, end_index)) + if apply_padding: + result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs)) + return inputs.select(result_idcs) + return inputs + + yield _split_values(inputs, start_index, end_index) + + @contextmanager + def main_process_first(self): + """ + Lets the main process go first inside a with block. + + The other processes will enter the with block after the main process exits. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> with accelerator.main_process_first(): + ... # This will be printed first by process 0 then in a seemingly + ... # random order by the other processes. + ... print(f"This will be printed by process {accelerator.process_index}") + ``` + """ + yield from self._goes_first(self.is_main_process) + + @contextmanager + def local_main_process_first(self): + """ + Lets the local main process go inside a with block. + + The other processes will enter the with block after the main process exits. + + Example: + + ```python + >>> from accelerate.state import PartialState + + >>> state = PartialState() + >>> with state.local_main_process_first(): + ... # This will be printed first by local process 0 then in a seemingly + ... # random order by the other processes. + ... 
print(f"This will be printed by process {state.local_process_index}") + ``` + """ + yield from self._goes_first(self.is_local_main_process) + + def on_main_process(self, function: Callable[..., Any] = None): + """ + Decorator that only runs the decorated function on the main process. + + Args: + function (`Callable`): The function to decorate. + + Example: + + ```python + >>> from accelerate.state import PartialState + + >>> state = PartialState() + + + >>> @state.on_main_process + ... def print_something(): + ... print("This will be printed by process 0 only.") + + + >>> print_something() + "This will be printed by process 0 only" + ``` + """ + if not self.initialized: + raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") + if self.is_main_process or not self.use_distributed: + return function + return do_nothing + + def on_local_main_process(self, function: Callable[..., Any] = None): + """ + Decorator that only runs the decorated function on the local main process. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_local_main_process + def print_something(): + print("This will be printed by process 0 only on each server.") + + + print_something() + # On server 1: + "This will be printed by process 0 only" + # On server 2: + "This will be printed by process 0 only" + ``` + """ + if self.is_local_main_process or not self.use_distributed: + return function + return do_nothing + + def on_last_process(self, function: Callable[..., Any]): + """ + Decorator that only runs the decorated function on the last process. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 4 processes. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_last_process + def print_something(): + print(f"Printed on process {state.process_index}") + + + print_something() + "Printed on process 3" + ``` + """ + if self.is_last_process or not self.use_distributed: + return function + return do_nothing + + def on_process(self, function: Callable[..., Any] = None, process_index: int = None): + """ + Decorator that only runs the decorated function on the process with the given index. + + Args: + function (`Callable`, `optional`): + The function to decorate. + process_index (`int`, `optional`): + The index of the process on which to run the function. + + Example: + ```python + # Assume we have 4 processes. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_process(process_index=2) + def print_something(): + print(f"Printed on process {state.process_index}") + + + print_something() + "Printed on process 2" + ``` + """ + if function is None: + return partial(self.on_process, process_index=process_index) + if (self.process_index == process_index) or (not self.use_distributed): + return function + return do_nothing + + def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): + """ + Decorator that only runs the decorated function on the process with the given index on the current node. + + Args: + function (`Callable`, *optional*): + The function to decorate. + local_process_index (`int`, *optional*): + The index of the local process on which to run the function. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. 
+ from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_local_process(local_process_index=2) + def print_something(): + print(f"Printed on process {accelerator.local_process_index}") + + + print_something() + # On server 1: + "Printed on process 2" + # On server 2: + "Printed on process 2" + ``` + """ + if function is None: + return partial(self.on_local_process, local_process_index=local_process_index) + if (self.local_process_index == local_process_index) or (not self.use_distributed): + return function + return do_nothing + + def print(self, *args, **kwargs): + if self.is_local_main_process: + print(*args, **kwargs) + + @property + def default_device(self) -> torch.device: + """ + Returns the default device which is: + - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. + - CUDA if `torch.cuda.is_available()` + - MLU if `is_mlu_available()` + - NPU if `is_npu_available()` + - CPU otherwise + """ + if is_mps_available(): + os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + return torch.device("mps") + elif is_mlu_available(): + return torch.device("mlu") + elif torch.cuda.is_available(): + return torch.device("cuda") + elif is_xpu_available(): + return torch.device("xpu:0") + elif is_npu_available(): + return torch.device("npu") + else: + return torch.device("cpu") + + def _prepare_backend( + self, cpu: bool = False, sagemaker_dp=False, backend: str = None + ) -> tuple[str, DistributedType]: + "Prepares any imports needed before initializing the distributed backend and sets `self.backend` properly" + distributed_type = None + if sagemaker_dp: + import smdistributed.dataparallel.torch.torch_smddp # noqa + + backend = "smddp" + distributed_type = DistributedType.MULTI_GPU + elif is_torch_xla_available(): + backend = "xla" + distributed_type = DistributedType.XLA + elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu: + if is_mlu_available(): + backend = "cncl" + distributed_type = DistributedType.MULTI_MLU + elif torch.cuda.is_available(): + if backend is None: + backend = "nccl" + distributed_type = DistributedType.MULTI_GPU + elif is_npu_available(): + backend = "hccl" + distributed_type = DistributedType.MULTI_NPU + + if distributed_type is None and ( + int(os.environ.get("LOCAL_RANK", -1)) != -1 + or get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1 + ): + if not cpu and is_xpu_available(): + distributed_type = DistributedType.MULTI_XPU + else: + distributed_type = DistributedType.MULTI_CPU + + if ( + backend in (None, "ccl") + and is_ccl_available() + and (get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or distributed_type == DistributedType.MULTI_XPU) + ): + if get_ccl_version() >= "1.12": + import oneccl_bindings_for_pytorch # noqa: F401 + else: + import torch_ccl # noqa: F401 + + backend = "ccl" + elif backend in (None, "mpi") and torch.distributed.is_mpi_available(): + backend = "mpi" + else: + backend = "gloo" + if distributed_type is None: + distributed_type = DistributedType.NO + + return backend, distributed_type + + def set_device(self): + """ + Sets the device in `self.device` to the current distributed environment. 
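A quick sketch of inspecting the device-resolution logic above (the `set_device` body continues below); run without a distributed launcher, this exercises the single-process path:

```python
# A sketch; without a distributed launcher this exercises the
# `DistributedType.NO` path and the fallback order of `default_device`.
from accelerate import PartialState

state = PartialState()
print(state.device)  # resolved by `set_device` / `default_device`
print(state.distributed_type, state.num_processes)
state.print("Only the local main process prints this.")
```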
+ """ + if self.device is not None: + return + if self.distributed_type == DistributedType.NO: + self.device = torch.device("cpu") if self._cpu else self.default_device + return + device = str(self.distributed_type).split(".")[-1].replace("MULTI_", "").lower() + if device not in ("cpu", "gpu", "mlu", "npu", "xpu", "xla"): + raise ValueError( + f"Can't set device for {self.distributed_type} ({device}), verify we should be calling `_set_device()` for it!" + ) + if device == "xla": + self.device = xm.xla_device() + else: + if device == "gpu": + device = "cuda" + self.device = torch.device(device, self.local_process_index) + if self.device is not None: + if device == "xpu": + torch.xpu.set_device(self.device) + elif device == "mlu": + torch.mlu.set_device(self.device) + elif device == "npu": + torch.npu.set_device(self.device) + elif device == "cuda": + torch.cuda.set_device(self.device) + + def __getattr__(self, name: str): + # By this point we know that no attributes of `self` contain `name`, + # so we just modify the error message + if name in self._known_attrs: + raise AttributeError( + f"`PartialState` object has no attribute `{name}`. " + "This happens if `PartialState._reset_state()` was called and " + "an `Accelerator` or `PartialState` was not reinitialized." + ) + # Raise a typical AttributeError + raise AttributeError(f"'PartialState' object has no attribute '{name}'") + + +class AcceleratorState: + """ + Singleton class that has information about the current training environment. + + **Available attributes:** + + - **device** (`torch.device`) -- The device to use. + - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently + in use. + - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`. + - **local_process_index** (`int`) -- The index of the current process on the current server. + - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type + of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). + - **num_processes** (`int`) -- The number of processes currently launched in parallel. + - **process_index** (`int`) -- The index of the current process. + - **is_last_process** (`bool`) -- Whether or not the current process is the last one. + - **is_main_process** (`bool`) -- Whether or not the current process is the main one. + - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. + - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. 
+ """ + + _shared_state = SharedDict() + _known_attrs = PartialState._known_attrs + [ + "deepspeed_plugin", + "use_ipex", + "fsdp_plugin", + "megatron_lm_plugin", + "dynamo_plugin", + ] + + def __init__( + self, + mixed_precision: str = None, + cpu: bool = False, + dynamo_plugin=None, + deepspeed_plugin=None, + fsdp_plugin=None, + megatron_lm_plugin=None, + _from_accelerator: bool = False, + **kwargs, + ): + self.__dict__ = self._shared_state + if parse_flag_from_env("ACCELERATE_USE_CPU"): + cpu = True + if PartialState._shared_state == {}: + PartialState(cpu, **kwargs) + self.__dict__.update(PartialState._shared_state) + self._check_initialized(mixed_precision, cpu) + if not self.initialized: + self.deepspeed_plugin = None + self.use_ipex = None + mixed_precision = ( + parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no") + if mixed_precision is None + else mixed_precision.lower() + ) + if mixed_precision == "fp8": + if not is_fp8_available(): + raise ValueError( + "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed." + ) + elif not check_fp8_capability(): + logger.warning( + f"The current device has compute capability of {torch.cuda.get_device_capability()} which is " + "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace " + "or higher, compute capability of 8.9 or higher). Will use FP16 instead." + ) + mixed_precision = "fp16" + + self.dynamo_plugin = dynamo_plugin + if not _from_accelerator: + raise ValueError( + "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` " + "before using any functionality from the `accelerate` library." + ) + # deepspeed handles mixed_precision using deepspeed_config + self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision + if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True): + if mixed_precision == "bf16": + if os.environ.get("ACCELERATE_DOWNCAST_BF16"): + os.environ["XLA_USE_BF16"] = str(0) + os.environ["XLA_DOWNCAST_BF16"] = str(1) + self.downcast_bfloat = True + else: + os.environ["XLA_USE_BF16"] = str(1) + os.environ["XLA_DOWNCAST_BF16"] = str(0) + self.downcast_bfloat = False + elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu: + self.deepspeed_plugin = deepspeed_plugin + elif self.distributed_type in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + ]: + if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": + self.distributed_type = DistributedType.FSDP + if self._mixed_precision != "no": + fsdp_plugin.set_mixed_precision(self._mixed_precision) + self.fsdp_plugin = fsdp_plugin + if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" and self.distributed_type not in [ + DistributedType.MULTI_XPU, + ]: + self.distributed_type = DistributedType.MEGATRON_LM + megatron_lm_plugin.set_mixed_precision(self._mixed_precision) + self.megatron_lm_plugin = megatron_lm_plugin + elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: + if is_ipex_available(): + # check if user disables it explicitly + self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True) + else: + self.use_ipex = False + if ( + self.dynamo_plugin.backend != DynamoBackend.NO + and self._mixed_precision == "no" + and self.device.type == "cuda" + ): + torch.backends.cuda.matmul.allow_tf32 = True + 
PartialState._shared_state["distributed_type"] = self.distributed_type + + @property + def initialized(self) -> bool: + return self._shared_state != PartialState._shared_state + + def __repr__(self): + repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n" + if self.distributed_type == DistributedType.DEEPSPEED: + repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n" + return repr + + def _check_initialized(self, mixed_precision=None, cpu=None): + "Checks if a modification is being attempted after the `AcceleratorState` has already been initialized" + if self.initialized: + err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`." + if cpu and self.device.type != "cpu": + raise ValueError(err.format(flag="cpu=True")) + if ( + mixed_precision is not None + and mixed_precision != self._mixed_precision + and self.distributed_type != DistributedType.DEEPSPEED + ): + raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'")) + + # For backward compatibility + @property + def use_fp16(self): + warnings.warn( + "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate. Use " + "`AcceleratorState.mixed_precision == 'fp16'` instead.", + FutureWarning, + ) + return self._mixed_precision != "no" + + @property + def mixed_precision(self): + if self.distributed_type == DistributedType.DEEPSPEED: + config = self.deepspeed_plugin.deepspeed_config + if config.get("fp16", {}).get("enabled", False): + mixed_precision = "fp16" + elif config.get("bf16", {}).get("enabled", False): + mixed_precision = "bf16" + else: + mixed_precision = "no" + else: + mixed_precision = self._mixed_precision + return mixed_precision + + @staticmethod + def _reset_state(reset_partial_state: bool = False): + "Resets `_shared_state`; used internally and should not be called" + AcceleratorState._shared_state.clear() + if reset_partial_state: + PartialState._reset_state() + + @property + def use_distributed(self): + """ + Whether the Accelerator is configured for distributed training + """ + return PartialState().use_distributed + + @property + def is_last_process(self) -> bool: + "Returns whether the current process is the last one" + return PartialState().is_last_process + + @property + def is_main_process(self) -> bool: + "Returns whether the current process is the main process" + return PartialState().is_main_process + + @property + def is_local_main_process(self) -> bool: + "Returns whether the current process is the main process on the local node" + return PartialState().is_local_main_process + + def wait_for_everyone(self): + PartialState().wait_for_everyone() + + @contextmanager + def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): + """ + Splits `inputs` between `self.num_processes` quickly, and the result can then be used on that process. Useful when doing + distributed inference, such as with different prompts. + + Note that when using a `dict`, all keys need to have the same number of elements. + + Args: + inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): + The input to split between processes. + apply_padding (`bool`, `optional`, defaults to `False`): + Whether to apply padding by repeating the last element of the input so that all processes have the same + number of elements.
Useful when trying to perform actions such as `gather()` on the outputs or passing + in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards. + + + Example: + + ```python + # Assume there are two processes + from accelerate.state import AcceleratorState + + state = AcceleratorState() + with state.split_between_processes(["A", "B", "C"]) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C"] + + with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C", "C"] + ``` + """ + with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: + yield inputs + + @contextmanager + def main_process_first(self): + """ + Lets the main process go first inside a with block. + + The other processes will enter the with block after the main process exits. + """ + with PartialState().main_process_first(): + yield + + @contextmanager + def local_main_process_first(self): + """ + Lets the local main process go first inside a with block. + + The other processes will enter the with block after the local main process exits. + """ + with PartialState().local_main_process_first(): + yield + + def print(self, *args, **kwargs): + PartialState().print(*args, **kwargs) + + def __getattr__(self, name: str): + # By this point we know that no attributes of `self` contain `name`, + # so we just modify the error message + if name in self._known_attrs: + raise AttributeError( + f"`AcceleratorState` object has no attribute `{name}`. " + "This happens if `AcceleratorState._reset_state()` was called and " + "an `Accelerator` or `PartialState` was not reinitialized." + ) + # Raise a typical AttributeError + raise AttributeError(f"'AcceleratorState' object has no attribute '{name}'") + + +class GradientState: + """ + Singleton class that has information related to gradient synchronization for gradient accumulation + + **Available attributes:** + + - **end_of_dataloader** (`bool`) -- Whether we have reached the end of the current dataloader + - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader + - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices + - **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over + - **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are + being iterated over + - **num_steps** (`int`) -- The number of steps to accumulate over + - **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient + accumulation + - **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader + iteration and the number of total steps reset + - **is_xla_gradients_synced** (`bool`) -- Whether the XLA gradients have been synchronized. It is initialized + as false. Once gradients have been reduced before the optimizer step, this flag is set to true. Subsequently, + after each step, the flag is reset to false. FSDP will always synchronize the gradients, hence + is_xla_gradients_synced is always true.
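+ + Example (a minimal sketch; `GradientAccumulationPlugin` comes from `accelerate.utils` and `num_steps` is the accumulation window): + + ```python + from accelerate.state import GradientState + from accelerate.utils import GradientAccumulationPlugin + + state = GradientState(GradientAccumulationPlugin(num_steps=4)) + print(state.num_steps, state.sync_gradients, state.in_dataloader) + ```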
+ """ + + _shared_state = SharedDict() + + def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None): + self.__dict__ = self._shared_state + if not self.initialized: + self.sync_gradients = True + self.active_dataloader = None + self.dataloader_references = [None] + self.plugin_kwargs = ( + gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {} + ) + self._is_xla_gradients_synced = False + + # Plugin args are different and can be updated + if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs(): + self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs() + + @property + def num_steps(self) -> int: + "Returns the number of steps to accumulate over" + return self.plugin_kwargs.get("num_steps", 1) + + @property + def adjust_scheduler(self) -> bool: + "Returns whether the scheduler should be adjusted" + return self.plugin_kwargs.get("adjust_scheduler", False) + + @property + def sync_with_dataloader(self) -> bool: + "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset" + return self.plugin_kwargs.get("sync_with_dataloader", True) + + @property + def initialized(self) -> bool: + "Returns whether the `GradientState` has been initialized" + return GradientState._shared_state != {} + + @property + def end_of_dataloader(self) -> bool: + "Returns whether we have reached the end of the current dataloader" + if not self.in_dataloader: + return False + return self.active_dataloader.end_of_dataloader + + @property + def remainder(self) -> int: + "Returns the number of extra samples that were added from padding the dataloader" + if not self.in_dataloader: + return -1 + return self.active_dataloader.remainder + + def __repr__(self): + return ( + f"Sync Gradients: {self.sync_gradients}\n" + f"At end of current dataloader: {self.end_of_dataloader}\n" + f"Extra samples added: {self.remainder}\n" + f"Gradient accumulation plugin: {self.plugin_kwargs}\n" + ) + + @property + def is_xla_gradients_synced(self): + "Returns the value of is_xla_gradients_synced. FSDP will always synchronize the gradients, hence is_xla_gradients_synced is always true." + if parse_flag_from_env("ACCELERATE_USE_FSDP", default=False): + return True + return self._is_xla_gradients_synced + + @is_xla_gradients_synced.setter + def is_xla_gradients_synced(self, is_synced): + "Set the _is_xla_gradients_synced attribute." + self._is_xla_gradients_synced = is_synced + + def _set_sync_gradients(self, sync_gradients): + "Private function that sets whether gradients should be synchronized. Users should not have to call this." + self.sync_gradients = sync_gradients + # Allow grad-sync to automatically work on TPUs + if ( + self.sync_gradients + and is_torch_xla_available(check_is_tpu=True) + and PartialState().distributed_type == DistributedType.XLA + ): + xm.mark_step() + + def _add_dataloader(self, dataloader): + "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this." + self.active_dataloader = dataloader + self.dataloader_references.append(self.active_dataloader) + + def _remove_dataloader(self, dataloader): + "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this." 
+ self.dataloader_references.remove(dataloader) + self.active_dataloader = self.dataloader_references[-1] + + @property + def in_dataloader(self) -> bool: + "Returns whether the current process is in a dataloader" + return self.active_dataloader is not None + + @staticmethod + def _reset_state(): + "Resets `_shared_state`, is used internally and should not be called" + GradientState._shared_state.clear() diff --git a/llmeval-env/lib/python3.10/site-packages/accelerate/tracking.py b/llmeval-env/lib/python3.10/site-packages/accelerate/tracking.py new file mode 100644 index 0000000000000000000000000000000000000000..5efba19bc6769d9c70ea8b17b8da784b908f529f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/accelerate/tracking.py @@ -0,0 +1,1023 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Expectation: +# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`} + +import json +import os +import time +from functools import wraps +from typing import Any, Dict, List, Optional, Union + +import yaml + +from .logging import get_logger +from .state import PartialState +from .utils import ( + LoggerType, + is_aim_available, + is_clearml_available, + is_comet_ml_available, + is_dvclive_available, + is_mlflow_available, + is_tensorboard_available, + is_wandb_available, + listify, +) + + +_available_trackers = [] + +if is_tensorboard_available(): + _available_trackers.append(LoggerType.TENSORBOARD) + +if is_wandb_available(): + _available_trackers.append(LoggerType.WANDB) + +if is_comet_ml_available(): + _available_trackers.append(LoggerType.COMETML) + +if is_aim_available(): + _available_trackers.append(LoggerType.AIM) + +if is_mlflow_available(): + _available_trackers.append(LoggerType.MLFLOW) + +if is_clearml_available(): + _available_trackers.append(LoggerType.CLEARML) + +if is_dvclive_available(): + _available_trackers.append(LoggerType.DVCLIVE) + +logger = get_logger(__name__) + + +def on_main_process(function): + """ + Decorator to selectively run the decorated function on the main process only based on the `main_process_only` + attribute in a class. + + Checks at function execution rather than initialization time, not triggering the initialization of the + `PartialState`. + """ + + @wraps(function) + def execute_on_main_process(self, *args, **kwargs): + if getattr(self, "main_process_only", False): + return PartialState().on_main_process(function)(self, *args, **kwargs) + else: + return function(self, *args, **kwargs) + + return execute_on_main_process + + +def get_available_trackers(): + "Returns a list of all supported available trackers in the system" + return _available_trackers + + +class GeneralTracker: + """ + A base Tracker class to be used for all logging integration implementations. + + Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to + [`Accelerator`]. 
+ + Should implement `name`, `requires_logging_directory`, and `tracker` properties such that: + + `name` (`str`): String representation of the tracker class name, such as "TensorBoard". `requires_logging_directory` + (`bool`): Whether the logger requires a directory to store its logs. `tracker` (`object`): Should return internal + tracking mechanism used by a tracker class (such as the `run` for wandb) + + Implementations can also include a `main_process_only` (`bool`) attribute to toggle if relevant logging, init, and + other functions should occur on the main process or across all processes (by default will use `True`) + """ + + main_process_only = True + + def __init__(self, _blank=False): + if not _blank: + err = "" + if not hasattr(self, "name"): + err += "`name`" + if not hasattr(self, "requires_logging_directory"): + if len(err) > 0: + err += ", " + err += "`requires_logging_directory`" + + # as tracker is a @property that relies on post-init + if "tracker" not in dir(self): + if len(err) > 0: + err += ", " + err += "`tracker`" + if len(err) > 0: + raise NotImplementedError( + f"The implementation for this tracker class is missing the following " + f"required attributes. Please define them in the class definition: " + f"{err}" + ) + + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration + functionality of a tracking API. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + pass + + def log(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with + special behavior for the `step` parameter. + + Args: + values (Dictionary `str` to `str`, `float`, or `int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + """ + pass + + def finish(self): + """ + Should run any finalizing functions within the tracking API. If the API should not have one, just don't + overwrite that method. + """ + pass + + +class TensorBoardTracker(GeneralTracker): + """ + A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run + logging_dir (`str`, `os.PathLike`): + Location for TensorBoard logs to be stored. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `tensorboard.SummaryWriter.__init__` method. + """ + + name = "tensorboard" + requires_logging_directory = True + + @on_main_process + def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs): + try: + from torch.utils import tensorboard + except ModuleNotFoundError: + import tensorboardX as tensorboard + super().__init__() + self.run_name = run_name + self.logging_dir = os.path.join(logging_dir, run_name) + self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs) + logger.debug(f"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
+ ) + + @property + def tracker(self): + return self.writer + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the + hyperparameters in a yaml file for future use. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + self.writer.add_hparams(values, metric_dict={}) + self.writer.flush() + project_run_name = time.time() + dir_name = os.path.join(self.logging_dir, str(project_run_name)) + os.makedirs(dir_name, exist_ok=True) + with open(os.path.join(dir_name, "hparams.yml"), "w") as outfile: + try: + yaml.dump(values, outfile) + except yaml.representer.RepresenterError: + logger.error("Serialization to store hyperparameters failed") + raise + logger.debug("Stored initial configuration hyperparameters to TensorBoard and hparams yaml file") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to either `SummaryWriter.add_scalar`, + `SummaryWriter.add_text`, or `SummaryWriter.add_scalars` method based on the contents of `values`. + """ + values = listify(values) + for k, v in values.items(): + if isinstance(v, (int, float)): + self.writer.add_scalar(k, v, global_step=step, **kwargs) + elif isinstance(v, str): + self.writer.add_text(k, v, global_step=step, **kwargs) + elif isinstance(v, dict): + self.writer.add_scalars(k, v, global_step=step, **kwargs) + self.writer.flush() + logger.debug("Successfully logged to TensorBoard") + + @on_main_process + def log_images(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `images` to the current run. + + Args: + values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`): + Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or + `PIL.Image`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `SummaryWriter.add_image` method. + """ + for k, v in values.items(): + self.writer.add_images(k, v, global_step=step, **kwargs) + logger.debug("Successfully logged images to TensorBoard") + + @on_main_process + def finish(self): + """ + Closes `TensorBoard` writer + """ + self.writer.close() + logger.debug("TensorBoard writer closed") + + +class WandBTracker(GeneralTracker): + """ + A `Tracker` class that supports `wandb`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `wandb.init` method.
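+ + Example (a minimal sketch; assumes `wandb` is installed and you are logged in, and the run name is made up): + + ```python + from accelerate.tracking import WandBTracker + + tracker = WandBTracker(run_name="my_project") + tracker.store_init_configuration({"learning_rate": 3e-4}) + tracker.log({"loss": 0.5}, step=1) + tracker.finish() + ```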
+ """ + + name = "wandb" + requires_logging_directory = False + main_process_only = False + + @on_main_process + def __init__(self, run_name: str, **kwargs): + super().__init__() + self.run_name = run_name + + import wandb + + self.run = wandb.init(project=self.run_name, **kwargs) + logger.debug(f"Initialized WandB project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.run + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + import wandb + + wandb.config.update(values, allow_val_change=True) + logger.debug("Stored initial configuration hyperparameters to WandB") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `wandb.log` method. + """ + self.run.log(values, step=step, **kwargs) + logger.debug("Successfully logged to WandB") + + @on_main_process + def log_images(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `images` to the current run. + + Args: + values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`): + Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `wandb.log` method. + """ + import wandb + + for k, v in values.items(): + self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs) + logger.debug("Successfully logged images to WandB") + + @on_main_process + def log_table( + self, + table_name: str, + columns: List[str] = None, + data: List[List[Any]] = None, + dataframe: Any = None, + step: Optional[int] = None, + **kwargs, + ): + """ + Log a Table containing any object type (text, image, audio, video, molecule, html, etc). Can be defined either + with `columns` and `data` or with `dataframe`. + + Args: + table_name (`str`): + The name to give to the logged table on the wandb workspace + columns (list of `str`, *optional*): + The name of the columns on the table + data (List of List of Any data type, *optional*): + The data to be logged in the table + dataframe (Any data type, *optional*): + The data to be logged in the table + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. 
+ """ + import wandb + + values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)} + self.log(values, step=step, **kwargs) + + @on_main_process + def finish(self): + """ + Closes `wandb` writer + """ + self.run.finish() + logger.debug("WandB run closed") + + +class CometMLTracker(GeneralTracker): + """ + A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script. + + API keys must be stored in a Comet config file. + + Args: + run_name (`str`): + The name of the experiment run. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `Experiment.__init__` method. + """ + + name = "comet_ml" + requires_logging_directory = False + + @on_main_process + def __init__(self, run_name: str, **kwargs): + super().__init__() + self.run_name = run_name + + from comet_ml import Experiment + + self.writer = Experiment(project_name=run_name, **kwargs) + logger.debug(f"Initialized CometML project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + self.writer.log_parameters(values) + logger.debug("Stored initial configuration hyperparameters to CometML") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to either `Experiment.log_metric`, `Experiment.log_other`, + or `Experiment.log_metrics` method based on the contents of `values`. + """ + if step is not None: + self.writer.set_step(step) + for k, v in values.items(): + if isinstance(v, (int, float)): + self.writer.log_metric(k, v, step=step, **kwargs) + elif isinstance(v, str): + self.writer.log_other(k, v, **kwargs) + elif isinstance(v, dict): + self.writer.log_metrics(v, step=step, **kwargs) + logger.debug("Successfully logged to CometML") + + @on_main_process + def finish(self): + """ + Closes `comet-ml` writer + """ + self.writer.end() + logger.debug("CometML run closed") + + +class AimTracker(GeneralTracker): + """ + A `Tracker` class that supports `aim`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `Run.__init__` method. 
+ """ + + name = "aim" + requires_logging_directory = True + + @on_main_process + def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs): + self.run_name = run_name + + from aim import Run + + self.writer = Run(repo=logging_dir, **kwargs) + self.writer.name = self.run_name + logger.debug(f"Initialized Aim project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + self.writer["hparams"] = values + + @on_main_process + def log(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `values` to the current run. + + Args: + values (`dict`): + Values to be logged as key-value pairs. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `Run.track` method. + """ + # Note: replace this with the dictionary support when merged + for key, value in values.items(): + self.writer.track(value, name=key, step=step, **kwargs) + + @on_main_process + def log_images(self, values: dict, step: Optional[int] = None, kwargs: Optional[Dict[str, dict]] = None): + """ + Logs `images` to the current run. + + Args: + values (`Dict[str, Union[np.ndarray, PIL.Image, Tuple[np.ndarray, str], Tuple[PIL.Image, str]]]`): + Values to be logged as key-value pairs. The values need to have type `np.ndarray` or PIL.Image. If a + tuple is provided, the first element should be the image and the second element should be the caption. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs (`Dict[str, dict]`): + Additional key word arguments passed along to the `Run.Image` and `Run.track` method specified by the + keys `aim_image` and `track`, respectively. + """ + import aim + + aim_image_kw = {} + track_kw = {} + + if kwargs is not None: + aim_image_kw = kwargs.get("aim_image", {}) + track_kw = kwargs.get("track", {}) + + for key, value in values.items(): + if isinstance(value, tuple): + img, caption = value + else: + img, caption = value, "" + aim_image = aim.Image(img, caption=caption, **aim_image_kw) + self.writer.track(aim_image, name=key, step=step, **track_kw) + + @on_main_process + def finish(self): + """ + Closes `aim` writer + """ + self.writer.close() + + +class MLflowTracker(GeneralTracker): + """ + A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script. + + Args: + experiment_name (`str`, *optional*): + Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument. + logging_dir (`str` or `os.PathLike`, defaults to `"."`): + Location for mlflow logs to be stored. + run_id (`str`, *optional*): + If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s + end time is unset and its status is set to running, but the run’s other attributes (source_version, + source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument. 
+ tags (`Dict[str, str]`, *optional*): + An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a + run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are + set on the new run. Environment variable MLFLOW_TAGS has priority over this argument. + nested_run (`bool`, *optional*, defaults to `False`): + Controls whether run is nested in parent run. True creates a nested run. Environment variable + MLFLOW_NESTED_RUN has priority over this argument. + run_name (`str`, *optional*): + Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified. + description (`str`, *optional*): + An optional string that populates the description box of the run. If a run is being resumed, the + description is set on the resumed run. If a new run is being created, the description is set on the new + run. + """ + + name = "mlflow" + requires_logging_directory = False + + @on_main_process + def __init__( + self, + experiment_name: str = None, + logging_dir: Optional[Union[str, os.PathLike]] = None, + run_id: Optional[str] = None, + tags: Optional[Union[Dict[str, Any], str]] = None, + nested_run: Optional[bool] = False, + run_name: Optional[str] = None, + description: Optional[str] = None, + ): + experiment_name = os.environ.get("MLFLOW_EXPERIMENT_NAME", experiment_name) + run_id = os.environ.get("MLFLOW_RUN_ID", run_id) + tags = os.environ.get("MLFLOW_TAGS", tags) + if isinstance(tags, str): + tags = json.loads(tags) + + nested_run = os.environ.get("MLFLOW_NESTED_RUN", nested_run) + + import mlflow + + exps = mlflow.search_experiments(filter_string=f"name = '{experiment_name}'") + if len(exps) > 0: + if len(exps) > 1: + logger.warning("Multiple experiments with the same name found. Using first one.") + experiment_id = exps[0].experiment_id + else: + experiment_id = mlflow.create_experiment( + name=experiment_name, + artifact_location=logging_dir, + tags=tags, + ) + + self.active_run = mlflow.start_run( + run_id=run_id, + experiment_id=experiment_id, + run_name=run_name, + nested=nested_run, + tags=tags, + description=description, + ) + + logger.debug(f"Initialized mlflow experiment {experiment_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.active_run + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + import mlflow + + for name, value in list(values.items()): + # internally, all values are converted to str in MLflow + if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH: + logger.warning_once( + f'Accelerate is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s' + f" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute." 
+ ) + del values[name] + + values_list = list(values.items()) + + # MLflow cannot log more than 100 values in one go, so we have to split it + for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH): + mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH])) + + logger.debug("Stored initial configuration hyperparameters to MLflow") + + @on_main_process + def log(self, values: dict, step: Optional[int]): + """ + Logs `values` to the current run. + + Args: + values (`dict`): + Values to be logged as key-value pairs. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + """ + metrics = {} + for k, v in values.items(): + if isinstance(v, (int, float)): + metrics[k] = v + else: + logger.warning_once( + f'MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. ' + "MLflow's log_metric() only accepts float and int types so we dropped this attribute." + ) + import mlflow + + mlflow.log_metrics(metrics, step=step) + logger.debug("Successfully logged to mlflow") + + @on_main_process + def finish(self): + """ + End the active MLflow run. + """ + import mlflow + + mlflow.end_run() + + +class ClearMLTracker(GeneralTracker): + """ + A `Tracker` class that supports `clearml`. Should be initialized at the start of your script. + + Args: + run_name (`str`, *optional*): + Name of the experiment. Environment variables `CLEARML_PROJECT` and `CLEARML_TASK` have priority over this + argument. + **kwargs (additional keyword arguments, *optional*): + Kwargs passed along to the `Task.__init__` method. + """ + + name = "clearml" + requires_logging_directory = False + + @on_main_process + def __init__(self, run_name: str = None, **kwargs): + from clearml import Task + + current_task = Task.current_task() + self._initialized_externally = False + if current_task: + self._initialized_externally = True + self.task = current_task + return + + kwargs.setdefault("project_name", os.environ.get("CLEARML_PROJECT", run_name)) + kwargs.setdefault("task_name", os.environ.get("CLEARML_TASK", run_name)) + self.task = Task.init(**kwargs) + + @property + def tracker(self): + return self.task + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Connect configuration dictionary to the Task object. Should be run at the beginning of your experiment. + + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + return self.task.connect_configuration(values) + + @on_main_process + def log(self, values: Dict[str, Union[int, float]], step: Optional[int] = None, **kwargs): + """ + Logs `values` dictionary to the current run. The dictionary keys must be strings. The dictionary values must be + ints or floats + + Args: + values (`Dict[str, Union[int, float]]`): + Values to be logged as key-value pairs. If the key starts with 'eval_'/'test_'/'train_', the value will + be reported under the 'eval'/'test'/'train' series and the respective prefix will be removed. + Otherwise, the value will be reported under the 'train' series, and no prefix will be removed. + step (`int`, *optional*): + If specified, the values will be reported as scalars, with the iteration number equal to `step`. + Otherwise they will be reported as single values. + kwargs: + Additional key word arguments passed along to the `clearml.Logger.report_single_value` or + `clearml.Logger.report_scalar` methods. 
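+ + Example (a minimal sketch; `tracker` is an initialized `ClearMLTracker`, and the keys illustrate the prefix-to-series mapping described above): + + ```python + # "train_loss" is reported under title "loss", series "train"; + # "eval_loss" is reported under title "loss", series "eval". + tracker.log({"train_loss": 0.5, "eval_loss": 0.7}, step=10) + # With no step, each value is reported as a single value. + tracker.log({"best_accuracy": 0.91}) + ```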
+ """ + clearml_logger = self.task.get_logger() + for k, v in values.items(): + if not isinstance(v, (int, float)): + logger.warning_once( + "Accelerator is attempting to log a value of " + f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' + "This invocation of ClearML logger's report_scalar() " + "is incorrect so we dropped this attribute." + ) + continue + if step is None: + clearml_logger.report_single_value(name=k, value=v, **kwargs) + continue + title, series = ClearMLTracker._get_title_series(k) + clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs) + + @on_main_process + def log_images(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `images` to the current run. + + Args: + values (`Dict[str, List[Union[np.ndarray, PIL.Image]]`): + Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `clearml.Logger.report_image` method. + """ + clearml_logger = self.task.get_logger() + for k, v in values.items(): + title, series = ClearMLTracker._get_title_series(k) + clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs) + + @on_main_process + def log_table( + self, + table_name: str, + columns: List[str] = None, + data: List[List[Any]] = None, + dataframe: Any = None, + step: Optional[int] = None, + **kwargs, + ): + """ + Log a Table to the task. Can be defined eitherwith `columns` and `data` or with `dataframe`. + + Args: + table_name (`str`): + The name of the table + columns (list of `str`, *optional*): + The name of the columns on the table + data (List of List of Any data type, *optional*): + The data to be logged in the table. If `columns` is not specified, then the first entry in data will be + the name of the columns of the table + dataframe (Any data type, *optional*): + The data to be logged in the table + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `clearml.Logger.report_table` method. + """ + to_report = dataframe + if dataframe is None: + if data is None: + raise ValueError( + "`ClearMLTracker.log_table` requires that `data` to be supplied if `dataframe` is `None`" + ) + to_report = [columns] + data if columns else data + title, series = ClearMLTracker._get_title_series(table_name) + self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs) + + @on_main_process + def finish(self): + """ + Close the ClearML task. If the task was initialized externally (e.g. by manually calling `Task.init`), this + function is a noop + """ + if self.task and not self._initialized_externally: + self.task.close() + + @staticmethod + def _get_title_series(name): + for prefix in ["eval", "test", "train"]: + if name.startswith(prefix + "_"): + return name[len(prefix) + 1 :], prefix + return name, "train" + + +class DVCLiveTracker(GeneralTracker): + """ + A `Tracker` class that supports `dvclive`. Should be initialized at the start of your script. + + Args: + run_name (`str`, *optional*): + Ignored for dvclive. See `kwargs` instead. + kwargs: + Additional key word arguments passed along to [`dvclive.Live()`](https://dvc.org/doc/dvclive/live). 
+ + Example: + + ```py + from accelerate import Accelerator + + accelerator = Accelerator(log_with="dvclive") + accelerator.init_trackers(project_name="my_project", init_kwargs={"dvclive": {"dir": "my_directory"}}) + ``` + """ + + name = "dvclive" + requires_logging_directory = False + + @on_main_process + def __init__(self, run_name: Optional[str] = None, live: Optional[Any] = None, **kwargs): + from dvclive import Live + + super().__init__() + self.live = live if live is not None else Live(**kwargs) + + @property + def tracker(self): + return self.live + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the + hyperparameters in a yaml file for future use. + + Args: + values (Dictionary `str` to `bool`, `str`, `float`, `int`, or a List or Dict of those types): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, or `int`. + """ + self.live.log_params(values) + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, or `int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to `dvclive.Live.log_metric()`. + """ + from dvclive.plots import Metric + + if step is not None: + self.live.step = step + for k, v in values.items(): + if Metric.could_log(v): + self.live.log_metric(k, v, **kwargs) + else: + logger.warning_once( + "Accelerator attempted to log a value of " + f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' + "This invocation of DVCLive's Live.log_metric() " + "is incorrect so we dropped this attribute." + ) + self.live.next_step() + + @on_main_process + def finish(self): + """ + Closes `dvclive.Live()`. + """ + self.live.end() + + +LOGGER_TYPE_TO_CLASS = { + "aim": AimTracker, + "comet_ml": CometMLTracker, + "mlflow": MLflowTracker, + "tensorboard": TensorBoardTracker, + "wandb": WandBTracker, + "clearml": ClearMLTracker, + "dvclive": DVCLiveTracker, +} + + +def filter_trackers( + log_with: List[Union[str, LoggerType, GeneralTracker]], + logging_dir: Union[str, os.PathLike] = None, +): + """ + Takes in a list of potential tracker types and checks that: + - The tracker wanted is available in that environment + - Filters out repeats of tracker types + - If `all` is in `log_with`, will return all trackers in the environment + - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None` + + Args: + log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): + A list of loggers to be setup for experiment tracking. Should be one or several of: + + - `"all"` + - `"tensorboard"` + - `"wandb"` + - `"comet_ml"` + - `"mlflow"` + - `"dvclive"` + If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can + also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. + logging_dir (`str`, `os.PathLike`, *optional*): + A path to a directory for storing logs of locally-compatible loggers. 
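+ + Example (a minimal sketch; only trackers whose packages are importable in the current environment are returned, and the tracker names and `logging_dir` are made up): + + ```python + from accelerate.tracking import filter_trackers + + trackers = filter_trackers(["tensorboard", "wandb"], logging_dir="logs") + ```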
+ """ + loggers = [] + if log_with is not None: + if not isinstance(log_with, (list, tuple)): + log_with = [log_with] + if "all" in log_with or LoggerType.ALL in log_with: + loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers() + else: + for log_type in log_with: + if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker): + raise ValueError(f"Unsupported logging capability: {log_type}. Choose between {LoggerType.list()}") + if issubclass(type(log_type), GeneralTracker): + loggers.append(log_type) + else: + log_type = LoggerType(log_type) + if log_type not in loggers: + if log_type in get_available_trackers(): + tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)] + if tracker_init.requires_logging_directory: + if logging_dir is None: + raise ValueError( + f"Logging with `{log_type}` requires a `logging_dir` to be passed in." + ) + loggers.append(log_type) + else: + logger.debug(f"Tried adding logger {log_type}, but package is unavailable in the system.") + + return loggers diff --git a/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8913ee7b59bfb7618355927d94da0e627a274834 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/converters.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/converters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dc613020826ac5f4392d42323176291aa652d92 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/converters.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/exceptions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50d4eb783a212966b037c09630be046fb3680d8f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/exceptions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/filters.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/filters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dde6afdca441df2bc2a4d6ef15d40eb9f5fb46f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/filters.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/setters.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/setters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9f39e3421dd62046f46c757cf7ff5ed137edd3f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/setters.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/validators.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/validators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa913c8966a4df87ba3118913529773108a0e12c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/attrs/__pycache__/validators.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/attrs/exceptions.py b/llmeval-env/lib/python3.10/site-packages/attrs/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..3323f9d2112c54b203763d45b455bd5abbe020f6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/attrs/exceptions.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.exceptions import * # noqa: F403 diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/__diff.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/__diff.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a2a019c34c0b1bbb046fe01737be08d0e592ec8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/__diff.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/__info__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/__info__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ea17e94c65c6ce43f633d012de1b76f79f5f721 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/__info__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/_dill.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/_dill.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fca7f608241b6d0142f147c715fa34df75f6e802 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/_dill.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/_objects.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/_objects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb1145d59614a78f14aa114cbbc2b19dc5a24c34 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/_objects.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/_shims.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/_shims.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c75660cf5cf223882837cf1bb801dbd03cb128d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/_shims.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/detect.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/detect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c835117389f93674e9e492cd0b371ea15e335f0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/detect.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/session.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/session.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04408714f9d9b51cb7b37791859df0c16cde9b3a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/session.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/temp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/temp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b57c5cc790ebe1826e8affaedcd886b59df662f9 Binary files 
/dev/null and b/llmeval-env/lib/python3.10/site-packages/dill/__pycache__/temp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..a9c497799bc34129eaba30ad2ae2047a4d84260c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/METADATA @@ -0,0 +1,201 @@ +Metadata-Version: 2.1 +Name: evaluate +Version: 0.4.2 +Summary: HuggingFace community-driven open-source library of evaluation +Home-page: https://github.com/huggingface/evaluate +Author: HuggingFace Inc. +Author-email: leandro@huggingface.co +License: Apache 2.0 +Download-URL: https://github.com/huggingface/evaluate/tags +Keywords: metrics machine learning evaluate evaluation +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.8.0 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: datasets >=2.0.0 +Requires-Dist: numpy >=1.17 +Requires-Dist: dill +Requires-Dist: pandas +Requires-Dist: requests >=2.19.0 +Requires-Dist: tqdm >=4.62.1 +Requires-Dist: xxhash +Requires-Dist: multiprocess +Requires-Dist: fsspec[http] >=2021.05.0 +Requires-Dist: huggingface-hub >=0.7.0 +Requires-Dist: packaging +Requires-Dist: importlib-metadata ; python_version < "3.8" +Provides-Extra: dev +Requires-Dist: absl-py ; extra == 'dev' +Requires-Dist: charcut >=1.1.1 ; extra == 'dev' +Requires-Dist: cer >=1.2.0 ; extra == 'dev' +Requires-Dist: nltk ; extra == 'dev' +Requires-Dist: pytest ; extra == 'dev' +Requires-Dist: pytest-datadir ; extra == 'dev' +Requires-Dist: pytest-xdist ; extra == 'dev' +Requires-Dist: tensorflow !=2.6.0,!=2.6.1,<=2.10,>=2.3 ; extra == 'dev' +Requires-Dist: torch ; extra == 'dev' +Requires-Dist: accelerate ; extra == 'dev' +Requires-Dist: bert-score >=0.3.6 ; extra == 
'dev' +Requires-Dist: rouge-score >=0.1.2 ; extra == 'dev' +Requires-Dist: sacrebleu ; extra == 'dev' +Requires-Dist: sacremoses ; extra == 'dev' +Requires-Dist: scipy >=1.10.0 ; extra == 'dev' +Requires-Dist: seqeval ; extra == 'dev' +Requires-Dist: scikit-learn ; extra == 'dev' +Requires-Dist: jiwer ; extra == 'dev' +Requires-Dist: sentencepiece ; extra == 'dev' +Requires-Dist: transformers ; extra == 'dev' +Requires-Dist: mauve-text ; extra == 'dev' +Requires-Dist: trectools ; extra == 'dev' +Requires-Dist: toml >=0.10.1 ; extra == 'dev' +Requires-Dist: requests-file >=1.5.1 ; extra == 'dev' +Requires-Dist: tldextract >=3.1.0 ; extra == 'dev' +Requires-Dist: texttable >=1.6.3 ; extra == 'dev' +Requires-Dist: unidecode >=1.3.4 ; extra == 'dev' +Requires-Dist: Werkzeug >=1.0.1 ; extra == 'dev' +Requires-Dist: six ~=1.15.0 ; extra == 'dev' +Requires-Dist: black ~=22.0 ; extra == 'dev' +Requires-Dist: flake8 >=3.8.3 ; extra == 'dev' +Requires-Dist: isort >=5.0.0 ; extra == 'dev' +Requires-Dist: pyyaml >=5.3.1 ; extra == 'dev' +Provides-Extra: docs +Requires-Dist: s3fs ; extra == 'docs' +Provides-Extra: evaluator +Requires-Dist: transformers ; extra == 'evaluator' +Requires-Dist: scipy >=1.7.1 ; extra == 'evaluator' +Provides-Extra: quality +Requires-Dist: black ~=22.0 ; extra == 'quality' +Requires-Dist: flake8 >=3.8.3 ; extra == 'quality' +Requires-Dist: isort >=5.0.0 ; extra == 'quality' +Requires-Dist: pyyaml >=5.3.1 ; extra == 'quality' +Provides-Extra: template +Requires-Dist: cookiecutter ; extra == 'template' +Requires-Dist: gradio >=3.0.0 ; extra == 'template' +Provides-Extra: tensorflow +Requires-Dist: tensorflow !=2.6.0,!=2.6.1,>=2.2.0 ; extra == 'tensorflow' +Provides-Extra: tensorflow_gpu +Requires-Dist: tensorflow-gpu !=2.6.0,!=2.6.1,>=2.2.0 ; extra == 'tensorflow_gpu' +Provides-Extra: tests +Requires-Dist: absl-py ; extra == 'tests' +Requires-Dist: charcut >=1.1.1 ; extra == 'tests' +Requires-Dist: cer >=1.2.0 ; extra == 'tests' +Requires-Dist: nltk ; extra == 'tests' +Requires-Dist: pytest ; extra == 'tests' +Requires-Dist: pytest-datadir ; extra == 'tests' +Requires-Dist: pytest-xdist ; extra == 'tests' +Requires-Dist: tensorflow !=2.6.0,!=2.6.1,<=2.10,>=2.3 ; extra == 'tests' +Requires-Dist: torch ; extra == 'tests' +Requires-Dist: accelerate ; extra == 'tests' +Requires-Dist: bert-score >=0.3.6 ; extra == 'tests' +Requires-Dist: rouge-score >=0.1.2 ; extra == 'tests' +Requires-Dist: sacrebleu ; extra == 'tests' +Requires-Dist: sacremoses ; extra == 'tests' +Requires-Dist: scipy >=1.10.0 ; extra == 'tests' +Requires-Dist: seqeval ; extra == 'tests' +Requires-Dist: scikit-learn ; extra == 'tests' +Requires-Dist: jiwer ; extra == 'tests' +Requires-Dist: sentencepiece ; extra == 'tests' +Requires-Dist: transformers ; extra == 'tests' +Requires-Dist: mauve-text ; extra == 'tests' +Requires-Dist: trectools ; extra == 'tests' +Requires-Dist: toml >=0.10.1 ; extra == 'tests' +Requires-Dist: requests-file >=1.5.1 ; extra == 'tests' +Requires-Dist: tldextract >=3.1.0 ; extra == 'tests' +Requires-Dist: texttable >=1.6.3 ; extra == 'tests' +Requires-Dist: unidecode >=1.3.4 ; extra == 'tests' +Requires-Dist: Werkzeug >=1.0.1 ; extra == 'tests' +Requires-Dist: six ~=1.15.0 ; extra == 'tests' +Provides-Extra: torch +Requires-Dist: torch ; extra == 'torch' + +

+
+<!-- Badges: Build | GitHub | Documentation | GitHub release | Contributor Covenant -->
+
+
+🤗 Evaluate is a library that makes evaluating and comparing models and reporting their performance easier and more standardized.
+
+It currently contains:
+
+- **implementations of dozens of popular metrics**: the existing metrics cover a variety of tasks spanning from NLP to Computer Vision, and include dataset-specific metrics. With a simple command like `accuracy = load("accuracy")`, get any of these metrics ready to use for evaluating an ML model in any framework (NumPy/Pandas/PyTorch/TensorFlow/JAX).
+- **comparisons and measurements**: comparisons are used to measure the difference between models, and measurements are tools to evaluate datasets.
+- **an easy way of adding new evaluation modules to the 🤗 Hub**: you can create new evaluation modules and push them to a dedicated Space in the 🤗 Hub with `evaluate-cli create [metric name]`, which lets you easily compare different metrics and their outputs for the same sets of references and predictions.
+
+[🎓 **Documentation**](https://huggingface.co/docs/evaluate/)
+
+🔎 **Find a [metric](https://huggingface.co/evaluate-metric), [comparison](https://huggingface.co/evaluate-comparison), [measurement](https://huggingface.co/evaluate-measurement) on the Hub**
+
+[🌟 **Add a new evaluation module**](https://huggingface.co/docs/evaluate/)
+
+🤗 Evaluate also has lots of useful features like:
+
+- **Type checking**: the input types are checked to make sure that you are using the right input formats for each metric.
+- **Metric cards**: each metric comes with a card that describes its values, their ranges and limitations, and provides examples of its usage and usefulness.
+- **Community metrics**: metrics live on the Hugging Face Hub, and you can easily add your own metrics for your project or to collaborate with others.
+
+
+# Installation
+
+## With pip
+
+🤗 Evaluate can be installed from PyPI and should be installed in a virtual environment (venv or conda, for instance):
+
+```bash
+pip install evaluate
+```
+
+# Usage
+
+🤗 Evaluate's main methods are:
+
+- `evaluate.list_evaluation_modules()` to list the available metrics, comparisons and measurements
+- `evaluate.load(module_name, **kwargs)` to instantiate an evaluation module
+- `results = module.compute(**kwargs)` to compute the result of an evaluation module
+
+A short end-to-end sketch of these methods appears at the end of this README.
+
+# Adding a new evaluation module
+
+First install the necessary dependencies to create a new metric with the following command:
+```bash
+pip install evaluate[template]
+```
+Then you can get started with the following command, which will create a new folder for your metric and display the necessary steps:
+```bash
+evaluate-cli create "Awesome Metric"
+```
+See this [step-by-step guide](https://huggingface.co/docs/evaluate/creating_and_sharing) in the documentation for detailed instructions.
+
+## Credits
+
+Thanks to [@marella](https://github.com/marella) for letting us use the `evaluate` namespace on PyPI, previously used by his [library](https://github.com/marella/evaluate).
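+
+## End-to-end sketch
+
+A minimal, illustrative sketch of the Usage methods above, assuming `evaluate` is installed; the toy references and predictions are made up for the example:
+
+```python
+import evaluate
+
+# Discover available evaluation modules (metrics, comparisons, measurements).
+modules = evaluate.list_evaluation_modules()
+print(modules[:5])  # peek at a few module names
+
+# Instantiate a metric by name and score toy predictions against references.
+accuracy = evaluate.load("accuracy")
+results = accuracy.compute(references=[0, 1, 1, 0], predictions=[0, 1, 0, 0])
+print(results)  # expected: {'accuracy': 0.75}, since 3 of 4 predictions match
+```
+
+The same `load`/`compute` pattern applies to comparisons and measurements; only the expected inputs differ per module, as documented on each module's metric card.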
+ + diff --git a/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ba33177c3595e96397a002904346b67c003f148f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/RECORD @@ -0,0 +1,64 @@ +../../../bin/evaluate-cli,sha256=dRGA6d7NGT1TqpsZkE6O4Cp55DLklOgvRQ9WDz6w8tc,263 +evaluate-0.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +evaluate-0.4.2.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +evaluate-0.4.2.dist-info/METADATA,sha256=kx50-JaxQsGbQh_BcF3rsIiA2q6uNuDiTn9Q5OIsBxw,9349 +evaluate-0.4.2.dist-info/RECORD,, +evaluate-0.4.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +evaluate-0.4.2.dist-info/entry_points.txt,sha256=m2P3heof0lsg47nq6tYW_yUtxTfimd3RuD26Yk8KMkM,70 +evaluate-0.4.2.dist-info/top_level.txt,sha256=wBEoxird-u8p4OKDwq5z9rlfH-ybeez8rjaKNLNJ3B0,9 +evaluate/__init__.py,sha256=z7vL4diCrdE-vd-oUY15dyao3kbc4H_9VvA2-T3jJuU,1754 +evaluate/__pycache__/__init__.cpython-310.pyc,, +evaluate/__pycache__/config.cpython-310.pyc,, +evaluate/__pycache__/hub.cpython-310.pyc,, +evaluate/__pycache__/info.cpython-310.pyc,, +evaluate/__pycache__/inspect.cpython-310.pyc,, +evaluate/__pycache__/loading.cpython-310.pyc,, +evaluate/__pycache__/module.cpython-310.pyc,, +evaluate/__pycache__/naming.cpython-310.pyc,, +evaluate/__pycache__/saving.cpython-310.pyc,, +evaluate/__pycache__/visualization.cpython-310.pyc,, +evaluate/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +evaluate/commands/__pycache__/__init__.cpython-310.pyc,, +evaluate/commands/__pycache__/evaluate_cli.cpython-310.pyc,, +evaluate/commands/evaluate_cli.py,sha256=w7GWb48JPjoC0BX7Jn12qtxQUBYOlZNhdg4YegA93Fw,4491 +evaluate/config.py,sha256=g4g-S6hVAw0Ys9As7gKaFP66pZeh8hoJJ5GEXaLSWV8,6648 +evaluate/evaluation_suite/__init__.py,sha256=TjcFihBDf_ZQAoIjSXPEC0iFBeEC_LFqCfXKbrkyhWs,4941 +evaluate/evaluation_suite/__pycache__/__init__.cpython-310.pyc,, +evaluate/evaluator/__init__.py,sha256=JoWqRP-qCgNzDre6nO8zpJ2Iyp0eUkN7eDKPOPUXz2g,5788 +evaluate/evaluator/__pycache__/__init__.cpython-310.pyc,, +evaluate/evaluator/__pycache__/audio_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/automatic_speech_recognition.cpython-310.pyc,, +evaluate/evaluator/__pycache__/base.cpython-310.pyc,, +evaluate/evaluator/__pycache__/image_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/question_answering.cpython-310.pyc,, +evaluate/evaluator/__pycache__/text2text_generation.cpython-310.pyc,, +evaluate/evaluator/__pycache__/text_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/text_generation.cpython-310.pyc,, +evaluate/evaluator/__pycache__/token_classification.cpython-310.pyc,, +evaluate/evaluator/__pycache__/utils.cpython-310.pyc,, +evaluate/evaluator/audio_classification.py,sha256=v5myOnm0PN8BWVnm4nWCzcyklaLtdnbOS3EJ09TPFhg,5804 +evaluate/evaluator/automatic_speech_recognition.py,sha256=jOveYJXsH-t5SzGe7FzXhnHeDKFhqWZUtK3S1l9XYus,4392 +evaluate/evaluator/base.py,sha256=--M302w8Bea6u6iYCc9dGFZL1wDIRGd7uUorhcmUAus,22881 +evaluate/evaluator/image_classification.py,sha256=RJ7NUS91hjZkr5JqhqtYsr5dxBkChA3Qim6An8fHT50,4751 +evaluate/evaluator/question_answering.py,sha256=ArF5BKfE9J9uC-q1GQwbvkAHw1ThgA997ERKmPS-Z4g,9566 +evaluate/evaluator/text2text_generation.py,sha256=M2itKYfIz9z_9J-Y7sXyx4HKMhQbdYwbv8oThSw8Yzw,9676 
+evaluate/evaluator/text_classification.py,sha256=g1MUwa3TCUCUBGvZDmdeJ_l8BAOgbn0Q0y4TDvep8Uk,6676 +evaluate/evaluator/text_generation.py,sha256=4ZnHweTUpvNZhaprewTPms__00I8Tnje586ZDCG_ZlU,2679 +evaluate/evaluator/token_classification.py,sha256=XMzteW1coN2e3KWmpWj-OGafj22pzMa7UiHylooirHk,11546 +evaluate/evaluator/utils.py,sha256=HDKdLWLHtfpP-Hhe9cf1TFVIRsmfNgLHifDcGYujKZs,2451 +evaluate/hub.py,sha256=ZX6VYZU0EkjTWmABuJ6Zg6oHXIT2dHkHy0u8RgyL9UQ,4550 +evaluate/info.py,sha256=l5gXfqHhj77-XvFhz57Mns-Ev-lNJsLxsyYPHPvSzj0,5490 +evaluate/inspect.py,sha256=vVSCLr7HWLxIpXzwpDPuiE5XwiP5QQ82oGkdok7aO7o,4969 +evaluate/loading.py,sha256=P5MjZvrGHRgOE6jVPnyCNWOpbY-iPz_kLIydZjiNT7Q,35219 +evaluate/module.py,sha256=A1awyHek5mxi5kREJjnEQSv14HDTrl7dYyOrytinGg0,46417 +evaluate/naming.py,sha256=Lpw8JmoJfiWs4xDUMEDzcIKO9Nw9RS2lzjeuUP-9acA,2827 +evaluate/saving.py,sha256=UoixNIHmWEceJREvGZlJNViVjRkgNf3MRflwnnhnNUA,2159 +evaluate/utils/__init__.py,sha256=kdFi2pVFSXm_y4EvvuQNnlPUkOPmGLNtc9YTfxAmdsI,1201 +evaluate/utils/__pycache__/__init__.cpython-310.pyc,, +evaluate/utils/__pycache__/file_utils.cpython-310.pyc,, +evaluate/utils/__pycache__/gradio.cpython-310.pyc,, +evaluate/utils/__pycache__/logging.cpython-310.pyc,, +evaluate/utils/file_utils.py,sha256=uGkXJYWQBKNALhdxktpQ_844jCjKLFQg6l_3KKK2zGI,22602 +evaluate/utils/gradio.py,sha256=UXGRxiPsJ41Xm5gGF7Jf_1vTOPopE_wDoBIyBS0S8d4,4434 +evaluate/utils/logging.py,sha256=nRy963i3_-H0Qcer6ETgnTFiJoQhojSiapeXQ9-eUyk,6698 +evaluate/visualization.py,sha256=m-mD6vxOIQ-_KXTues2tB4r7c4jdygBybHJeidP-jgw,9293 diff --git a/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/entry_points.txt b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..cbb3e69c055695cd6dccee54dec460f319c3fd25 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +evaluate-cli = evaluate.commands.evaluate_cli:main + diff --git a/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6adde7833123ecc7f347ea1f5492568989dfd45 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/evaluate-0.4.2.dist-info/top_level.txt @@ -0,0 +1 @@ +evaluate diff --git a/llmeval-env/lib/python3.10/site-packages/sympy-1.12.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/sympy-1.12.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..884a6b026f336ab7610b8bb608739ced3f64efe7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sympy-1.12.dist-info/RECORD @@ -0,0 +1,2932 @@ +../../../bin/isympy,sha256=6ew7uHi3lJexh9yurWZtOPBvnSVb4gINoJCS5Gk4YB4,239 +../../../share/man/man1/isympy.1,sha256=9DZdSOIQLikrATHlbkdDZ04LBQigZDUE0_oCXBDvdBs,6659 +__pycache__/isympy.cpython-310.pyc,, 
+isympy.py,sha256=gAoHa7OM0y9G5IBO7wO-uTpD-CPnd6sbmjJ_GGB0yzg,11207 +sympy-1.12.dist-info/AUTHORS,sha256=wlSBGC-YWljenH44cUwI510RfR4iTZamMi_aKjJwpUU,48572 +sympy-1.12.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +sympy-1.12.dist-info/LICENSE,sha256=B6XpgZ9ye0mGrSgpx6KaYyDUJXX3IOsk1xt_71c6AoY,7885 +sympy-1.12.dist-info/METADATA,sha256=PsPCJVJrEv6F-QpnHbsxepSvVwxvt2rx2RmuTXXrJqY,12577 +sympy-1.12.dist-info/RECORD,, +sympy-1.12.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 +sympy-1.12.dist-info/entry_points.txt,sha256=Sp-vLJom4PRlhGfY6RpUre7SjYm33JNq9NCwCGeW-fQ,39 +sympy-1.12.dist-info/top_level.txt,sha256=elXb5xfjLdjgSSoQFk4_2Qu3lp2CIaglF9MQtfIoH7o,13 +sympy/__init__.py,sha256=85o5Yfq2EeAiES9e85A0ZD6n9GvrpanvEdUeu-V5e2w,29005 +sympy/__pycache__/__init__.cpython-310.pyc,, +sympy/__pycache__/abc.cpython-310.pyc,, +sympy/__pycache__/conftest.cpython-310.pyc,, +sympy/__pycache__/galgebra.cpython-310.pyc,, +sympy/__pycache__/release.cpython-310.pyc,, +sympy/__pycache__/this.cpython-310.pyc,, +sympy/abc.py,sha256=P1iQKfXl7Iut6Z5Y97QmGr_UqiAZ6qR-eoRMtYacGfA,3748 +sympy/algebras/__init__.py,sha256=7PRGOW30nlMOTeUPR7iy8l5xGoE2yCBEfRbjqDKWOgU,62 +sympy/algebras/__pycache__/__init__.cpython-310.pyc,, +sympy/algebras/__pycache__/quaternion.cpython-310.pyc,, +sympy/algebras/quaternion.py,sha256=RjAU_1jKNq7LQl4Iuf0BhQ2NtbbCOL3Ytyr_PPjxxlQ,47563 +sympy/algebras/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/algebras/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/algebras/tests/__pycache__/test_quaternion.cpython-310.pyc,, +sympy/algebras/tests/test_quaternion.py,sha256=WTnJxcMkapyNR4QYJFisbwc2kStw2ZYQuEV3hNalhYE,15921 +sympy/assumptions/__init__.py,sha256=PFS8djTqiNbGVMjg7PaPjEfwmjyZVfioXiRVzqqA3E0,550 +sympy/assumptions/__pycache__/__init__.cpython-310.pyc,, +sympy/assumptions/__pycache__/ask.cpython-310.pyc,, +sympy/assumptions/__pycache__/ask_generated.cpython-310.pyc,, +sympy/assumptions/__pycache__/assume.cpython-310.pyc,, +sympy/assumptions/__pycache__/cnf.cpython-310.pyc,, +sympy/assumptions/__pycache__/facts.cpython-310.pyc,, +sympy/assumptions/__pycache__/refine.cpython-310.pyc,, +sympy/assumptions/__pycache__/satask.cpython-310.pyc,, +sympy/assumptions/__pycache__/sathandlers.cpython-310.pyc,, +sympy/assumptions/__pycache__/wrapper.cpython-310.pyc,, +sympy/assumptions/ask.py,sha256=MQZg3JiVEvaZuzMlOUeXjPLuAQlhb5-QNDU8Mw5mNnI,18800 +sympy/assumptions/ask_generated.py,sha256=DSsSGSwjV0K3ASMvWvatFEXviYKXR-1xPwySPsLL-c4,17083 +sympy/assumptions/assume.py,sha256=_gcFc4h_YGs9-tshoD0gmLl_RtPivDQWMWhWWLX9seo,14606 +sympy/assumptions/cnf.py,sha256=axPy2EMLHkIX83_kcsKoRFlpq3x_0YxOEjzt7FHgxc4,12706 +sympy/assumptions/facts.py,sha256=q0SDVbzmU46_8mf63Uao5pYE4MgyrhR9vn94QJqQSv8,7609 +sympy/assumptions/handlers/__init__.py,sha256=lvjAfPdz0MDjTxjuzbBSGBco2OmpZRiGixSG0oaiZi0,330 +sympy/assumptions/handlers/__pycache__/__init__.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/calculus.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/common.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/matrices.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/ntheory.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/order.cpython-310.pyc,, +sympy/assumptions/handlers/__pycache__/sets.cpython-310.pyc,, +sympy/assumptions/handlers/calculus.py,sha256=ul36wLjxrU_LUxEWX63dWklWHgHWw5xVT0d7BkZCdFE,7198 +sympy/assumptions/handlers/common.py,sha256=sW_viw2xdO9Klqf31x3YlYcGlhgRj52HV1JFmwrgtb4,4064 
+sympy/assumptions/handlers/matrices.py,sha256=Gdauk2xk1hKPRr4i6RpvOMHtDnyVD34x1OyhL-Oh8Hc,22321 +sympy/assumptions/handlers/ntheory.py,sha256=2i-EhgO9q1LfDLzN3BZVzHNfaXSsce131XtBr5TEh2I,7213 +sympy/assumptions/handlers/order.py,sha256=Y6Txiykbj4gkibX0mrcUUlhtRWE27p-4lpG4WACX3Ik,12222 +sympy/assumptions/handlers/sets.py,sha256=2Jh2G6Ce1qz9Imzv5et_v-sMxY62j3rFdnp1UZ_PGB8,23818 +sympy/assumptions/predicates/__init__.py,sha256=q1C7iWpvdDymEUZNyzJvZLsLtgwSkYtCixME-fYyIDw,110 +sympy/assumptions/predicates/__pycache__/__init__.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/calculus.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/common.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/matrices.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/ntheory.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/order.cpython-310.pyc,, +sympy/assumptions/predicates/__pycache__/sets.cpython-310.pyc,, +sympy/assumptions/predicates/calculus.py,sha256=vFnlYVYZVd6D9OwA7-3bDK_Q0jf2iCZCZiMlWenw0Vg,1889 +sympy/assumptions/predicates/common.py,sha256=zpByACpa_tF0nVNB0J_rJehnXkHtkxhchn1DvkVVS-s,2279 +sympy/assumptions/predicates/matrices.py,sha256=X3vbkEf3zwJLyanEjf6ijYXuRfFfSv-yatl1tJ25wDk,12142 +sympy/assumptions/predicates/ntheory.py,sha256=wvFNFSf0S4egbY7REw0V0ANC03CuiRU9PLmdi16VfHo,2546 +sympy/assumptions/predicates/order.py,sha256=ZI4u_WfusMPAEsMFawkSN9QvaMwI3-Jt3-U_xIcGl_8,9508 +sympy/assumptions/predicates/sets.py,sha256=anp-DeJaU2nun3K4O71G_fbqpETozSKynRGuLhiO8xI,8937 +sympy/assumptions/refine.py,sha256=GlC16HC3VNtCHFZNul1tnDCNPy-iOPKZBGjpTbTlbh4,11950 +sympy/assumptions/relation/__init__.py,sha256=t2tZNEIK7w-xXshRQIRL8tIyiNe1W5fMhN7QNRPnQFo,261 +sympy/assumptions/relation/__pycache__/__init__.cpython-310.pyc,, +sympy/assumptions/relation/__pycache__/binrel.cpython-310.pyc,, +sympy/assumptions/relation/__pycache__/equality.cpython-310.pyc,, +sympy/assumptions/relation/binrel.py,sha256=3iwnSEE53-vRsPv-bOnjydgOkCpbB12FTFR_sQ3CwvE,6313 +sympy/assumptions/relation/equality.py,sha256=RbwztgBBVlnfc9-M-IYKonybITSr8WdqWQqwlp2j3V8,7160 +sympy/assumptions/satask.py,sha256=ld_ZWQlxh9R3ElMUBjnqVfwEJ2irPYtJ6vV5mWdzSs0,11280 +sympy/assumptions/sathandlers.py,sha256=Uu_ur8XtxUH5uaAlfGQHEyx2S1-3Q00EFmezDYaGxT0,9428 +sympy/assumptions/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/assumptions/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_assumptions_2.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_context.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_matrices.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_query.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_refine.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_satask.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_sathandlers.cpython-310.pyc,, +sympy/assumptions/tests/__pycache__/test_wrapper.cpython-310.pyc,, +sympy/assumptions/tests/test_assumptions_2.py,sha256=oNgIDOoW-GpBbXxbtw05SWnE8I7sGislYmB3MDogwB4,1070 +sympy/assumptions/tests/test_context.py,sha256=I5gES7AY9_vz1-CEaCchy4MXABtX85ncNkvoRuLskG8,1153 +sympy/assumptions/tests/test_matrices.py,sha256=nzSofuawc18hNe9Nj0dN_lTeDwa2KbPjt4K2rvb3xmw,12258 +sympy/assumptions/tests/test_query.py,sha256=teHsXTfPw_q4197tXcz2Ov-scVxDHP-T_LpcELmOMnI,97999 +sympy/assumptions/tests/test_refine.py,sha256=bHxYUnCOEIzA1yPU3B2xbU9JZfhDv6RkmPm8esetisQ,8834 
+sympy/assumptions/tests/test_satask.py,sha256=IIqqIxzkLfANpTNBKEsCGCp3Bm8zmDnYd23woqKh9EE,15741 +sympy/assumptions/tests/test_sathandlers.py,sha256=jMCZQb3G6pVQ5MHaSTWV_0eULHaCF8Mowu12Ll72rgs,1842 +sympy/assumptions/tests/test_wrapper.py,sha256=iE32j83rrerCz85HHt2hTolgJkqb44KddfEpI3H1Fb8,1159 +sympy/assumptions/wrapper.py,sha256=nZ3StKi-Q0q_HmdwpzZEcE7WQFcVtnB28QBvYe_O220,5514 +sympy/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/benchmarks/__pycache__/bench_discrete_log.cpython-310.pyc,, +sympy/benchmarks/__pycache__/bench_meijerint.cpython-310.pyc,, +sympy/benchmarks/__pycache__/bench_symbench.cpython-310.pyc,, +sympy/benchmarks/bench_discrete_log.py,sha256=CNchIJ5HFMPpNlVZh2vOU0GgQ3bse6hqyqDovpDHlKE,2473 +sympy/benchmarks/bench_meijerint.py,sha256=dSNdZhoc8a4h50wRtbOxLwpmgUiuMFpe6ytTLURcplY,11610 +sympy/benchmarks/bench_symbench.py,sha256=UMD3eYf_Poht0qxjdH2_axGwwON6cZo1Sp700Ci1M1M,2997 +sympy/calculus/__init__.py,sha256=IWDc6qPbEcWyTm9QM6V8vSAs-5OtGNijimykoWz3Clc,828 +sympy/calculus/__pycache__/__init__.cpython-310.pyc,, +sympy/calculus/__pycache__/accumulationbounds.cpython-310.pyc,, +sympy/calculus/__pycache__/euler.cpython-310.pyc,, +sympy/calculus/__pycache__/finite_diff.cpython-310.pyc,, +sympy/calculus/__pycache__/singularities.cpython-310.pyc,, +sympy/calculus/__pycache__/util.cpython-310.pyc,, +sympy/calculus/accumulationbounds.py,sha256=DpFXDYbjSxx0icrx1HagArBeyVx5aSAX83vYuXSGMRI,28692 +sympy/calculus/euler.py,sha256=0QrHD9TYKlSZuO8drnU3bUFJrSu8v5SncqtkRSWLjGM,3436 +sympy/calculus/finite_diff.py,sha256=X7qZJ5GmHlHKokUUMFoaQqrqX2jLRq4b7W2G5aWntzM,17053 +sympy/calculus/singularities.py,sha256=ctVHpnE4Z7iE6tNAssMWmdXu9qWXOXzVJasLxC-cToQ,11757 +sympy/calculus/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/calculus/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/calculus/tests/__pycache__/test_accumulationbounds.cpython-310.pyc,, +sympy/calculus/tests/__pycache__/test_euler.cpython-310.pyc,, +sympy/calculus/tests/__pycache__/test_finite_diff.cpython-310.pyc,, +sympy/calculus/tests/__pycache__/test_singularities.cpython-310.pyc,, +sympy/calculus/tests/__pycache__/test_util.cpython-310.pyc,, +sympy/calculus/tests/test_accumulationbounds.py,sha256=a_Ry2nKX5WbhSe1Bk2k0W6-VWOpVTg0FnA9u8rNSIV4,11195 +sympy/calculus/tests/test_euler.py,sha256=YWpts4pWSiYEwRsi5DLQ16JgC9109-9NKZIL_IO6_Aw,2683 +sympy/calculus/tests/test_finite_diff.py,sha256=V52uNDNvarcK_FXnWrPZjifFMRWTy_2H4lt3FmvA4W4,7760 +sympy/calculus/tests/test_singularities.py,sha256=zVCHJyjVFw9xpQ_EFCsA33zBGwCQ8gSeLtbLGA9t0uQ,4215 +sympy/calculus/tests/test_util.py,sha256=S5_YEGW0z7xzzthShrSsg2wAmzE9mR4u4Ndzuzw_Gx8,15034 +sympy/calculus/util.py,sha256=ViXMvleQIIStquHN01CpTUPYxu3jgC57GaCOkuXRsoU,26097 +sympy/categories/__init__.py,sha256=XiKBVC6pbDED-OVtNlSH-fGB8dB_jWLqwCEO7wBTAyA,984 +sympy/categories/__pycache__/__init__.cpython-310.pyc,, +sympy/categories/__pycache__/baseclasses.cpython-310.pyc,, +sympy/categories/__pycache__/diagram_drawing.cpython-310.pyc,, +sympy/categories/baseclasses.py,sha256=G3wCiNCgNiTLLFZxGLd2ZFmnsbiRxhapSfZWlWSC508,31411 +sympy/categories/diagram_drawing.py,sha256=W88A89uDs8qKZlxVLqWuqmEOBwTMomtl_u8sFe9wqdU,95500 +sympy/categories/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/categories/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/categories/tests/__pycache__/test_baseclasses.cpython-310.pyc,, 
+sympy/categories/tests/__pycache__/test_drawing.cpython-310.pyc,, +sympy/categories/tests/test_baseclasses.py,sha256=SwD6QsfSlrEdpD2dbkcN62CPVIRP5SadjCplLrMAoa8,5767 +sympy/categories/tests/test_drawing.py,sha256=IELPpadmnQyQ2x5a5qHC8ioq5kfT1UnAl4h1vO3gbqg,27848 +sympy/codegen/__init__.py,sha256=sQcJsyLyoRh9ccOPhv2eZ-wHjQrArByOON9ndj-MYgQ,974 +sympy/codegen/__pycache__/__init__.cpython-310.pyc,, +sympy/codegen/__pycache__/abstract_nodes.cpython-310.pyc,, +sympy/codegen/__pycache__/algorithms.cpython-310.pyc,, +sympy/codegen/__pycache__/approximations.cpython-310.pyc,, +sympy/codegen/__pycache__/ast.cpython-310.pyc,, +sympy/codegen/__pycache__/cfunctions.cpython-310.pyc,, +sympy/codegen/__pycache__/cnodes.cpython-310.pyc,, +sympy/codegen/__pycache__/cutils.cpython-310.pyc,, +sympy/codegen/__pycache__/cxxnodes.cpython-310.pyc,, +sympy/codegen/__pycache__/fnodes.cpython-310.pyc,, +sympy/codegen/__pycache__/futils.cpython-310.pyc,, +sympy/codegen/__pycache__/matrix_nodes.cpython-310.pyc,, +sympy/codegen/__pycache__/numpy_nodes.cpython-310.pyc,, +sympy/codegen/__pycache__/pynodes.cpython-310.pyc,, +sympy/codegen/__pycache__/pyutils.cpython-310.pyc,, +sympy/codegen/__pycache__/rewriting.cpython-310.pyc,, +sympy/codegen/__pycache__/scipy_nodes.cpython-310.pyc,, +sympy/codegen/abstract_nodes.py,sha256=TY4ecftqnym5viYInnb59zGPPFXdeSGQwi--xTz6Pvo,490 +sympy/codegen/algorithms.py,sha256=_isSQBzQzn1xKkYhYEF7nVK1sCa7n78Qo5AoCeNs8eU,5056 +sympy/codegen/approximations.py,sha256=UnVbikz2vjJo8DtE02ipa6ZEsCe5lXOT_r16F5ByW4Q,6447 +sympy/codegen/ast.py,sha256=tBRSHBvDz4_Z_FiFy1d48x1URHPtAVCJUiwQihpc5zA,56374 +sympy/codegen/cfunctions.py,sha256=SGLPIMgGE9o9RhaThTgVcmnFCKbxNZvukqp3uvqv0Vw,11812 +sympy/codegen/cnodes.py,sha256=ZFBxHsRBUcQ14EJRURZXh9EjTsSSJGwmWubfmpE0-p4,2823 +sympy/codegen/cutils.py,sha256=vlzMs8OkC5Bu4sIP-AF2mYf_tIo7Uo4r2DAI_LNhZzM,383 +sympy/codegen/cxxnodes.py,sha256=Om-EBfYduFF97tgXOF68rr8zYbngem9kBRm9SJiKLSM,342 +sympy/codegen/fnodes.py,sha256=P7I-TD-4H4Dr4bxFNS7p46OD9bi32l8SpFEezVWutSY,18931 +sympy/codegen/futils.py,sha256=k-mxMJKr_Q_afTy6NrKNl_N2XQLBmSdZAssO5hBonNY,1792 +sympy/codegen/matrix_nodes.py,sha256=Hhip0cbBj27i-4JwVinkEt4PHRbAIe5ERxwyywoSJm8,2089 +sympy/codegen/numpy_nodes.py,sha256=23inRIlvAF2wzaJGhi1NUg8R7NRbhtDrqICDZN909jw,3137 +sympy/codegen/pynodes.py,sha256=Neo1gFQ9kC31T-gH8TeeCaDDNaDe5deIP97MRZFgMHk,243 +sympy/codegen/pyutils.py,sha256=HfF6SP710Y7yExZcSesI0usVaDiWdEPEmMtyMD3JtOY,838 +sympy/codegen/rewriting.py,sha256=EeSOC-fawTxFiueMIuMlSFPuES_97hhxC2hjoZ_6pPQ,11591 +sympy/codegen/scipy_nodes.py,sha256=hYlxtGyTM0Z64Nazm1TeMZ3Y8dMsiD_HNhNvbU9eiQY,2508 +sympy/codegen/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/codegen/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_abstract_nodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_algorithms.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_applications.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_approximations.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_ast.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_cfunctions.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_cnodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_cxxnodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_fnodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_numpy_nodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_pynodes.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_pyutils.cpython-310.pyc,, 
+sympy/codegen/tests/__pycache__/test_rewriting.cpython-310.pyc,, +sympy/codegen/tests/__pycache__/test_scipy_nodes.cpython-310.pyc,, +sympy/codegen/tests/test_abstract_nodes.py,sha256=a_GKf3FpeNN8zfMc-V8AaSrQtEI1oiLfJOco2VKiSKI,451 +sympy/codegen/tests/test_algorithms.py,sha256=gvDTHZnC_lZ4Uvt7BTSfjMuDTyM0Bilm-sWMUpSM06I,4700 +sympy/codegen/tests/test_applications.py,sha256=DWDpSsiVQy7S6pjnBSErWxDpPDRRLL8ncTMWWwaI3R4,2189 +sympy/codegen/tests/test_approximations.py,sha256=SZpOUzahb_bJOceD0DLdmeiw-jN37OPmf5TRp1dyRgM,2035 +sympy/codegen/tests/test_ast.py,sha256=aAWk-yAVVNAmFMkyUlYBbVA8mPlTFqULOtmXMEi3LO8,21688 +sympy/codegen/tests/test_cfunctions.py,sha256=EuRwj9U00iLc2--qtY2YD7TpICndQ0gVsCXTYHrIFhQ,4613 +sympy/codegen/tests/test_cnodes.py,sha256=FlI5XP39K3kC1QWKQ-QKkzNQw8TROjj5mKXJhK1UU2c,3039 +sympy/codegen/tests/test_cxxnodes.py,sha256=5OwN8D_ZtKN9z5uNeUwbUkyAGzNLrTgIKUlcRWmOSpE,366 +sympy/codegen/tests/test_fnodes.py,sha256=r206n8YM0D1vFP0vdjUaAR7QRpmUWw8VmqSMFxh8FU8,6643 +sympy/codegen/tests/test_numpy_nodes.py,sha256=VcG7eGVlzx9sSKRp1n9zfK0NjigxY5WOW6F_nQnnnSs,1658 +sympy/codegen/tests/test_pynodes.py,sha256=Gso18KKzSwA-1AHC55SgHPAfH1GrGUCGaN6QR7iuEO0,432 +sympy/codegen/tests/test_pyutils.py,sha256=jr5QGvUP0M1Rr2_7vHTazlMaJOoMHztqFTxT6EkBcb4,285 +sympy/codegen/tests/test_rewriting.py,sha256=ELPziNI3CsJ4VS7mUbk4QWyG_94FbgZCdBKieMN20Vc,15852 +sympy/codegen/tests/test_scipy_nodes.py,sha256=LBWpjTRfgWN5NLTchLZEp6m7IMtu7HbiKoztLc6KNGY,1495 +sympy/combinatorics/__init__.py,sha256=Dx9xakpHuTIgy4G8zVjAY6pTu8J9_K3d_jKPizRMdVo,1500 +sympy/combinatorics/__pycache__/__init__.cpython-310.pyc,, +sympy/combinatorics/__pycache__/coset_table.cpython-310.pyc,, +sympy/combinatorics/__pycache__/fp_groups.cpython-310.pyc,, +sympy/combinatorics/__pycache__/free_groups.cpython-310.pyc,, +sympy/combinatorics/__pycache__/galois.cpython-310.pyc,, +sympy/combinatorics/__pycache__/generators.cpython-310.pyc,, +sympy/combinatorics/__pycache__/graycode.cpython-310.pyc,, +sympy/combinatorics/__pycache__/group_constructs.cpython-310.pyc,, +sympy/combinatorics/__pycache__/group_numbers.cpython-310.pyc,, +sympy/combinatorics/__pycache__/homomorphisms.cpython-310.pyc,, +sympy/combinatorics/__pycache__/named_groups.cpython-310.pyc,, +sympy/combinatorics/__pycache__/partitions.cpython-310.pyc,, +sympy/combinatorics/__pycache__/pc_groups.cpython-310.pyc,, +sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc,, +sympy/combinatorics/__pycache__/permutations.cpython-310.pyc,, +sympy/combinatorics/__pycache__/polyhedron.cpython-310.pyc,, +sympy/combinatorics/__pycache__/prufer.cpython-310.pyc,, +sympy/combinatorics/__pycache__/rewritingsystem.cpython-310.pyc,, +sympy/combinatorics/__pycache__/rewritingsystem_fsm.cpython-310.pyc,, +sympy/combinatorics/__pycache__/schur_number.cpython-310.pyc,, +sympy/combinatorics/__pycache__/subsets.cpython-310.pyc,, +sympy/combinatorics/__pycache__/tensor_can.cpython-310.pyc,, +sympy/combinatorics/__pycache__/testutil.cpython-310.pyc,, +sympy/combinatorics/__pycache__/util.cpython-310.pyc,, +sympy/combinatorics/coset_table.py,sha256=A3O5l1tkFmF1mEqiab08eBcR6lAdiqKJ2uPao3Ucvlk,42935 +sympy/combinatorics/fp_groups.py,sha256=QjeCEGBfTBbMZd-WpCOY5iEUyt8O7eJXa3RDLfMC7wk,47800 +sympy/combinatorics/free_groups.py,sha256=OnsEnMF6eehIFdM5m7RHkc9R_LFIahGJL3bAEv1pR6k,39534 +sympy/combinatorics/galois.py,sha256=0kz71xGJDKgJm-9dXr4YTMkfaHPowCUImpK9x-n3VNU,17863 +sympy/combinatorics/generators.py,sha256=vUIe0FgHGVFA5omJH-qHQP6NmqmnuVVV8n2RFnpTrKc,7481 
+sympy/combinatorics/graycode.py,sha256=xbtr8AaFYb4SMmwUi7mf7913U87jH-XEYF_3pGZfj0o,11207 +sympy/combinatorics/group_constructs.py,sha256=IKx12_yWJqEQ7g-oBuAWd5VRLbCOWyL0LG4PQu43BS8,2021 +sympy/combinatorics/group_numbers.py,sha256=QuB-EvXmTulg5MuI4aLE3GlmFNTGKulAP-DQW9TBXU4,3073 +sympy/combinatorics/homomorphisms.py,sha256=s8bzIv4liVXwqJT2IuYPseQW4MBW2-zDpdHUXQsf7dU,18828 +sympy/combinatorics/named_groups.py,sha256=zd_C9epKDrMG0drafGUcHuuJJkcMaDt1Nf2ik4NXNq8,8378 +sympy/combinatorics/partitions.py,sha256=ZXqVmVNjmauhMeiTWtCCqOP38b9MJg7UlBdZa-7aICQ,20841 +sympy/combinatorics/pc_groups.py,sha256=IROCLM63p4ATazWsK9qRxmx8bZjoMhWxOrTm0Q5RRpo,21351 +sympy/combinatorics/perm_groups.py,sha256=mhAE82DSVM7x2YoS4ADdwLoWxzuGLVOjeaVGJnz9EY8,185087 +sympy/combinatorics/permutations.py,sha256=2f63LyIytpdDUbPyv44DqcGUJxtbfMEJFpyGuSq4xoY,87647 +sympy/combinatorics/polyhedron.py,sha256=OYRMNVwTxT97p4sG4EScl4a2QnBIvyutIPFBzxAfCLU,35942 +sympy/combinatorics/prufer.py,sha256=v-lHZN2ZhjOTS3_jLjw44Q9F7suS3VdgXThh1Sg6CRI,12086 +sympy/combinatorics/rewritingsystem.py,sha256=XTQUZpLIr6H1UBLao_ni1UAoIMB8V5Bpfp8BBCV9g5c,17097 +sympy/combinatorics/rewritingsystem_fsm.py,sha256=CKGhLqyvxY0mlmy8_Hb4WzkSdWYPUaU2yZYhz-0iZ5w,2433 +sympy/combinatorics/schur_number.py,sha256=YdsyA7n_z9tyfRTSRfIjEjtnGo5EuDGBMUS09AQ2MxU,4437 +sympy/combinatorics/subsets.py,sha256=oxuExuGyFnvunkmktl-vBYiLbiN66A2Q2MyzwWfy46A,16047 +sympy/combinatorics/tensor_can.py,sha256=h6NTaH99oG0g1lVxhShBY2Fc4IwXyMUc0Ih31KI6kFw,40776 +sympy/combinatorics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/combinatorics/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_coset_table.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_fp_groups.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_free_groups.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_galois.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_generators.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_graycode.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_group_constructs.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_group_numbers.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_homomorphisms.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_named_groups.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_partitions.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_pc_groups.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_perm_groups.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_permutations.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_polyhedron.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_prufer.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_rewriting.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_schur_number.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_subsets.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_tensor_can.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_testutil.cpython-310.pyc,, +sympy/combinatorics/tests/__pycache__/test_util.cpython-310.pyc,, +sympy/combinatorics/tests/test_coset_table.py,sha256=cEUF0OH6SNhN_kh069wMsq6h4eSVqbDLghrg2r9Ht48,28474 +sympy/combinatorics/tests/test_fp_groups.py,sha256=7ATMwzPvAoWiH7Cex-D63nmlOa20h70zO5TWGVisFwM,9969 
+sympy/combinatorics/tests/test_free_groups.py,sha256=h3tPyjMA79M9QMc0rOlgVXU31lZ0s_xoY_YIVsVz0Fg,6161 +sympy/combinatorics/tests/test_galois.py,sha256=w35JRx8lmlXCdzUBNdocgATPYWBOEZ6LH-tAxOPwCQ8,2763 +sympy/combinatorics/tests/test_generators.py,sha256=6YpOp0i5PRGtySPNZseQ8mjSXbwpfGfz0hDB4kfk40Q,3567 +sympy/combinatorics/tests/test_graycode.py,sha256=pI4e7Y615d5Bmmxui6fdEeyca6j6KSD0YmeychV6ORk,2800 +sympy/combinatorics/tests/test_group_constructs.py,sha256=jJLwMdhuUalKv4Aql9SzV2utK8Ex-IYdMecggr95pi8,450 +sympy/combinatorics/tests/test_group_numbers.py,sha256=nRxK4R8Cdq4Ni9e_6n4fRjir3VBOmXMzAIXnlRNQD3Y,989 +sympy/combinatorics/tests/test_homomorphisms.py,sha256=UwBj5loCuZAiuvmqy5VAbwhCQTph8o6BzTaGrH0rzB4,3745 +sympy/combinatorics/tests/test_named_groups.py,sha256=tsuDVGv4iHGEZ0BVR87_ENhyAfZvFIl0M6Dv_HX1VoY,1931 +sympy/combinatorics/tests/test_partitions.py,sha256=oppszKJLLSpcEzHgespIveSmEC3fDZ0qkus1k7MBt4E,4097 +sympy/combinatorics/tests/test_pc_groups.py,sha256=wfkY_ilpG0XWrhaWMVK6r7yWMeXfM8WNTyti5oE9bdk,2728 +sympy/combinatorics/tests/test_perm_groups.py,sha256=t-bERPQXU4pKAEHR3caHemGMnQ2qh9leIOz0-hB8vjo,41191 +sympy/combinatorics/tests/test_permutations.py,sha256=IfOxSCY18glt_8lqovnjtXyz9OX02ZQaUE47aCUzKIA,20149 +sympy/combinatorics/tests/test_polyhedron.py,sha256=3SWkFQKeF-p1QWP4Iu9NIA1oTxAFo1BLRrrLerBFAhw,4180 +sympy/combinatorics/tests/test_prufer.py,sha256=OTJp0NxjiVswWkOuCIlnGFU2Gw4noRsrPpUJtp2XhEs,2649 +sympy/combinatorics/tests/test_rewriting.py,sha256=3COHq74k6knt2rqE7hfd4ZP_6whf0Kg14tYxFmTtYrI,1787 +sympy/combinatorics/tests/test_schur_number.py,sha256=wg13uTumFltWIGbVg_PEr6nhXIru19UWitsEZiakoRI,1727 +sympy/combinatorics/tests/test_subsets.py,sha256=6pyhLYV5HuXvx63r-gGVHr8LSrGRXcpDudhFn9fBqX8,2635 +sympy/combinatorics/tests/test_tensor_can.py,sha256=olH5D5wwTBOkZXjtqvLO6RKbvCG9KoMVK4__wDe95N4,24676 +sympy/combinatorics/tests/test_testutil.py,sha256=uJlO09XgD-tImCWu1qkajiC07rK3GoN91v3_OqT5-qo,1729 +sympy/combinatorics/tests/test_util.py,sha256=sOYMWHxlbM0mqalqA7jNrYMm8DKcf_GwL5YBjs96_C4,4499 +sympy/combinatorics/testutil.py,sha256=Nw0En7kI9GMjca287aht1HNaTjBFv8ulq0E1rgtpO6Q,11152 +sympy/combinatorics/util.py,sha256=LIu_8__RKMv8EfXAfkr08UKYSMq5hGJBLHyDSS5nd-8,16297 +sympy/concrete/__init__.py,sha256=2HDmg3VyLgM_ZPw3XsGpkOClGiQnyTlUNHSwVTtizA0,144 +sympy/concrete/__pycache__/__init__.cpython-310.pyc,, +sympy/concrete/__pycache__/delta.cpython-310.pyc,, +sympy/concrete/__pycache__/expr_with_intlimits.cpython-310.pyc,, +sympy/concrete/__pycache__/expr_with_limits.cpython-310.pyc,, +sympy/concrete/__pycache__/gosper.cpython-310.pyc,, +sympy/concrete/__pycache__/guess.cpython-310.pyc,, +sympy/concrete/__pycache__/products.cpython-310.pyc,, +sympy/concrete/__pycache__/summations.cpython-310.pyc,, +sympy/concrete/delta.py,sha256=xDtz1yXnd-WRIu3nnJFBIrA01PLOUT3XU1znPeVATU0,9958 +sympy/concrete/expr_with_intlimits.py,sha256=vj4PjttB9xE5aUYu37R1A4_KtGgxcPa65jzjv8-krsc,11352 +sympy/concrete/expr_with_limits.py,sha256=txn7gbh-Yqw0-ZBGvN9iFNsPW13wD2z7alf8EyQVZ4U,21832 +sympy/concrete/gosper.py,sha256=3q8gkZz_oAeBOBUfObMvwArBkBKYReHR0prVXMIqrNE,5557 +sympy/concrete/guess.py,sha256=Ha12uphLNfo3AbfsGy85JsPxhbiAXJemwpz9QXRtp48,17472 +sympy/concrete/products.py,sha256=s6E_Z0KuHx8MzbJzaJo2NP5aTpgIo3-oqGwgYh_osnE,18608 +sympy/concrete/summations.py,sha256=jhmU5WCz98Oon3oosHUsM8sp6ErjPGCz25rbKn5hqS8,55371 +sympy/concrete/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/concrete/tests/__pycache__/__init__.cpython-310.pyc,, 
+sympy/concrete/tests/__pycache__/test_delta.cpython-310.pyc,, +sympy/concrete/tests/__pycache__/test_gosper.cpython-310.pyc,, +sympy/concrete/tests/__pycache__/test_guess.cpython-310.pyc,, +sympy/concrete/tests/__pycache__/test_products.cpython-310.pyc,, +sympy/concrete/tests/__pycache__/test_sums_products.cpython-310.pyc,, +sympy/concrete/tests/test_delta.py,sha256=uI7xjMx7JuVb3kkN7cLR6_pGsKS4Ulq22p-Z9oti5Jc,23869 +sympy/concrete/tests/test_gosper.py,sha256=ZHiZfYGCeCS9I-0oqN6sFbiYa-284GeFoGsNbhIWq4I,7987 +sympy/concrete/tests/test_guess.py,sha256=TPW6Hy11Po6VLZG_dx95x3sMBYl5kcQH8wjJ6TOtu-k,3370 +sympy/concrete/tests/test_products.py,sha256=caYc-xlEIrX9I_A-KPQdwp5oDprVJSbfcOaKg_qUnsM,14521 +sympy/concrete/tests/test_sums_products.py,sha256=0ti3g4D8hBpvpsSrc2CYIRxVwqLORKO5K88offDwKfM,64458 +sympy/conftest.py,sha256=3vg-GlDw8Y8MGoa324FoRJR3HaRaJhZpiXdTTVoNAoI,2245 +sympy/core/__init__.py,sha256=LQBkB1S-CYmQ3P24ei_kHcsMwtbDobn3BqzJQ-rJ1Hs,3050 +sympy/core/__pycache__/__init__.cpython-310.pyc,, +sympy/core/__pycache__/_print_helpers.cpython-310.pyc,, +sympy/core/__pycache__/add.cpython-310.pyc,, +sympy/core/__pycache__/alphabets.cpython-310.pyc,, +sympy/core/__pycache__/assumptions.cpython-310.pyc,, +sympy/core/__pycache__/assumptions_generated.cpython-310.pyc,, +sympy/core/__pycache__/backend.cpython-310.pyc,, +sympy/core/__pycache__/basic.cpython-310.pyc,, +sympy/core/__pycache__/cache.cpython-310.pyc,, +sympy/core/__pycache__/compatibility.cpython-310.pyc,, +sympy/core/__pycache__/containers.cpython-310.pyc,, +sympy/core/__pycache__/core.cpython-310.pyc,, +sympy/core/__pycache__/coreerrors.cpython-310.pyc,, +sympy/core/__pycache__/decorators.cpython-310.pyc,, +sympy/core/__pycache__/evalf.cpython-310.pyc,, +sympy/core/__pycache__/expr.cpython-310.pyc,, +sympy/core/__pycache__/exprtools.cpython-310.pyc,, +sympy/core/__pycache__/facts.cpython-310.pyc,, +sympy/core/__pycache__/function.cpython-310.pyc,, +sympy/core/__pycache__/kind.cpython-310.pyc,, +sympy/core/__pycache__/logic.cpython-310.pyc,, +sympy/core/__pycache__/mod.cpython-310.pyc,, +sympy/core/__pycache__/mul.cpython-310.pyc,, +sympy/core/__pycache__/multidimensional.cpython-310.pyc,, +sympy/core/__pycache__/numbers.cpython-310.pyc,, +sympy/core/__pycache__/operations.cpython-310.pyc,, +sympy/core/__pycache__/parameters.cpython-310.pyc,, +sympy/core/__pycache__/power.cpython-310.pyc,, +sympy/core/__pycache__/random.cpython-310.pyc,, +sympy/core/__pycache__/relational.cpython-310.pyc,, +sympy/core/__pycache__/rules.cpython-310.pyc,, +sympy/core/__pycache__/singleton.cpython-310.pyc,, +sympy/core/__pycache__/sorting.cpython-310.pyc,, +sympy/core/__pycache__/symbol.cpython-310.pyc,, +sympy/core/__pycache__/sympify.cpython-310.pyc,, +sympy/core/__pycache__/trace.cpython-310.pyc,, +sympy/core/__pycache__/traversal.cpython-310.pyc,, +sympy/core/_print_helpers.py,sha256=GQo9dI_BvAJtYHVFFfmroNr0L8d71UeI-tU7SGJgctk,2388 +sympy/core/add.py,sha256=9VDeDODPv3Y72EWa4Xiypy3i67DzbNlPUYAEZXhEwEw,43747 +sympy/core/alphabets.py,sha256=vWBs2atOvfRK6Xfg6hc5IKiB7s_0sZIiVJpcCUJL0N4,266 +sympy/core/assumptions.py,sha256=P7c11DL5VD_94v1Dc5LofIy6Atrth7FZp03rDr4ftQ4,23582 +sympy/core/assumptions_generated.py,sha256=0TJKYIHSIFyQcVHZdIHZ19b7tqst_sY7iZwjKzcvZBM,42817 +sympy/core/backend.py,sha256=AUgGtYmz0mIoVmjKVMAa5ZzlC1p5anxk-N4Sy7pePNo,3842 +sympy/core/basic.py,sha256=1wRiJLAILhJK2uVTAtuxlCFWKXCKT-PECXve4rfXWs0,72857 +sympy/core/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+sympy/core/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_arit.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_assumptions.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_basic.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_expand.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_numbers.cpython-310.pyc,, +sympy/core/benchmarks/__pycache__/bench_sympify.cpython-310.pyc,, +sympy/core/benchmarks/bench_arit.py,sha256=gfrnvKSXLCaUoFFxMgJhnLUp7rG9Pa_YT7OKgOrPP8E,412 +sympy/core/benchmarks/bench_assumptions.py,sha256=evfZzTgOUUvvvlK0DRdDZQRqxIlGLfJYzKu8QDMxSks,177 +sympy/core/benchmarks/bench_basic.py,sha256=YF0tTJ_AN_Wz11qidzM4bIhlwEhEqVc-IGVGrUx6SaA,210 +sympy/core/benchmarks/bench_expand.py,sha256=xgQYQMwqgXJtKajM4JVhuL-7AW8TLY-vdBpO6uyMDoQ,427 +sympy/core/benchmarks/bench_numbers.py,sha256=fvcbOkslXdADqiX_amiL-BEUtrXBfdiTZeOtbiI2auI,1105 +sympy/core/benchmarks/bench_sympify.py,sha256=G5iGInhhbkkxSY2pS08BNG945m9m4eZlNT1aJutGt5M,138 +sympy/core/cache.py,sha256=AyG7kganyV0jVx-aNBEUFogqRLHQqqFn8xU3ZSfJoaM,6172 +sympy/core/compatibility.py,sha256=XQH7ezmRi6l3R23qMHN2wfA-YMRWbh2YYjPY7LRo3lo,1145 +sympy/core/containers.py,sha256=ic6uSNItz5JgL8Dx8T87gcnpiGwOxvf6FaQVgIRWWoo,11315 +sympy/core/core.py,sha256=3pIrJokfb2Rn8S2XudM3JyQVEqY1vZhSEZ-1tkUmqYg,1797 +sympy/core/coreerrors.py,sha256=OKpJwk_yE3ZMext49R-QwtTudZaXZbmTspaq1ZMMpAU,272 +sympy/core/decorators.py,sha256=de6eYm3D_YdEW1rEKOIES_aEyvbjqRM98I67l8QGGVU,8217 +sympy/core/evalf.py,sha256=HL9frdDL3OXiF08CXISADkmCx7_KjcAt_nYu4m_IKyM,61889 +sympy/core/expr.py,sha256=_lGEDOkQX57uMh275-NGY3Mus6lrQP-cCW_b6xngy_w,142568 +sympy/core/exprtools.py,sha256=mCUxyyQZDSceU7eHPxV3C0mBUWI4a2Qz_LhZxJ5FXY8,51459 +sympy/core/facts.py,sha256=54pFKhJwEzU8LkO7rL25TwGjIb5y5CvZleHEy_TpD68,19546 +sympy/core/function.py,sha256=TuxxpFyc9y5s5dQH3hZnjEovhoZM0nDQNPjfKw5I4ug,115552 +sympy/core/kind.py,sha256=9kQvtDxm-SSRGi-155XsBl_rs-oN_7dw7fNNT3mDu2Q,11540 +sympy/core/logic.py,sha256=Ai2_N-pUmHngJN3usiMTNO6kfLWFVQa3WOet3VhehE8,10865 +sympy/core/mod.py,sha256=survk3e5EyNifVHKpqLZ5NUobFdS0-wEYN4XoUkzMI8,7484 +sympy/core/mul.py,sha256=d7TAZK5YQWT7dsHt84y-2K9Q17FUxi6ilpfgd0GPZ30,78458 +sympy/core/multidimensional.py,sha256=NWX1okybO_nZCl9IhIOE8QYalY1WoC0zlzsvBg_E1eE,4233 +sympy/core/numbers.py,sha256=yNkmRw8ehaQWREJAYv61YP2pGkXy1yAo7ehGrXTVamY,139169 +sympy/core/operations.py,sha256=vasCAsT4aU9XJxfrEGjL-zeVIl2FsI1ktzVtPaJq_0c,25185 +sympy/core/parameters.py,sha256=09LVewtoOyKABQvYeMaJuc-HG7TjJusyT_WMw5NQDDs,3733 +sympy/core/power.py,sha256=WYVmJPNPFsaxeec2D2M_Tb9vUrIG3K8CiAqHca1YVPE,77148 +sympy/core/random.py,sha256=miFdVpNKfutbkpYiIOzG9kVNUm5GTk-_nnmQqUhVDZs,6647 +sympy/core/relational.py,sha256=XcPZ8xUKl8pMAcGk9OBYssCcTH-7lueak2WrsTpzs8g,50608 +sympy/core/rules.py,sha256=AJuZztmYKZ_yUITLZB6rhZjDy6ROBCtajcYqPa50sjc,1496 +sympy/core/singleton.py,sha256=0TrQk5Q4U-GvSXTe4Emih6B2JJg2WMu_u0pSj92wqVA,6542 +sympy/core/sorting.py,sha256=ynZfmQPXWq5Te6WOz6CzaR8crlJfcfKTP24gzVf-QF0,10671 +sympy/core/symbol.py,sha256=eciLIZCLMlmBKBF5XcJqVRYXf2Z3M13kQ3dJ_-ok43g,28555 +sympy/core/sympify.py,sha256=pZuEWvH-kcUGNq0epaVm11G8cmXZQtMyoeoywBVcbYU,20399 +sympy/core/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/core/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_args.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_arit.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_assumptions.cpython-310.pyc,, 
+sympy/core/tests/__pycache__/test_basic.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_cache.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_compatibility.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_complex.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_constructor_postprocessor.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_containers.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_count_ops.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_diff.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_equal.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_eval.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_evalf.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_expand.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_expr.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_exprtools.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_facts.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_function.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_kind.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_logic.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_match.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_multidimensional.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_noncommutative.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_numbers.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_operations.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_parameters.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_power.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_priority.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_random.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_relational.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_rules.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_singleton.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_sorting.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_subs.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_symbol.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_sympify.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_traversal.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_truediv.cpython-310.pyc,, +sympy/core/tests/__pycache__/test_var.cpython-310.pyc,, +sympy/core/tests/test_args.py,sha256=IeGS8dWg2nM8LncK-_XH4yuCyoBjSIHgemDGEpiVEnc,178389 +sympy/core/tests/test_arit.py,sha256=DwlTHtg2BllVwn0lGNJs89TsKgeAf7wdrXCZR7BkfGo,77847 +sympy/core/tests/test_assumptions.py,sha256=MjJdF_ymVL6mtgQx-aSr_rsNNxaTi2pHFLjyaPCBq5Q,41573 +sympy/core/tests/test_basic.py,sha256=cgAhl2-bLXBkx2EaV5KtnY7-MKOEL9Mov25JUoAmLSo,9496 +sympy/core/tests/test_cache.py,sha256=p6Ci75a_T-bBXE_5HVxRKla62uSay_0Vuf57gUuH6sI,2001 +sympy/core/tests/test_compatibility.py,sha256=7pvNUEGIcRrfWl3doqHlm3AdNkGlcChO69gos3Fk09A,240 +sympy/core/tests/test_complex.py,sha256=koNGFMt6UMmzahJADSja_eD24gr-GG5gGCtyDgCRtPI,21906 +sympy/core/tests/test_constructor_postprocessor.py,sha256=0d7vbVuKi3GCm3PKLtiNqv_Au7v6RYt1rzRdHiD08tM,2441 +sympy/core/tests/test_containers.py,sha256=bFaqu8Bu82-rpgpNEPU4-R3rGwhqNdlLlWCqtHsBqN0,7434 +sympy/core/tests/test_count_ops.py,sha256=eIA2WvCuWKXVBJEGfWoJrn6WfUshX_NXttrrfyLbNnI,5665 +sympy/core/tests/test_diff.py,sha256=6j4Vk9UCNRv8Oyx_4iv1ePjocwBg7_-3ftrSJ8u0cPo,5421 +sympy/core/tests/test_equal.py,sha256=RoOJuu4kMe4Rkk7eNyVOJov5S1770YHiVAiziNIKd2o,1678 +sympy/core/tests/test_eval.py,sha256=o0kZn3oaMidVYdNjeZYtx4uUKBoE3A2tWn2NS4hu72Q,2366 
+sympy/core/tests/test_evalf.py,sha256=ShOta18xc-jFlSnnlHhyWsDumLyQRr91YiC1j_gL9Sw,28307 +sympy/core/tests/test_expand.py,sha256=-Rl7sRQevvVBMck3jSA8kg6jgvWeI2yxh9cbSuy0fOA,13383 +sympy/core/tests/test_expr.py,sha256=RRZ7r-AltCCz7Cxfun8is5xVVUklXjbBfDVDoFopAf0,76520 +sympy/core/tests/test_exprtools.py,sha256=L7fi319z1EeFag6pH8myqDQYQ32H193QLKMdqlxACsY,19021 +sympy/core/tests/test_facts.py,sha256=YEZMZ-116VFnFqJ48h9bQsF2flhiB65trnZvJsRSh_o,11579 +sympy/core/tests/test_function.py,sha256=vVoXYyGzdTO3EtlRu0sONxjB3fprXxZ7_9Ve6HdH84s,51420 +sympy/core/tests/test_kind.py,sha256=NLJbwCpugzlNbaSyUlbb6NHoT_9dHuoXj023EDQMrNI,2048 +sympy/core/tests/test_logic.py,sha256=_YKSIod6Q0oIz9lDs78UQQrv9LU-uKaztd7w8LWwuwY,5634 +sympy/core/tests/test_match.py,sha256=2ewD4Ao9cYNvbt2TAId8oZCU0GCNWsSDx4qO5-_Xhwc,22716 +sympy/core/tests/test_multidimensional.py,sha256=Fr-lagme3lwLrBpdaWP7O7oPezhIatn5X8fYYs-8bN8,848 +sympy/core/tests/test_noncommutative.py,sha256=IkGPcvLO4ACVj5LMT2IUgyj68F1RBvMKbm01iqTOK04,4436 +sympy/core/tests/test_numbers.py,sha256=AgFd3RJAMakI6AxCDzfOrGgSX7UeAjxvPHs3Rzk2ns4,75434 +sympy/core/tests/test_operations.py,sha256=mRxftKlrxxrn3zS3UPwqkF6Nr15l5Cv6j3c2RJX46s4,2859 +sympy/core/tests/test_parameters.py,sha256=lRZSShirTW7GRfYgU3A3LRlW79xEPqi62XtoJeaMuDs,2799 +sympy/core/tests/test_power.py,sha256=LptUWHOYrFfNg1-8cNEMxDoQzCdDtguihgVoGb0QC9M,24434 +sympy/core/tests/test_priority.py,sha256=g9dGW-qT647yL4uk1D_v3M2S8rgV1Wi4JBUFyTSwUt4,3190 +sympy/core/tests/test_random.py,sha256=H58NfH5BYeQ3RIscbDct6SZkHQVRJjichVUSuSrhvAU,1233 +sympy/core/tests/test_relational.py,sha256=jebPjr32VQsL-W3laOMxKuYkyo9SFpkdXrTFfqDL3e4,42972 +sympy/core/tests/test_rules.py,sha256=iwmMX7hxC_73CuX9BizeAci-cO4JDq-y1sicKBXEGA4,349 +sympy/core/tests/test_singleton.py,sha256=xLJJgXwmkbKhsot_qTs-o4dniMjHUh3_va0xsA5h-KA,3036 +sympy/core/tests/test_sorting.py,sha256=6BZKYqUedAR-jeHcIgsJelJHFWuougml2c1NNilxGZg,902 +sympy/core/tests/test_subs.py,sha256=7ITJFDplgWBRImkcHfjRdnHqaKgjTxWb4j4WoRysvR8,30106 +sympy/core/tests/test_symbol.py,sha256=zYhPWsdyQp7_NiLVthpoCB1RyP9pmJcNlTdTN2kMdfY,13043 +sympy/core/tests/test_sympify.py,sha256=gVUNWYtarpDrx3vk4r0Vjnrijr21YgHUUSfJmeyabCo,27866 +sympy/core/tests/test_traversal.py,sha256=cmgvMW8G-LZ20ZXy-wg5Vz5ogI_oq2p2bJSwMy9IMF0,4311 +sympy/core/tests/test_truediv.py,sha256=RYfJX39-mNhekRE3sj5TGFZXKra4ML9vGvObsRYuD3k,854 +sympy/core/tests/test_var.py,sha256=hexP-0q2nN9h_dyhKLCuvqFXgLC9e_Hroni8Ldb16Ko,1594 +sympy/core/trace.py,sha256=9WC8p3OpBL6TdHmZWMDK9jaCG-16f4uZV2VptduVH98,348 +sympy/core/traversal.py,sha256=M-ZMt-DRUgyZed_I1gikxEbSYEJLwi7mwpjd-_iFKC8,8962 +sympy/crypto/__init__.py,sha256=i8GcbScXhIPbMEe7uuMgXqh_cU2mZm2f6hspIgmW5uM,2158 +sympy/crypto/__pycache__/__init__.cpython-310.pyc,, +sympy/crypto/__pycache__/crypto.cpython-310.pyc,, +sympy/crypto/crypto.py,sha256=Qb0O_f78q-CtHabvHS7VRJmncbkuqowWTF3_drmMgxI,89426 +sympy/crypto/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/crypto/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/crypto/tests/__pycache__/test_crypto.cpython-310.pyc,, +sympy/crypto/tests/test_crypto.py,sha256=-GJYezqcuQ3KUq_IqCEJAWa-zWAPWFku2WdLj7Aonrc,19763 +sympy/diffgeom/__init__.py,sha256=cWj4N7AfNgrYcGIBexX-UrWxfd1bP9DTNqUmLWUJ9nA,991 +sympy/diffgeom/__pycache__/__init__.cpython-310.pyc,, +sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc,, +sympy/diffgeom/__pycache__/rn.cpython-310.pyc,, +sympy/diffgeom/diffgeom.py,sha256=CCkZEwNcJYrmhyuBVr94KwMFjHsbL6mOJZ2f5aGcARU,72322 
+sympy/diffgeom/rn.py,sha256=kvgth6rNJWt94kzVospZwiH53C-s4VSiorktQNmMobQ,6264
+sympy/diffgeom/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc,,
+sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc,,
+sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc,,
+sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc,,
+sympy/diffgeom/tests/test_class_structure.py,sha256=LbRyxhhp-NnnfJ2gTn1SdlgCBQn2rhyB7xApOgcd_rM,1048
+sympy/diffgeom/tests/test_diffgeom.py,sha256=3BepCr6ned-4C_3me4zScu06HXG9Qx_dBBxIpiXAvy4,14145
+sympy/diffgeom/tests/test_function_diffgeom_book.py,sha256=0YU63iHyY6O-4LR9lRS5kLZMpcMpuNxEsgqtXALV7ic,5258
+sympy/diffgeom/tests/test_hyperbolic_space.py,sha256=c4xQJ_bBS4xrMj3pfx1Ms3oC2_LwuJuNYXNZxs-cVG8,2598
+sympy/discrete/__init__.py,sha256=A_Seud0IRr2gPYlz6JMQZa3sBhRL3O7gVqhIvMRRvE0,772
+sympy/discrete/__pycache__/__init__.cpython-310.pyc,,
+sympy/discrete/__pycache__/convolutions.cpython-310.pyc,,
+sympy/discrete/__pycache__/recurrences.cpython-310.pyc,,
+sympy/discrete/__pycache__/transforms.cpython-310.pyc,,
+sympy/discrete/convolutions.py,sha256=xeXCLxPSpBNfrKNlPGGpuU3D9Azf0uR01OpDGCOAALg,14505
+sympy/discrete/recurrences.py,sha256=FqU5QG4qNNLSVBqcpL7HtKa7rQOlmHMXDQRzHZ_P_s0,5124
+sympy/discrete/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/discrete/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/discrete/tests/__pycache__/test_convolutions.cpython-310.pyc,,
+sympy/discrete/tests/__pycache__/test_recurrences.cpython-310.pyc,,
+sympy/discrete/tests/__pycache__/test_transforms.cpython-310.pyc,,
+sympy/discrete/tests/test_convolutions.py,sha256=m6LrKCMIeNeuicfuMMFG3-Ke-7oyjTsD1QRbKdTRVYk,16626
+sympy/discrete/tests/test_recurrences.py,sha256=s5ZEZQ262gcnBLpCjJVmeKlTKQByRTQBrc-N9p_4W8c,3019
+sympy/discrete/tests/test_transforms.py,sha256=vEORFaPvxmPSsw0f4Z2hLEN1wD0FdyQOYHDEY9aVm5A,5546
+sympy/discrete/transforms.py,sha256=lf-n6IN881uCfTUAxPNjdUaSguiRbYW0omuR96vKNlE,11681
+sympy/external/__init__.py,sha256=C6s4654Elc_X-D9UgI2cUQWiQyGDt9LG3IKUc8qqzuo,578
+sympy/external/__pycache__/__init__.cpython-310.pyc,,
+sympy/external/__pycache__/gmpy.cpython-310.pyc,,
+sympy/external/__pycache__/importtools.cpython-310.pyc,,
+sympy/external/__pycache__/pythonmpq.cpython-310.pyc,,
+sympy/external/gmpy.py,sha256=V3Z0HQyg7SOgviwOvBik8dUtSxO6yiNqFqjARnjTO3I,2982
+sympy/external/importtools.py,sha256=Q7tS2cdGZ9a4NI_1sgGuoVcSDv_rIk-Av0BpFTa6EzA,7671
+sympy/external/pythonmpq.py,sha256=WOMTvHxYLXNp_vQ1F3jE_haeRlnGicbRlCTOp4ZNuo8,11243
+sympy/external/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/external/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/external/tests/__pycache__/test_autowrap.cpython-310.pyc,,
+sympy/external/tests/__pycache__/test_codegen.cpython-310.pyc,,
+sympy/external/tests/__pycache__/test_importtools.cpython-310.pyc,,
+sympy/external/tests/__pycache__/test_numpy.cpython-310.pyc,,
+sympy/external/tests/__pycache__/test_pythonmpq.cpython-310.pyc,,
+sympy/external/tests/__pycache__/test_scipy.cpython-310.pyc,,
+sympy/external/tests/test_autowrap.py,sha256=tRDOkHdndNTmsa9sGjlZ1lFIh1rL2Awck4ec1iolb7c,9755
+sympy/external/tests/test_codegen.py,sha256=zOgdevzcR5pK73FnXe3Su_2D6cuvrkP2FMqsro83G-c,12676
+sympy/external/tests/test_importtools.py,sha256=KrfontKYv11UvpazQ0vS1qyhxIvgZrCOXh1JFeACjeo,1394
+sympy/external/tests/test_numpy.py,sha256=7-YWZ--nbVX0h_rzah18AEjiz7JyvEzjHtklhwaAGhI,10123
+sympy/external/tests/test_pythonmpq.py,sha256=L_FdZmmk5N-VEivE_O_qZa98BZhT1WSxRfdmG817bA0,5797
+sympy/external/tests/test_scipy.py,sha256=CVaw7D0-6DORgg78Q6b35SNKn05PlKwWJuqXOuU-qdY,1172
+sympy/functions/__init__.py,sha256=fxnbVbZruEHXQxB5DaQTC6k1Qi8BrWaQ3LwBuSZZryk,5229
+sympy/functions/__pycache__/__init__.cpython-310.pyc,,
+sympy/functions/combinatorial/__init__.py,sha256=WqXI3qU_TTJ7nJA8m3Z-7ZAYKoApT8f9Xs0u2bTwy_c,53
+sympy/functions/combinatorial/__pycache__/__init__.cpython-310.pyc,,
+sympy/functions/combinatorial/__pycache__/factorials.cpython-310.pyc,,
+sympy/functions/combinatorial/__pycache__/numbers.cpython-310.pyc,,
+sympy/functions/combinatorial/factorials.py,sha256=OkQ_U2FhDCU0wnpLWyK4f6HMup-EAxh1fsQns74hYjE,37546
+sympy/functions/combinatorial/numbers.py,sha256=iXGk2kGB866puhbfk49KfFogYW8lUVTk_tm_nQw_gg4,83429
+sympy/functions/combinatorial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/functions/combinatorial/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/functions/combinatorial/tests/__pycache__/test_comb_factorials.cpython-310.pyc,,
+sympy/functions/combinatorial/tests/__pycache__/test_comb_numbers.cpython-310.pyc,,
+sympy/functions/combinatorial/tests/test_comb_factorials.py,sha256=aM7qyHno3THToCxy2HMo1SJlINm4Pj7SjoLtALl6DJ0,26176
+sympy/functions/combinatorial/tests/test_comb_numbers.py,sha256=COdo810q8vjVyHiOYsgD5TcAE4G3bQUzQXlEroDWsj0,34317
+sympy/functions/elementary/__init__.py,sha256=Fj8p5qE-Rr1lqAyHI0aSgC3RYX56O-gWwo6wu-eUQYA,50
+sympy/functions/elementary/__pycache__/__init__.cpython-310.pyc,,
+sympy/functions/elementary/__pycache__/_trigonometric_special.cpython-310.pyc,,
+sympy/functions/elementary/__pycache__/complexes.cpython-310.pyc,,
+sympy/functions/elementary/__pycache__/exponential.cpython-310.pyc,,
+sympy/functions/elementary/__pycache__/hyperbolic.cpython-310.pyc,,
+sympy/functions/elementary/__pycache__/integers.cpython-310.pyc,,
+sympy/functions/elementary/__pycache__/miscellaneous.cpython-310.pyc,,
+sympy/functions/elementary/__pycache__/piecewise.cpython-310.pyc,,
+sympy/functions/elementary/__pycache__/trigonometric.cpython-310.pyc,,
+sympy/functions/elementary/_trigonometric_special.py,sha256=PiQ1eg280vWAnSaMMw6RheEJI0oIiwYa4K_sHmUWEgc,7245
+sympy/functions/elementary/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/functions/elementary/benchmarks/__pycache__/__init__.cpython-310.pyc,,
+sympy/functions/elementary/benchmarks/__pycache__/bench_exp.cpython-310.pyc,,
+sympy/functions/elementary/benchmarks/bench_exp.py,sha256=PFBYa9eMovH5XOFN5XTxWr1VDj1EBoKwn4mAtj-_DdM,185
+sympy/functions/elementary/complexes.py,sha256=wwyEdwEaTyps_ZPEA667W7b_VLdYwaZ2cdE2vd5d5NI,43263
+sympy/functions/elementary/exponential.py,sha256=UrXHbvLi3r-uxLw_XYWiEUAnWVF5agcgDDkqWyA_r5Q,42694
+sympy/functions/elementary/hyperbolic.py,sha256=YEnCb_IbSgyUxicldCV61qCcPTrPt-eTexR_c6LRpv8,66628
+sympy/functions/elementary/integers.py,sha256=hM3NvuUHfTH-V8tGHc2ocOwGyXhsLe1gWO_8KJGw0So,19074
+sympy/functions/elementary/miscellaneous.py,sha256=TAIoqthhfqx_wlcNbDdDHpLQrosWxX_nGy48BJk3R_w,27933
+sympy/functions/elementary/piecewise.py,sha256=o8y2TUKcn9varebhrcZSQQg-DOqjJHR2aP02CohgDEo,57858
+sympy/functions/elementary/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/functions/elementary/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/functions/elementary/tests/__pycache__/test_complexes.cpython-310.pyc,,
+sympy/functions/elementary/tests/__pycache__/test_exponential.cpython-310.pyc,,
+sympy/functions/elementary/tests/__pycache__/test_hyperbolic.cpython-310.pyc,,
+sympy/functions/elementary/tests/__pycache__/test_integers.cpython-310.pyc,,
+sympy/functions/elementary/tests/__pycache__/test_interface.cpython-310.pyc,,
+sympy/functions/elementary/tests/__pycache__/test_miscellaneous.cpython-310.pyc,,
+sympy/functions/elementary/tests/__pycache__/test_piecewise.cpython-310.pyc,,
+sympy/functions/elementary/tests/__pycache__/test_trigonometric.cpython-310.pyc,,
+sympy/functions/elementary/tests/test_complexes.py,sha256=nUSm7w9s2H_F1g8FB841ZoL0skV95PGV5w4_x8Ygh3Q,33513
+sympy/functions/elementary/tests/test_exponential.py,sha256=r8pqvffIEsu8K8VKeXCSsH4IXUJKzDa2wdx-pClsdmk,29566
+sympy/functions/elementary/tests/test_hyperbolic.py,sha256=gz7Is98WR0hCrZwDkocpi2CYWn6FqX11OzGCtpzvbZI,53361
+sympy/functions/elementary/tests/test_integers.py,sha256=g7FE4C8d8BuyZApycbQbq5uPs81eyR_4YdwP6A2P1Gc,20930
+sympy/functions/elementary/tests/test_interface.py,sha256=dBHnagyfDEXsQWlxVzWpqgCBdiJM0oUIv2QONbEYo9s,2054
+sympy/functions/elementary/tests/test_miscellaneous.py,sha256=eCL30UmsusBhjvqICQNmToa1aJTML8fXav1L1J6b7FU,17148
+sympy/functions/elementary/tests/test_piecewise.py,sha256=OOSlqsR7ZZG7drmSO7v5PlrPcbrqpv7sEt6h8pLNYyU,61520
+sympy/functions/elementary/tests/test_trigonometric.py,sha256=xsf5N30ILb_mdpx6Cb5E0o1QY5V4impDX2wqANJnXBE,86394
+sympy/functions/elementary/trigonometric.py,sha256=gnerAnDl9qfqxzvhMr2E5tRdq1GiBfdut6OLxRwuwTc,113966
+sympy/functions/special/__init__.py,sha256=5pjIq_RVCMsuCe1b-FlwIty30KxoUowZYKLmpIT9KHQ,59
+sympy/functions/special/__pycache__/__init__.cpython-310.pyc,,
+sympy/functions/special/__pycache__/bessel.cpython-310.pyc,,
+sympy/functions/special/__pycache__/beta_functions.cpython-310.pyc,,
+sympy/functions/special/__pycache__/bsplines.cpython-310.pyc,,
+sympy/functions/special/__pycache__/delta_functions.cpython-310.pyc,,
+sympy/functions/special/__pycache__/elliptic_integrals.cpython-310.pyc,,
+sympy/functions/special/__pycache__/error_functions.cpython-310.pyc,,
+sympy/functions/special/__pycache__/gamma_functions.cpython-310.pyc,,
+sympy/functions/special/__pycache__/hyper.cpython-310.pyc,,
+sympy/functions/special/__pycache__/mathieu_functions.cpython-310.pyc,,
+sympy/functions/special/__pycache__/polynomials.cpython-310.pyc,,
+sympy/functions/special/__pycache__/singularity_functions.cpython-310.pyc,,
+sympy/functions/special/__pycache__/spherical_harmonics.cpython-310.pyc,,
+sympy/functions/special/__pycache__/tensor_functions.cpython-310.pyc,,
+sympy/functions/special/__pycache__/zeta_functions.cpython-310.pyc,,
+sympy/functions/special/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/functions/special/benchmarks/__pycache__/__init__.cpython-310.pyc,,
+sympy/functions/special/benchmarks/__pycache__/bench_special.cpython-310.pyc,,
+sympy/functions/special/benchmarks/bench_special.py,sha256=wzAoKTccuEaG4xrEYTlYfIJuLi3kUTMTEJ9iA113Wog,164
+sympy/functions/special/bessel.py,sha256=3q5Ti0vVqSPQZ9oSZovJNAviFWuOXLUMbJvpRkdTxWs,63415
+sympy/functions/special/beta_functions.py,sha256=NXwFSRAtpoVkSybCUqicQDKqc8SNBeq3SOB1QS-Ge84,12603
+sympy/functions/special/bsplines.py,sha256=GxW_6tXuiuWap-pc4T0v1PMcfw8FXaq3mSEf50OkLoU,10152
+sympy/functions/special/delta_functions.py,sha256=NPneFMqLdwwMGZweS5C-Bok6ch1roYyO481ZNOiWp8I,19866
+sympy/functions/special/elliptic_integrals.py,sha256=rn4asENf-mFTc-iTpMOht-E-q_-vmhNc0Bd4xMPGfOE,14694
+sympy/functions/special/error_functions.py,sha256=syaTdbOA7xJBtMuuDSFZsOerSc2-Z5pm77SQ7Qn_eCU,77081
+sympy/functions/special/gamma_functions.py,sha256=OjPRUlD9wXr0XfBhn3Ocbwpey7Qd0H1JPyHeZkevxSc,42596
+sympy/functions/special/hyper.py,sha256=aby7IOWh0OtlCclHWv0cz3-cqKvuSIVHvQ8qFgOtQs8,37290
+sympy/functions/special/mathieu_functions.py,sha256=-3EsPJHwU1upnYz5rsc1Zy43aPpjXD1Nnmn2yA9LS6U,6606
+sympy/functions/special/polynomials.py,sha256=PBrr6UpHvs_FtYsTD_y2jre2tYNcqneOGwkm1omY2jk,46718
+sympy/functions/special/singularity_functions.py,sha256=5yDHvwQN16YS0L7C0kj34XI3o0q-_k4OgxIURo_9SZQ,7988
+sympy/functions/special/spherical_harmonics.py,sha256=Ivwi76IeFMZhukm_TnvJYT4QEqyW2DrGF5rj4_B-dJg,10997
+sympy/functions/special/tensor_functions.py,sha256=ZzMc93n_4Y4L-WVd9nmMh0nZQPYMB7uKqcnaFdupEXE,12277
+sympy/functions/special/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/functions/special/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_bessel.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_beta_functions.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_bsplines.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_delta_functions.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_elliptic_integrals.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_error_functions.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_gamma_functions.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_hyper.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_mathieu.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_singularity_functions.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_spec_polynomials.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_spherical_harmonics.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_tensor_functions.cpython-310.pyc,,
+sympy/functions/special/tests/__pycache__/test_zeta_functions.cpython-310.pyc,,
+sympy/functions/special/tests/test_bessel.py,sha256=Gx6cjelB0aXGDKMwG5O-wpPjyt6rFJVaNenNmD5Qb3E,34191
+sympy/functions/special/tests/test_beta_functions.py,sha256=yxfgu-wmNEeMfaFABiDHYmuZpZup9FTp0ZYerlc6hhc,3786
+sympy/functions/special/tests/test_bsplines.py,sha256=6UYg7IqXTi8fcSOut8TEzNVkxIA4ff-CyG22qJnbIYA,7145
+sympy/functions/special/tests/test_delta_functions.py,sha256=8xhSWG4SLL86z1QKFfLk_3b--bCrxjvCaxHlODBVToE,7138
+sympy/functions/special/tests/test_elliptic_integrals.py,sha256=AazZYMow9szbvC_WfK10c5j-LQRAzno6V1WJCbtp4MU,6860
+sympy/functions/special/tests/test_error_functions.py,sha256=0U78aiO9zvGOrqQ7tiVTUhqnpj0FDD9shNb-8AOhp68,31222
+sympy/functions/special/tests/test_gamma_functions.py,sha256=exHmFEtyZMJhVYTWFSBlMZhWdhQk6M2cjgNkvImD7o4,29910
+sympy/functions/special/tests/test_hyper.py,sha256=El56dyyIzJkyBV_1gH-bGX8iF6Jzn0EhpmJEK57gvKs,15990
+sympy/functions/special/tests/test_mathieu.py,sha256=pqoFbnC84NDL6EQkigFtx5OQ1RFYppckTjzsm9XT0PY,1282
+sympy/functions/special/tests/test_singularity_functions.py,sha256=tqMJQIOOsBrveXctXPkPFIYdThG-wwKsjfdRHshEpfw,5467
+sympy/functions/special/tests/test_spec_polynomials.py,sha256=wuiZaR_LwaM8SlNuGl3B1p4eOHC_-zZVSXMPNfzKRB4,19561
+sympy/functions/special/tests/test_spherical_harmonics.py,sha256=pUFtFpNPBnJTdnqou0jniSchijyh1rdzKv8H24RT9FU,3850
+sympy/functions/special/tests/test_tensor_functions.py,sha256=bblSDkPABZ6N1j1Rb2Bb5TZIzZoK1D8ks3fHizi69ZI,5546
+sympy/functions/special/tests/test_zeta_functions.py,sha256=2r59_aC0QOXQsBNXqxsHPr2PkJExusI6qvSydZBPbfw,10474
+sympy/functions/special/zeta_functions.py,sha256=IdshdejjEv60nNZ4gQOVG0RIgxyo22psmglxZnzwHHw,24064
+sympy/galgebra.py,sha256=yEosUPSnhLp9a1NWXvpCLoU20J6TQ58XNIvw07POkVk,123
+sympy/geometry/__init__.py,sha256=BU2MiKm8qJyZJ_hz1qC-3nFJTPEcuvx4hYd02jHjqSM,1240
+sympy/geometry/__pycache__/__init__.cpython-310.pyc,,
+sympy/geometry/__pycache__/curve.cpython-310.pyc,,
+sympy/geometry/__pycache__/ellipse.cpython-310.pyc,,
+sympy/geometry/__pycache__/entity.cpython-310.pyc,,
+sympy/geometry/__pycache__/exceptions.cpython-310.pyc,,
+sympy/geometry/__pycache__/line.cpython-310.pyc,,
+sympy/geometry/__pycache__/parabola.cpython-310.pyc,,
+sympy/geometry/__pycache__/plane.cpython-310.pyc,,
+sympy/geometry/__pycache__/point.cpython-310.pyc,,
+sympy/geometry/__pycache__/polygon.cpython-310.pyc,,
+sympy/geometry/__pycache__/util.cpython-310.pyc,,
+sympy/geometry/curve.py,sha256=F7b6XrlhUZ0QWLDoZJVojWfC5LeyOU-69OTFnYAREg8,10170
+sympy/geometry/ellipse.py,sha256=MMuWG_YOUngfW5137yu6iAOugjRxehrfkgidvD1J6RM,50851
+sympy/geometry/entity.py,sha256=fvHhtSb6RvE6v-8yMyCNvm0ekLPoO7EO9J8TEsGyQGU,20668
+sympy/geometry/exceptions.py,sha256=XtUMA44UTdrBWt771jegFC-TXsobhDiI-10TDH_WNFM,131
+sympy/geometry/line.py,sha256=JSc0dcjKV2m1R6b7tIaPjffhdGz3ZdtjFKvsH72Luqo,78343
+sympy/geometry/parabola.py,sha256=JalFtxCzBR8oE09agrzDtpGI9hrP4GJ-4zkg2r8Yj94,10707
+sympy/geometry/plane.py,sha256=A-CgWLjFC9k_OjyqJFaq7kDAdsSqmYET4aZl_eH2U10,26928
+sympy/geometry/point.py,sha256=8DtGkhQUyleVIi5WfptZOEk2zn0kwVAZv5aeNI498tg,36652
+sympy/geometry/polygon.py,sha256=hI1bRJdjCgsSKlPejO69z65LKO9iakcHx9ftJfSSLFA,81664
+sympy/geometry/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/geometry/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/geometry/tests/__pycache__/test_curve.cpython-310.pyc,,
+sympy/geometry/tests/__pycache__/test_ellipse.cpython-310.pyc,,
+sympy/geometry/tests/__pycache__/test_entity.cpython-310.pyc,,
+sympy/geometry/tests/__pycache__/test_geometrysets.cpython-310.pyc,,
+sympy/geometry/tests/__pycache__/test_line.cpython-310.pyc,,
+sympy/geometry/tests/__pycache__/test_parabola.cpython-310.pyc,,
+sympy/geometry/tests/__pycache__/test_plane.cpython-310.pyc,,
+sympy/geometry/tests/__pycache__/test_point.cpython-310.pyc,,
+sympy/geometry/tests/__pycache__/test_polygon.cpython-310.pyc,,
+sympy/geometry/tests/__pycache__/test_util.cpython-310.pyc,,
+sympy/geometry/tests/test_curve.py,sha256=xL4uRWAal4mXZxuQhcs9QOhs6MheCbFNyH1asq_a2IQ,4479
+sympy/geometry/tests/test_ellipse.py,sha256=oe9Bvye-kLjdhP3bwJPB0N1-wDL3cmVwYLhEhrGAPHk,25735
+sympy/geometry/tests/test_entity.py,sha256=0pBKdmRIETq0pJYjxRj34B0j-o56f4iqzJy9J4buU7U,3897
+sympy/geometry/tests/test_geometrysets.py,sha256=vvOWrFrJuNAFgbrVh1wPY94o-H-85FWlnIyyo2Kst9c,1911
+sympy/geometry/tests/test_line.py,sha256=D2yAOzCt80dmd7hP_l2A7aaWS8Mtw7RCkqA99L7McXI,37421
+sympy/geometry/tests/test_parabola.py,sha256=kd0RU5sGOcfp6jgwgXMtvT2B6kG1-M3-iGOLnUJfZOw,6150
+sympy/geometry/tests/test_plane.py,sha256=QRcfoDsJtCtcvjFb18hBEHupycLgAT2OohF6GpNShyQ,12525
+sympy/geometry/tests/test_point.py,sha256=YO67zimsEVO07KGyLJVTVWa9795faGXJoFFcd2K4azc,16412
+sympy/geometry/tests/test_polygon.py,sha256=79iBkQjpX-CdO1mtMaX3lGvVfkopBiFhLC3QfWCreWA,27138
+sympy/geometry/tests/test_util.py,sha256=-LXPTiibkSQ0TO7ia6a-NYfMm2OJxw15Er7tr99dTVU,6204
+sympy/geometry/util.py,sha256=ZMXFHU2sxVAvc4_ywomdJC67hHCU-EyJN2SzW5TB9Zw,20170
+sympy/holonomic/__init__.py,sha256=BgHIokaSOo3nwJlGO_caJHz37n6yoA8GeM9Xjn4zMpc,784
+sympy/holonomic/__pycache__/__init__.cpython-310.pyc,,
+sympy/holonomic/__pycache__/holonomic.cpython-310.pyc,,
+sympy/holonomic/__pycache__/holonomicerrors.cpython-310.pyc,,
+sympy/holonomic/__pycache__/numerical.cpython-310.pyc,,
+sympy/holonomic/__pycache__/recurrence.cpython-310.pyc,,
+sympy/holonomic/holonomic.py,sha256=XxLDC4TG_6ddHMQ5yZNWNJFb6s7n5Tg09kbufyiwVVw,94849
+sympy/holonomic/holonomicerrors.py,sha256=qDyUoGbrRjPtVax4SeEEf_o6-264mASEZO_rZETXH5o,1193
+sympy/holonomic/numerical.py,sha256=m35A7jO54xMNgA4w5Edn1i_SHbXWBlpQTRLMR8GgbZE,2730
+sympy/holonomic/recurrence.py,sha256=JFgSOT3hu6d7Mh9sdqvSxC3RxlVlH_cygsXpsX97YMY,10987
+sympy/holonomic/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/holonomic/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/holonomic/tests/__pycache__/test_holonomic.cpython-310.pyc,,
+sympy/holonomic/tests/__pycache__/test_recurrence.cpython-310.pyc,,
+sympy/holonomic/tests/test_holonomic.py,sha256=MrN7GVk7_zFWwDSfIhtD3FgoFgmFGlTpjOnnIzdP010,34760
+sympy/holonomic/tests/test_recurrence.py,sha256=HEbA3yCnIw4IDFV1rb3GjmM4SCDDZL7aYRlD7PWuQFg,1056
+sympy/integrals/__init__.py,sha256=aZr2Qn6i-gvFGH_5Hl_SRn2-Bd9Sf4zQdwo9VGLSeNY,1844
+sympy/integrals/__pycache__/__init__.cpython-310.pyc,,
+sympy/integrals/__pycache__/deltafunctions.cpython-310.pyc,,
+sympy/integrals/__pycache__/heurisch.cpython-310.pyc,,
+sympy/integrals/__pycache__/integrals.cpython-310.pyc,,
+sympy/integrals/__pycache__/intpoly.cpython-310.pyc,,
+sympy/integrals/__pycache__/laplace.cpython-310.pyc,,
+sympy/integrals/__pycache__/manualintegrate.cpython-310.pyc,,
+sympy/integrals/__pycache__/meijerint.cpython-310.pyc,,
+sympy/integrals/__pycache__/meijerint_doc.cpython-310.pyc,,
+sympy/integrals/__pycache__/prde.cpython-310.pyc,,
+sympy/integrals/__pycache__/quadrature.cpython-310.pyc,,
+sympy/integrals/__pycache__/rationaltools.cpython-310.pyc,,
+sympy/integrals/__pycache__/rde.cpython-310.pyc,,
+sympy/integrals/__pycache__/risch.cpython-310.pyc,,
+sympy/integrals/__pycache__/singularityfunctions.cpython-310.pyc,,
+sympy/integrals/__pycache__/transforms.cpython-310.pyc,,
+sympy/integrals/__pycache__/trigonometry.cpython-310.pyc,,
+sympy/integrals/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/integrals/benchmarks/__pycache__/__init__.cpython-310.pyc,,
+sympy/integrals/benchmarks/__pycache__/bench_integrate.cpython-310.pyc,,
+sympy/integrals/benchmarks/__pycache__/bench_trigintegrate.cpython-310.pyc,,
+sympy/integrals/benchmarks/bench_integrate.py,sha256=vk6wAO1bqzFT9oW4qsW7nKGfc_gP0XaB5PMYKx5339Q,396
+sympy/integrals/benchmarks/bench_trigintegrate.py,sha256=8XU3uB3mcavigvzHQZA7H1sHI32zgT-9RkSnLa-Y3Vc,305
+sympy/integrals/deltafunctions.py,sha256=ysIQLdRBcG_YR-bVDoxt-sxEVU8TG77oSgM-J0gI0mE,7435
+sympy/integrals/heurisch.py,sha256=R3G0RXskAxXum4CyQ1AV1BNeVbcmvp_Ipg0mOcDFRPo,26296
+sympy/integrals/integrals.py,sha256=bC0WtE12WsV7WFzmZrKzct2nAbHUdbq6dKytpY7ZtlY,64606
+sympy/integrals/intpoly.py,sha256=qs1fQrEMKbsXwgfkBDUpEZ9f7x65Bdua8KS2lLBtLv4,43274
+sympy/integrals/laplace.py,sha256=eL7HjKsSLAspdo8BswrYADs2wd2U-9YEkinSD5JVjow,63518
+sympy/integrals/manualintegrate.py,sha256=E7NaMsl02Hy2lHU8mPcxNSsCQnQjVNPJqDrMyEOkAKw,75469
+sympy/integrals/meijerint.py,sha256=Yf80w6COiqdrvYLyMwS1P2-SGsNR1B7cqCmaERhx76U,80746
+sympy/integrals/meijerint_doc.py,sha256=mGlIu2CLmOulSGiN7n7kQ9w2DTcQfExJPaf-ee6HXlY,1165
+sympy/integrals/prde.py,sha256=VL_JEu6Bqhl8wSML1UY9nilOjafhkjFenVGCVV1pVbc,52021
+sympy/integrals/quadrature.py,sha256=6Bg3JmlIjIduIfaGfNVcwNfSrgEiLOszcN8WPzsXNqE,17064
+sympy/integrals/rationaltools.py,sha256=1OMhRhMBQ7igw2_YX5WR4q69QB_H0zMtGFtUkcbVD3Q,10922
+sympy/integrals/rde.py,sha256=AuiPDqP2awC4UlWJrsfNCn1l3OAQuZl64WI-lE2M5Ds,27392
+sympy/integrals/risch.py,sha256=S9r1kKx6WoJHomPWgNL2KCe73GWS8jIJ0AZt95QwBFI,67674
+sympy/integrals/singularityfunctions.py,sha256=BegUcpUW96FY9f8Yn0jHjK0LjCkM28NnCVg5S9cTWwU,2227
+sympy/integrals/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/integrals/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_deltafunctions.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_failing_integrals.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_heurisch.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_integrals.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_intpoly.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_laplace.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_lineintegrals.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_manual.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_meijerint.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_prde.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_quadrature.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_rationaltools.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_rde.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_risch.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_singularityfunctions.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_transforms.cpython-310.pyc,,
+sympy/integrals/tests/__pycache__/test_trigonometry.cpython-310.pyc,,
+sympy/integrals/tests/test_deltafunctions.py,sha256=ivFjS-WlLQ4aMqjVS7ZzMChP2Mmw_JUPnwI9otiLnvs,3709
+sympy/integrals/tests/test_failing_integrals.py,sha256=hQJc23KfK0bUmbj4W3C04QdJ0K17_ghMVfTLuKjUBPc,7074
+sympy/integrals/tests/test_heurisch.py,sha256=r4RjbSRYScuzMXA_EjrxalO1T1G0i5ZsAmDQcrhFU3s,12468
+sympy/integrals/tests/test_integrals.py,sha256=jwaCvWJoW_5_CTkDDBJeRDtLCUHdYzyzs-f7GyJDaVc,77122
+sympy/integrals/tests/test_intpoly.py,sha256=NzGhkR2pUMfd8lIU2cFR9bFa0J89RzpHs3zDggAWtXo,37445
+sympy/integrals/tests/test_laplace.py,sha256=FQoGfwyNoIwqdVc5Nk_RcOIJU70EaW-ipmoQtq7nFLk,28893
+sympy/integrals/tests/test_lineintegrals.py,sha256=zcPJ2n7DYt9KsgAe38t0gq3ARApUlb-kBahLThuRcq8,450
+sympy/integrals/tests/test_manual.py,sha256=arqxMdxUJkFIoy98rOirOTIwj623wHx9NqoupZLqkU8,33231
+sympy/integrals/tests/test_meijerint.py,sha256=jglmmX-AtkvwJgqQafBOKdaygrm14QJ8H-NfheNpFME,32265
+sympy/integrals/tests/test_prde.py,sha256=2BZmEDasdx_3l64-9hioArysDj6Nl520GpQN2xnEE_A,16360
+sympy/integrals/tests/test_quadrature.py,sha256=iFMdqck36gkL-yksLflawIOYmw-0PzO2tFj_qdK6Hjg,19919
+sympy/integrals/tests/test_rationaltools.py,sha256=6sNOkkZmOvCAPTwXrdU6hehDFleXYyakheX2KQaUHWY,5299
+sympy/integrals/tests/test_rde.py,sha256=4d3vJupa-hRN4yNDISY8IC3rSI_cZW5BbtxoZm14y-Y,9571
+sympy/integrals/tests/test_risch.py,sha256=HaWg0JnErdrNzNmVfyz2Zz4XAgZPVVpZPt6Map3sQ58,38630
+sympy/integrals/tests/test_singularityfunctions.py,sha256=CSrHie59_NjNZ9B2GaHzKPNsMzxm5Kh6GuxlYk8zTuI,1266
+sympy/integrals/tests/test_transforms.py,sha256=Of9XEpzwB0CGy722z41oOdUEbfmAscsAhMute2_8oeA,27077
+sympy/integrals/tests/test_trigonometry.py,sha256=moMYr_Prc7gaYPjBK0McLjRpTEes2veUlN0vGv9UyEA,3869
+sympy/integrals/transforms.py,sha256=R625sYSQkNC1s9MiFdk0JzROTmoYjhgBTxoFE5Pc3rQ,51636
+sympy/integrals/trigonometry.py,sha256=iOoBDGFDZx8PNbgL3XeZEd80I8ro0WAizNuC4P-u8x0,11083
+sympy/interactive/__init__.py,sha256=yokwEO2HF3eN2Xu65JSpUUsN4iYmPvvU4m_64f3Q33o,251
+sympy/interactive/__pycache__/__init__.cpython-310.pyc,,
+sympy/interactive/__pycache__/printing.cpython-310.pyc,,
+sympy/interactive/__pycache__/session.cpython-310.pyc,,
+sympy/interactive/__pycache__/traversal.cpython-310.pyc,,
+sympy/interactive/printing.py,sha256=j7iVj-AhX3qBrQibPKtDNTMToCGhF6UKTdpUO8ME5CM,22700
+sympy/interactive/session.py,sha256=sG546e0mAtT0OrFkYNVM7QGvkWrDhAQZ5E1hfx03iBQ,15329
+sympy/interactive/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/interactive/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/interactive/tests/__pycache__/test_interactive.cpython-310.pyc,,
+sympy/interactive/tests/__pycache__/test_ipython.cpython-310.pyc,,
+sympy/interactive/tests/test_interactive.py,sha256=Pbopy9lODrd_P46_xxlWxLwqPfG6_4J3CWWC4IqfDL4,485
+sympy/interactive/tests/test_ipython.py,sha256=iYNmuETjveHBVpOywyv_jStQWkFwf1GuEBjoZUVhxK4,11799
+sympy/interactive/traversal.py,sha256=XbccdO6msNAvrG6FFJl2n4XmIiRISnvda4QflfEPg7U,3189
+sympy/liealgebras/__init__.py,sha256=K8tw7JqG33_y6mYl1LTr8ZNtKH5L21BqkjCHfLhP4aA,79
+sympy/liealgebras/__pycache__/__init__.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/cartan_matrix.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/cartan_type.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/dynkin_diagram.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/root_system.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/type_a.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/type_b.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/type_c.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/type_d.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/type_e.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/type_f.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/type_g.cpython-310.pyc,,
+sympy/liealgebras/__pycache__/weyl_group.cpython-310.pyc,,
+sympy/liealgebras/cartan_matrix.py,sha256=yr2LoZi_Gxmu-EMKgFuPOPNMYPOsxucLAS6oRpSYi2U,524
+sympy/liealgebras/cartan_type.py,sha256=xLklg8Y5s40je6sXwmLmG9iyYi9YEk9KoxTSFz1GtdI,1790
+sympy/liealgebras/dynkin_diagram.py,sha256=ZzGuBGNOJ3lPDdJDs4n8hvGbz6wLhC5mwb8zFkDmyPw,535
+sympy/liealgebras/root_system.py,sha256=GwWc4iploE7ogS9LTOkkjsij1mbPMQxbV2_pvNriYbE,6727
+sympy/liealgebras/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/liealgebras/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_cartan_matrix.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_cartan_type.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_dynkin_diagram.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_root_system.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_type_A.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_type_B.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_type_C.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_type_D.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_type_E.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_type_F.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_type_G.cpython-310.pyc,,
+sympy/liealgebras/tests/__pycache__/test_weyl_group.cpython-310.pyc,,
+sympy/liealgebras/tests/test_cartan_matrix.py,sha256=KCsakn0fHKHRbIUcrUkHBIKkudl3_ISUdHrfJy-UOd4,303
+sympy/liealgebras/tests/test_cartan_type.py,sha256=t5PvYYDXbNIFL3CV59Je7SBIAeLLf-W3mOINPUoHK6E,339
+sympy/liealgebras/tests/test_dynkin_diagram.py,sha256=DSixbnt_yd0zrhKzXW_XqkXWXYe1Dk2MmXN-Rjb1dGg,260
+sympy/liealgebras/tests/test_root_system.py,sha256=YmGBdUeJ4PkLSfAfRgTF7GW62RCEd5nH27FSX9UaG5Q,927
+sympy/liealgebras/tests/test_type_A.py,sha256=x7QmpjxsGmXol-IYVtN1lmIOmM3HLYwpX1tSG5h6FMM,657
+sympy/liealgebras/tests/test_type_B.py,sha256=Gw0GP24wP2rPn38Wwla9W7BwWH4JtCGpaprZb5W6JVY,642
+sympy/liealgebras/tests/test_type_C.py,sha256=ysSy-vzE9lNwzAunrmvnFkLBoJwF7W2On7QpqS6RI1s,927
+sympy/liealgebras/tests/test_type_D.py,sha256=qrO4oCjrjkp1uDvrNtbgANVyaOExqOLNtIpIxD1uH0U,764
+sympy/liealgebras/tests/test_type_E.py,sha256=suG6DaZ2R74ovnJrY6GGyiu9A6FjUkouRNUFPnEczqk,775
+sympy/liealgebras/tests/test_type_F.py,sha256=yUQJ7LzTemv4Cd1XW_dr3x7KEI07BahsWAyJfXLS1eA,1378
+sympy/liealgebras/tests/test_type_G.py,sha256=wVa6qcAHbdrc9dA63samexHL35cWWJS606pom-6mH2Q,548
+sympy/liealgebras/tests/test_weyl_group.py,sha256=HrzojRECbhNUsdLFQAXYnJEt8LfktOSJZuqVE45aRnc,1501
+sympy/liealgebras/type_a.py,sha256=l5SUJknj1xLgwRVMuOsVmwbcxY2V6PU59jBtssylKH4,4314
+sympy/liealgebras/type_b.py,sha256=50xdcrec1nFFtyUWOmP2Qm9ZW1zpbrgwbz_YPKp55Go,4563
+sympy/liealgebras/type_c.py,sha256=bXGqPiLN3x4NAsM-ZHKJPxFO6RY7lDZUckCarIODEi0,4439
+sympy/liealgebras/type_d.py,sha256=Rgh7KpI5FQnDai6KVfoz_TREYaKxqvINDXu6Zdu-7EQ,4694
+sympy/liealgebras/type_e.py,sha256=Uf-QzI-6bRJeI91stGHsiesknwBEVYIjZaiNP-2bIiY,9780
+sympy/liealgebras/type_f.py,sha256=boKDhOxRcAWDBHsEYk4j14vUvT0mO3UkRq6QzqoPOes,4417
+sympy/liealgebras/type_g.py,sha256=Ife98dGPtarGd-ii8hJbXdB0SMsct4okDkSX2wLN8XI,2965
+sympy/liealgebras/weyl_group.py,sha256=5YFA8qC4GWDM0WLNR_6VgpuNFZDfyDA7fBFjBcZaLgA,14557
+sympy/logic/__init__.py,sha256=RfoXrq9MESnXdL7PkwpYEfWeaxH6wBPHiE4zCgLKvk0,456
+sympy/logic/__pycache__/__init__.cpython-310.pyc,,
+sympy/logic/__pycache__/boolalg.cpython-310.pyc,,
+sympy/logic/__pycache__/inference.cpython-310.pyc,,
+sympy/logic/algorithms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/logic/algorithms/__pycache__/__init__.cpython-310.pyc,,
+sympy/logic/algorithms/__pycache__/dpll.cpython-310.pyc,,
+sympy/logic/algorithms/__pycache__/dpll2.cpython-310.pyc,,
+sympy/logic/algorithms/__pycache__/minisat22_wrapper.cpython-310.pyc,,
+sympy/logic/algorithms/__pycache__/pycosat_wrapper.cpython-310.pyc,,
+sympy/logic/algorithms/dpll.py,sha256=zqiZDm1oD5sNxFqm_0Hen6NjfILIDp5uRgEOad1vYXI,9188
+sympy/logic/algorithms/dpll2.py,sha256=UbBxJjiUaqBbQPaivtrv3ZhNNuHHdUsJ5Us2vy8QmxA,20317
+sympy/logic/algorithms/minisat22_wrapper.py,sha256=uINcvkIHGWYJb8u-Q0OgnSgaHfVUd9tYYFbBAVNiASo,1317
+sympy/logic/algorithms/pycosat_wrapper.py,sha256=0vNFTbu9-YhSfjwYTsZsP_Z4HM8WpL11-xujLBS1kYg,1207
+sympy/logic/boolalg.py,sha256=-t3WrVge-B7WmoUF25BfOxK15rsC0tIfigdcCcgvbdQ,114180
+sympy/logic/inference.py,sha256=18eETh6ObPCteJJgrrtrkCK031ymDQdvQbveaUymCcM,8542
+sympy/logic/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/logic/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/logic/tests/__pycache__/test_boolalg.cpython-310.pyc,,
+sympy/logic/tests/__pycache__/test_dimacs.cpython-310.pyc,,
+sympy/logic/tests/__pycache__/test_inference.cpython-310.pyc,,
+sympy/logic/tests/test_boolalg.py,sha256=L6hUEjRIhn2Dh65BDXifDrgXHuvBoATT89-6dYZHzgo,48838
+sympy/logic/tests/test_dimacs.py,sha256=EK_mA_k9zBLcQLTOKTZVrGhnGuQNza5mwXDQD_f-X1c,3886
+sympy/logic/tests/test_inference.py,sha256=DOlgb4clEULjMBp0cG3ZdCrXN8vFdxJZmSDf-13bWSA,13246
+sympy/logic/utilities/__init__.py,sha256=WTn2vBgHcmhONRWI79PdMYNk8UxYDzsxRlZWuc-wtNI,55
+sympy/logic/utilities/__pycache__/__init__.cpython-310.pyc,,
+sympy/logic/utilities/__pycache__/dimacs.cpython-310.pyc,,
+sympy/logic/utilities/dimacs.py,sha256=aaHdXUOD8kZHWbTzuZc6c5xMM8O1oHbRxyOxPpVMMdQ,1663
+sympy/matrices/__init__.py,sha256=BUbgKPUXTwvrhDbQjjG6c3jFBwmQ0WfRiMQTTFnPL90,2611
+sympy/matrices/__pycache__/__init__.cpython-310.pyc,,
+sympy/matrices/__pycache__/common.cpython-310.pyc,,
+sympy/matrices/__pycache__/decompositions.cpython-310.pyc,,
+sympy/matrices/__pycache__/dense.cpython-310.pyc,,
+sympy/matrices/__pycache__/determinant.cpython-310.pyc,,
+sympy/matrices/__pycache__/eigen.cpython-310.pyc,,
+sympy/matrices/__pycache__/graph.cpython-310.pyc,,
+sympy/matrices/__pycache__/immutable.cpython-310.pyc,,
+sympy/matrices/__pycache__/inverse.cpython-310.pyc,,
+sympy/matrices/__pycache__/matrices.cpython-310.pyc,,
+sympy/matrices/__pycache__/normalforms.cpython-310.pyc,,
+sympy/matrices/__pycache__/reductions.cpython-310.pyc,,
+sympy/matrices/__pycache__/repmatrix.cpython-310.pyc,,
+sympy/matrices/__pycache__/solvers.cpython-310.pyc,,
+sympy/matrices/__pycache__/sparse.cpython-310.pyc,,
+sympy/matrices/__pycache__/sparsetools.cpython-310.pyc,,
+sympy/matrices/__pycache__/subspaces.cpython-310.pyc,,
+sympy/matrices/__pycache__/utilities.cpython-310.pyc,,
+sympy/matrices/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/matrices/benchmarks/__pycache__/__init__.cpython-310.pyc,,
+sympy/matrices/benchmarks/__pycache__/bench_matrix.cpython-310.pyc,,
+sympy/matrices/benchmarks/bench_matrix.py,sha256=vGMlg-2il2cFeAWrf0NJ6pzPX3Yd3ZQMxFgQ4q5ILQE,306
+sympy/matrices/common.py,sha256=LnBG-5vXn6c8Oe9C-Q4ziQvNyJSu5l_4DirQ-VZ2rfM,93370
+sympy/matrices/decompositions.py,sha256=MYLr-Qt5wZTDBrnVmBAudOM5QYIgkXWtLDA0coLWk50,48074
+sympy/matrices/dense.py,sha256=cTAq0K3GnLBiNkCgZNVr9rLt8H3rrnyhHaeLc_YTBok,30375
+sympy/matrices/determinant.py,sha256=IxURxqbmux4jXwkIXMm0cxJ3oygY6InrqkVo4ZnD-nk,30118
+sympy/matrices/eigen.py,sha256=7vgLspYAIVmiFtVJ9wNiVLKrQSTGhqLtPR_wqdX0WRc,39786
+sympy/matrices/expressions/__init__.py,sha256=IMqXCSsPh0Vp_MC9HZTudA5DGM4WBq_yB-Bst0azyM8,1692
+sympy/matrices/expressions/__pycache__/__init__.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/_shape.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/adjoint.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/applyfunc.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/blockmatrix.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/companion.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/determinant.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/diagonal.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/dotproduct.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/factorizations.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/fourier.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/funcmatrix.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/hadamard.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/inverse.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/kronecker.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/matadd.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/matexpr.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/matmul.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/matpow.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/permutation.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/sets.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/slice.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/special.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/trace.cpython-310.pyc,,
+sympy/matrices/expressions/__pycache__/transpose.cpython-310.pyc,,
+sympy/matrices/expressions/_shape.py,sha256=fgKRp_3LrDvFYBYz2M0BqTbjAlKLtx6Gpy9g78wHpVQ,3058
+sympy/matrices/expressions/adjoint.py,sha256=CbkYP2Hi9JVb7WO5HiCE14fwOn16fT3Le5HfV30cpCQ,1572
+sympy/matrices/expressions/applyfunc.py,sha256=wFgcMOp6uakZ6wkkF7mB7GwM35GS5SGzXz1LCeJbemE,6749
+sympy/matrices/expressions/blockmatrix.py,sha256=eKQ4GlVm4_6i2bah7T95qtJdXWLJJ28yry27ajGGfIo,31809
+sympy/matrices/expressions/companion.py,sha256=lXUJRbjQR6e1mdHQdJwNIJXMW80XmKbOVqNvUXjB57U,1705
+sympy/matrices/expressions/determinant.py,sha256=wmtIB5q1_cJpnHSSsQT2MjE6wJdDV1RtZudGOzDJmG4,3173
+sympy/matrices/expressions/diagonal.py,sha256=NtIFAfpoI_jhElfkJ6WCxc4r9iWN8VBOR3LLxKEzJsE,6326
+sympy/matrices/expressions/dotproduct.py,sha256=sKdUhwVKTB3LEvd8xMwCDexNoQ1Dz43DCYsmm3UwFWw,1911
+sympy/matrices/expressions/factorizations.py,sha256=zFNjMBsJqhsIcDD8Me4W8-Q-TV89WptfG3Dd9yK_tPE,1456
+sympy/matrices/expressions/fourier.py,sha256=dvaftgB9jgkR_8ETyhzyVLtf1ZJu_wQC-ZbpTYMXZGE,2094
+sympy/matrices/expressions/funcmatrix.py,sha256=q6R75wLn0UdV4xJdVJUrNaofV1k1egXLLQdBeZcPtiY,3520
+sympy/matrices/expressions/hadamard.py,sha256=S-vY0RFuV7Xyf6kBwgQiGXJnci7j5gpxN8nazW1IGwE,13918
+sympy/matrices/expressions/inverse.py,sha256=ZJSzuTgKz01zmb3dnmFKn6AmR6gXd_5zEYzHkk8cF2o,2732
+sympy/matrices/expressions/kronecker.py,sha256=_JPrC-FruT4N2Sgl4hQdjThjFFfHsHGTLubvU4m3uvU,13398
+sympy/matrices/expressions/matadd.py,sha256=LwznSmZRJQt_sDeq_lcXsUXlSyrcE8J-cwgvi9saUDg,4771
+sympy/matrices/expressions/matexpr.py,sha256=1pswXMAOjYk3YwUhPxCoax2lIZ1rQgnskPdlE1gWhHY,27471
+sympy/matrices/expressions/matmul.py,sha256=bewNxpEnQ0WaVzHzpVgfF_5VHdBLroewZbBAxJTvHgE,15586
+sympy/matrices/expressions/matpow.py,sha256=gF0cscUBvOuAzsGbzN6VgkMPSgz_2_3wShl67B6YGo8,4916
+sympy/matrices/expressions/permutation.py,sha256=gGIht-JI1zWyZz7VPvm5S1Ae2i-P0WUAJl3euLRXWtM,8046
+sympy/matrices/expressions/sets.py,sha256=KxGHZ-4p4nALQBj2f1clG43lB4qYu6M2P0zpubiH-ik,2001
+sympy/matrices/expressions/slice.py,sha256=aNdY1Ey4VJR-UCvoORX2kh2DmA6QjOp-waENvWg8WVE,3355
+sympy/matrices/expressions/special.py,sha256=UH0sOc_XhRHaW5ERyVVHtNTlmfHYiUdRmYzXjcSbCzE,7495
+sympy/matrices/expressions/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/matrices/expressions/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_adjoint.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_applyfunc.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_blockmatrix.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_companion.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_derivatives.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_determinant.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_diagonal.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_dotproduct.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_factorizations.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_fourier.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_funcmatrix.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_hadamard.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_indexing.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_inverse.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_kronecker.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_matadd.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_matexpr.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_matmul.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_matpow.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_permutation.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_sets.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_slice.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_special.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_trace.cpython-310.pyc,,
+sympy/matrices/expressions/tests/__pycache__/test_transpose.cpython-310.pyc,,
+sympy/matrices/expressions/tests/test_adjoint.py,sha256=cxOc334yNSI9MazhG9HT8s1OCXjkDWr3Zj2JnyHS3Z4,1065
+sympy/matrices/expressions/tests/test_applyfunc.py,sha256=mxTJaoB4Ze50lk-2TgVopmrrbuQbEqUsZwc3K1H8w-Q,3522
+sympy/matrices/expressions/tests/test_blockmatrix.py,sha256=EHJWm2dniNmf1CfODQSPm_HCCV77Ia0FbeNigsYJXZY,15695
+sympy/matrices/expressions/tests/test_companion.py,sha256=Lam6r-cSOokjhSlJws55Kq-gL5_pHfeV_Xuvmn5PkRU,1657
+sympy/matrices/expressions/tests/test_derivatives.py,sha256=9mBeaAZDX7-JbYs6tMClNuGDygETVN_dCXSlHmyAhwg,15991
+sympy/matrices/expressions/tests/test_determinant.py,sha256=QutUKtr35GCZ4iS2H1WTzMwa0jAvL0prcS82Untgr5k,1989
+sympy/matrices/expressions/tests/test_diagonal.py,sha256=3L6Vs_Yr36a8dgIqAeIcNEf0xcVyeyGhANNu0dlIpwI,4516
+sympy/matrices/expressions/tests/test_dotproduct.py,sha256=Zkv2N6oRPm0-sN4PFwsVFrM5Y_qv4x2gWqQQQD86hBY,1171
+sympy/matrices/expressions/tests/test_factorizations.py,sha256=6UPA_UhCL5JPbaQCOatMnxhGnQ-aIHmb3lXqbwrSoIE,786
+sympy/matrices/expressions/tests/test_fourier.py,sha256=0eD69faoHXBcuQ7g2Q31fqs-gyR_Xfe-gv-7DXhJh_c,1638
+sympy/matrices/expressions/tests/test_funcmatrix.py,sha256=zmOEcXHCK2MziwVBJb7iq9Q-Lbl4bbCQ_RAk27c7qUU,2381
+sympy/matrices/expressions/tests/test_hadamard.py,sha256=WDelP7lQ9KqsalOOlWHaZq38nTijkRUMAXMcAvU42SM,4610
+sympy/matrices/expressions/tests/test_indexing.py,sha256=wwYQa7LNlzhBA5fU50gPyE8cqaJf0s3O70PUx4eNCEA,12038
+sympy/matrices/expressions/tests/test_inverse.py,sha256=33Ui_vXZBJR1gMirb8c5xHDnx2jpVjWoVpYmVuZQoJg,2060
+sympy/matrices/expressions/tests/test_kronecker.py,sha256=e5H6av3ioOn8jkjyDBrT3NEmCkyHbN6ZEHOlyB9OYLk,5366
+sympy/matrices/expressions/tests/test_matadd.py,sha256=DkK_RuIFA9H9HoWcegtPWRHfQNg17h5CfqUD26E8u8E,1862
+sympy/matrices/expressions/tests/test_matexpr.py,sha256=lBuqWCwSevU7JL66eoHWrxL5gIvaWmkminDoqFmpyKA,17409
+sympy/matrices/expressions/tests/test_matmul.py,sha256=MuMIzP-ouiuRuTU5PmBtU-Xk_0Btu4mym-C20M8lN58,5963
+sympy/matrices/expressions/tests/test_matpow.py,sha256=3tRbEmZi2gZTmkBm7mAWUDbX4jwEfC8tC4kYoOuzaUg,7304
+sympy/matrices/expressions/tests/test_permutation.py,sha256=93Cqjj2k3aoR3ayMJLdJUa5h1u87bRRxT3I8B4FQsvU,5607
+sympy/matrices/expressions/tests/test_sets.py,sha256=x60NRXGjxS_AE37jGFAOvZdKlWW5m4X0C3OzIukftAM,1410
+sympy/matrices/expressions/tests/test_slice.py,sha256=C7OGAQQTz0YZxZCa7g0m8_0Bqq8jaPRa22JHVSqK7tY,2027
+sympy/matrices/expressions/tests/test_special.py,sha256=Mhg71vnjjb4fm0jZgjDoWW8rAJMBeh8aDCM75gjEpKQ,6496
+sympy/matrices/expressions/tests/test_trace.py,sha256=fRlrw9CfdO3z3SI4TQb1fCUb_zVAndbtyOErEeCTCQ0,3383
+sympy/matrices/expressions/tests/test_transpose.py,sha256=P3wPPRywKnrAppX6gssgD66v0RIcolxqDkCaKGGPVcM,1987
+sympy/matrices/expressions/trace.py,sha256=Iqg3wgO7tTTVZGo1qbXKn99qTss-5znAW6-lLrhuIIs,5348
+sympy/matrices/expressions/transpose.py,sha256=SnfU_CE3_dBQkbi_SkPGqsE8eDgstYuplx7XDxKJIyA,2691
+sympy/matrices/graph.py,sha256=O73INKAbTpnzNdZ7y08ow9U2CmApdn7S9NEsA9LR-XQ,9076
+sympy/matrices/immutable.py,sha256=3NWY8oHiTGdWQR6AfZpg2fOtjRc1KH75yxkITNzCcPg,5425
+sympy/matrices/inverse.py,sha256=pGDQ3-iG9oTMEIuCwrFe0X5lxkvZSF-iMzod8zTv1OA,11409
+sympy/matrices/matrices.py,sha256=thx6Ks7DAts1FUB3l3cu4s3HRJ952mGNlXstLVvR4jM,75508
+sympy/matrices/normalforms.py,sha256=KiiKxxnYEaoA75UJjYFGqVLipgraNlG3Dlh9E2c1Q7k,3808
+sympy/matrices/reductions.py,sha256=GmXqmi3mgxi-jUiSx-B8xN0M7qLLovdDDTzjoMZvQR0,10781
+sympy/matrices/repmatrix.py,sha256=JIt55DuimIz7xN0WjdPzZhQmYbaqnDOT5xCRowPR2pY,21962
+sympy/matrices/solvers.py,sha256=IDDTmTY9FTZsbTwPC4oVG_0ZV8v6ey0JbhCFHulNm2E,22764
+sympy/matrices/sparse.py,sha256=KFRkfQ6iyLekYMc-0VJffNKzf7EeFvIk2zRsFoQwwcI,14675
+sympy/matrices/sparsetools.py,sha256=tzI541P8QW_v1eVJAXgOlo_KK1Xp6u1geawX_tdlBxY,9182
+sympy/matrices/subspaces.py,sha256=uLo4qnP0xvFcFo5hhf6g7pHSHiRbcQ1ATDKwGBxW7CE,3761
+sympy/matrices/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/matrices/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_commonmatrix.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_decompositions.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_determinant.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_eigen.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_graph.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_immutable.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_interactions.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_matrices.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_normalforms.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_reductions.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_solvers.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_sparse.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_sparsetools.cpython-310.pyc,,
+sympy/matrices/tests/__pycache__/test_subspaces.cpython-310.pyc,,
+sympy/matrices/tests/test_commonmatrix.py,sha256=9xvYBhxFJm020OhVDKWIj-m1PGtkvHFwtV7iL67SdUI,38564
+sympy/matrices/tests/test_decompositions.py,sha256=SvjGIKZawYyotzbbwpwpcC7fV-nZRNlDwRhq1AL2AQ0,14417
+sympy/matrices/tests/test_determinant.py,sha256=RYmf2bLWtk8nuyIJuhRpSIFklsfVtAGa2gx2AvAi2TU,13350
+sympy/matrices/tests/test_eigen.py,sha256=guJ56Hd33ScYp2DPLMQ-mj6WtG7JbRB5pvJLv6SeP-0,22773
+sympy/matrices/tests/test_graph.py,sha256=ckfGDCg2M6gluv9XFnfURga8gxd2HTL7aX281s6wy6c,3213
+sympy/matrices/tests/test_immutable.py,sha256=qV1L1i8RWX3ihJx3J-M07s_thfXmuUA1wIRfQnUbqyA,4618
+sympy/matrices/tests/test_interactions.py,sha256=RKQsDDiwuEZxL7-bTJR_ue7DKGbCZYl7pvjjgE7EyEY,2066
+sympy/matrices/tests/test_matrices.py,sha256=WHL_ngSJgL_R4CBPACf4GPfand2bOGvVhjHcjJyFCY4,144201
+sympy/matrices/tests/test_normalforms.py,sha256=JQvFfp53MW8cJhxEkyNvsMmhhD7FVncAkjuGMXu5Fok,3009
+sympy/matrices/tests/test_reductions.py,sha256=xbB-_vbF9IYIzvkaOjsVeFfJHRk3buFRNdxKGZvuZXE,13951
+sympy/matrices/tests/test_solvers.py,sha256=hsbvtRyBhLzTxX62AYqDTn7bltGanT1NwYUecUPEViE,20386
+sympy/matrices/tests/test_sparse.py,sha256=GvXN6kBVldjqoR8WN8I_PjblKhRmyRWvVuLUgZEgugY,23281
+sympy/matrices/tests/test_sparsetools.py,sha256=pjQR6UaEMR92NolB_IGZ9Umk6FPZjvI0vk1Fd4H_C5I,4877
+sympy/matrices/tests/test_subspaces.py,sha256=vnuIyKbViZMa-AHCZ3PI9HbCL_t-LNI70gwbZvzRtzw,3839
+sympy/matrices/utilities.py,sha256=mMnNsDTxGKqiG0JATsM4W9b5jglhacy-vmRw2aZojgY,2117
+sympy/multipledispatch/__init__.py,sha256=aV2NC2cO_KmD6QFiwy4oC1D8fm3pFuPbaiTMeWmNWak,259
+sympy/multipledispatch/__pycache__/__init__.cpython-310.pyc,,
+sympy/multipledispatch/__pycache__/conflict.cpython-310.pyc,,
+sympy/multipledispatch/__pycache__/core.cpython-310.pyc,,
+sympy/multipledispatch/__pycache__/dispatcher.cpython-310.pyc,,
+sympy/multipledispatch/__pycache__/utils.cpython-310.pyc,,
+sympy/multipledispatch/conflict.py,sha256=rR6tKn58MfhMMKZ4ZrhVduylXd9f5PjT2TpzM9LMB6o,2117
+sympy/multipledispatch/core.py,sha256=I4WOnmu1VtlaCnn2oD9R2-xckkYLRZPNFEWtCOTAYfM,2261
+sympy/multipledispatch/dispatcher.py,sha256=A2I4upt4qNollXGpwzrqg7M0oKHJhZx1BUMIBnjRIow,12226
+sympy/multipledispatch/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/multipledispatch/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/multipledispatch/tests/__pycache__/test_conflict.cpython-310.pyc,,
+sympy/multipledispatch/tests/__pycache__/test_core.cpython-310.pyc,,
+sympy/multipledispatch/tests/__pycache__/test_dispatcher.cpython-310.pyc,,
+sympy/multipledispatch/tests/test_conflict.py,sha256=msNVSiikuPOqsEm_MMGmjsNbA2CAR0F1FZaHskzzo04,1786
+sympy/multipledispatch/tests/test_core.py,sha256=UfH_7cyvZ6PHjdH8vmLG49CG7E30W8uxm3FthuMc1Jk,4048
+sympy/multipledispatch/tests/test_dispatcher.py,sha256=saJPpGXLpLOuRfw-ekzZGzY-Rys0NsS5ke0n33i9j0U,6228
+sympy/multipledispatch/utils.py,sha256=39wB9i8jNhlLFZyCTFnioLx5N_CNWv4r5VZwKrxswIE,3097
+sympy/ntheory/__init__.py,sha256=MBs5Tdw5xAgNMlCdN8fSLiIswQudZibIbHjI9L5BEds,2746
+sympy/ntheory/__pycache__/__init__.cpython-310.pyc,,
+sympy/ntheory/__pycache__/bbp_pi.cpython-310.pyc,,
+sympy/ntheory/__pycache__/continued_fraction.cpython-310.pyc,,
+sympy/ntheory/__pycache__/digits.cpython-310.pyc,,
+sympy/ntheory/__pycache__/ecm.cpython-310.pyc,,
+sympy/ntheory/__pycache__/egyptian_fraction.cpython-310.pyc,,
+sympy/ntheory/__pycache__/elliptic_curve.cpython-310.pyc,,
+sympy/ntheory/__pycache__/factor_.cpython-310.pyc,,
+sympy/ntheory/__pycache__/generate.cpython-310.pyc,,
+sympy/ntheory/__pycache__/modular.cpython-310.pyc,,
+sympy/ntheory/__pycache__/multinomial.cpython-310.pyc,,
+sympy/ntheory/__pycache__/partitions_.cpython-310.pyc,,
+sympy/ntheory/__pycache__/primetest.cpython-310.pyc,,
+sympy/ntheory/__pycache__/qs.cpython-310.pyc,,
+sympy/ntheory/__pycache__/residue_ntheory.cpython-310.pyc,,
+sympy/ntheory/bbp_pi.py,sha256=p4OLH6B7CFmpTQPM2DNvxWW3T-PYNha5EPAE649i_tA,5252
+sympy/ntheory/continued_fraction.py,sha256=-GA1fzvgK7h8Bad_1NN0majRhwIQEg2zZDPuKSHAVYA,10109
+sympy/ntheory/digits.py,sha256=xFzoMyAC36fLR5OvtTetoXUSvhNTbP3HKY_co8RUEr4,3688
+sympy/ntheory/ecm.py,sha256=3ot2F6V8TSsaFEZndxxDDyqnT0jQ67Xdq0e3cuea_UE,10618
+sympy/ntheory/egyptian_fraction.py,sha256=hW886hPWJtARqgZIrH1WjZFC0uvf9CHxMIn0X9MWZro,6923
+sympy/ntheory/elliptic_curve.py,sha256=zDRjICf4p3PPfdxKWrPeTcMbAMqPvrZmK2rk9JAbh60,11510
+sympy/ntheory/factor_.py,sha256=5Oqd9QvsW4MR_eH--wbpmoa502yhoLM4g-9gPh5eYKc,75815
+sympy/ntheory/generate.py,sha256=42BWhzsUNv2k3pqdzWyAHAPPydPIaxHkmTIV-8rVSAk,29411
+sympy/ntheory/modular.py,sha256=fA3_ovJcPqrwT2bPjmd4cSGPDyVG6HSM9oP07HP1R_s,7650
+sympy/ntheory/multinomial.py,sha256=rbm3STjgfRbNVbcPeH69qtWktthSCk0sC373NuDM6fU,5073
+sympy/ntheory/partitions_.py,sha256=mE-PQKxaEM20AJJiCgkfhuCAruPbrtnHq3Ad2WrBSM8,5975
+sympy/ntheory/primetest.py,sha256=2qI-5HR_CowK2iH07B4XE2anXxkhSDWw7PPcQkOy70g,20951
+sympy/ntheory/qs.py,sha256=QzIJFHjFG2ncIpoJ7CGMzJ6HudVqB2RNp2yBHBjkSz8,18474
+sympy/ntheory/residue_ntheory.py,sha256=qNJSoRFKAcAcRet5rv3nSF7p3BJJXk9ewJxIDdg1lSE,40653
+sympy/ntheory/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/ntheory/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_bbp_pi.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_continued_fraction.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_digits.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_ecm.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_egyptian_fraction.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_elliptic_curve.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_factor_.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_generate.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_modular.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_multinomial.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_partitions.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_primetest.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_qs.cpython-310.pyc,,
+sympy/ntheory/tests/__pycache__/test_residue.cpython-310.pyc,,
+sympy/ntheory/tests/test_bbp_pi.py,sha256=-RXXkqMUfVCYeO9HonldOOISDKDaUYCCe5CUgK18L3o,9433
+sympy/ntheory/tests/test_continued_fraction.py,sha256=gfQfLuLVFn-bmEPBcgnU-f0VibJiY8hAEl0FO4V3iVU,3052
+sympy/ntheory/tests/test_digits.py,sha256=jC8GCQVJelFcHMApf5TZU1KXP2oBp48lkkD0bM2TLCo,1182
+sympy/ntheory/tests/test_ecm.py,sha256=Hy9pYRZPuFm7yrGVRs2ob_w3YY3bMEENH_hkDh947UE,2303
+sympy/ntheory/tests/test_egyptian_fraction.py,sha256=tpHcwteuuQAahcPqvgBm4Mwq-efzcHOn8mldijynjlE,2378
+sympy/ntheory/tests/test_elliptic_curve.py,sha256=wc0EOsGo-qGpdevRq1o64htwTOT_YSUzUfyhJC-JVbg,624
+sympy/ntheory/tests/test_factor_.py,sha256=Z1RvrqLttbgp3ZhfJZtCZmUV7GehKGQDSUEEdF0CSSA,25024
+sympy/ntheory/tests/test_generate.py,sha256=ALKzLAcCPIMTr3JC6RJHuOYd6z0aFVaF5-e481icYe8,8069
+sympy/ntheory/tests/test_modular.py,sha256=g73sUXtYNxzbDcq5UnMWT8NodAU8unwRj_E-PpvJqDs,1425
+sympy/ntheory/tests/test_multinomial.py,sha256=8uuj6XlatNyIILOpjJap13CMZmDwrCyGKn9LiIUiLV0,2344
+sympy/ntheory/tests/test_partitions.py,sha256=AkmDpR0IFxo0ret91tRPYUqrgQfQ367okTt2Ee2Vm60,507
+sympy/ntheory/tests/test_primetest.py,sha256=1Pkoi-TNxvB0oT1J5_YXryabyiGgPeXigS_vo_4x_v8,7062
+sympy/ntheory/tests/test_qs.py,sha256=ZCWiWiUULzLDTCz6CsolmVAdvZMZrz3wFrZXd-GtHfM,4481
+sympy/ntheory/tests/test_residue.py,sha256=t3-yaWmZvfkQpjUDqOzgwnTFO0je7BkEU2QKpA-pttU,12884
+sympy/parsing/__init__.py,sha256=KHuyDeHY1ifpVxT4aTOhomazCBYVIrKWd28jqp6YNJ8,125
+sympy/parsing/__pycache__/__init__.cpython-310.pyc,,
+sympy/parsing/__pycache__/ast_parser.cpython-310.pyc,,
+sympy/parsing/__pycache__/mathematica.cpython-310.pyc,,
+sympy/parsing/__pycache__/maxima.cpython-310.pyc,,
+sympy/parsing/__pycache__/sym_expr.cpython-310.pyc,,
+sympy/parsing/__pycache__/sympy_parser.cpython-310.pyc,,
+sympy/parsing/ast_parser.py,sha256=PWuAoNPZ6-C8HCYYGCG9tMCgwuMzi_ebyIqFSJCqk6k,2724
+sympy/parsing/autolev/Autolev.g4,sha256=980mo25mLWrQFmhRIg-aqIalUuwktYYaBGTXZ5_XZwA,4195
+sympy/parsing/autolev/__init__.py,sha256=sp5hzv5siVW3xUmhkp0S0iaA0Cz-PVB0HO1zC04pxYs,3611
+sympy/parsing/autolev/__pycache__/__init__.cpython-310.pyc,,
+sympy/parsing/autolev/__pycache__/_build_autolev_antlr.cpython-310.pyc,,
+sympy/parsing/autolev/__pycache__/_listener_autolev_antlr.cpython-310.pyc,,
+sympy/parsing/autolev/__pycache__/_parse_autolev_antlr.cpython-310.pyc,,
+sympy/parsing/autolev/_antlr/__init__.py,sha256=MQ4ZacpTuP-NmruFXKdWLQatoeVJQ8SaBQ2DnYvtyE8,203
+sympy/parsing/autolev/_antlr/__pycache__/__init__.cpython-310.pyc,,
+sympy/parsing/autolev/_antlr/__pycache__/autolevlexer.cpython-310.pyc,,
+sympy/parsing/autolev/_antlr/__pycache__/autolevlistener.cpython-310.pyc,,
+sympy/parsing/autolev/_antlr/__pycache__/autolevparser.cpython-310.pyc,,
+sympy/parsing/autolev/_antlr/autolevlexer.py,sha256=K7HF_-5dUyAIv1_7GkhTmxqSCanEhCpzJG8fayAEB3Q,13609
+sympy/parsing/autolev/_antlr/autolevlistener.py,sha256=EDb3XkH9Y7CLzxGM-tY-nGqxMGfBHVkqKdVCPxABgRE,12821
+sympy/parsing/autolev/_antlr/autolevparser.py,sha256=BZYJ7IkurRmm44S50pYp_9JHCjT8fr1w5HeksAEPjtg,106291
+sympy/parsing/autolev/_build_autolev_antlr.py,sha256=XOR44PCPo234I_Z1QnneSArY8aPpp4xP4-dycMalQQw,2590
+sympy/parsing/autolev/_listener_autolev_antlr.py,sha256=P5XTo2UjkyDyx4d9kpmWIm6BoCXyOiED9s8Tr3w3Am4,104758
+sympy/parsing/autolev/_parse_autolev_antlr.py,sha256=b9hIaluJUd1V2XIAp1erak6U-c-CwKyDLH1UkYQuvKE,1736
+sympy/parsing/autolev/test-examples/README.txt,sha256=0C4m_nLROeV5J8nMfm3RYEfYgQJqmlHZaCpVD24boQY,528
+sympy/parsing/autolev/test-examples/__pycache__/ruletest1.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest10.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest11.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest12.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest2.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest3.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest4.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest5.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest6.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest7.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest8.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/__pycache__/ruletest9.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/chaos_pendulum.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/double_pendulum.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/mass_spring_damper.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/pydy-example-repo/__pycache__/non_min_pendulum.cpython-310.pyc,,
+sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.al,sha256=HpTcX2wXzLqmgpp8fcSqNweKjxljk43iYK0wQmBbCDI,690
+sympy/parsing/autolev/test-examples/pydy-example-repo/chaos_pendulum.py,sha256=FSu4TP2BDTQjzYhMkcpRhXbb3kAD27XCyO_EoL55Ack,2274
+sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.al,sha256=wjeeRdCS3Es6ldX9Ug5Du1uaijUTyoXpfTqmhL0uYfk,427
+sympy/parsing/autolev/test-examples/pydy-example-repo/double_pendulum.py,sha256=uU9azTUGrY15BSDtw5T_V-7gmjyhHbXslzkmwBvFjGk,1583
+sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.al,sha256=Gf7OhgRlwqUEXq7rkfbf89yWA23u4uIUJ-buXTyOuXM,505
+sympy/parsing/autolev/test-examples/pydy-example-repo/mass_spring_damper.py,sha256=9ReCAqcUH5HYBgHmop9h5Zx54mfScWZN5L5F6rCHk4w,1366
+sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.al,sha256=p5v40h1nVFrWNqnB0K7GiNQT0b-MqwayYjZxXOY4M8M,362
+sympy/parsing/autolev/test-examples/pydy-example-repo/non_min_pendulum.py,sha256=DdxcWrm3HMQuyyY3Pk6sKHb4RXhQEM_EKY3HYZCP8ec,1503
+sympy/parsing/autolev/test-examples/ruletest1.al,sha256=mDJ02Q1Qm-ShVmGoyjzSfgDJHUOuDrsUg3YMnkpKdUw,176
+sympy/parsing/autolev/test-examples/ruletest1.py,sha256=eIKEFzEwkCFhPF0GTmf6SLuxXT384GqdCJnhiL2U0BQ,555
+sympy/parsing/autolev/test-examples/ruletest10.al,sha256=jKpV8BgX91iQsQDLFOJyaS396AyE5YQlUMxih5o9RK0,781
+sympy/parsing/autolev/test-examples/ruletest10.py,sha256=I1tsQcSAW6wqIguF-7lwlj9D4YZ8kCZqPqTKPUHR9oI,2726
+sympy/parsing/autolev/test-examples/ruletest11.al,sha256=j_q7giq2KIuXVRLWwNlwIlpbhNO6SqBMnLGLcxIkzwk,188
+sympy/parsing/autolev/test-examples/ruletest11.py,sha256=dYTRtXvMDXHiKzXHD2Sh0fcEukob3wr_GbSeqaZrrO8,475
+sympy/parsing/autolev/test-examples/ruletest12.al,sha256=drr2NLrK1ewn4FjMppXycpAUNbZEQ0IAMsdVx8nxk6I,185
+sympy/parsing/autolev/test-examples/ruletest12.py,sha256=ZG36s3PnkT0aKBM9Nx6H0sdJrtoLwaebU9386YSUql8,472
+sympy/parsing/autolev/test-examples/ruletest2.al,sha256=d-QjPpW0lzugaGBg8F6pDl_5sZHOR_EDJ8EvWLcz4FY,237
+sympy/parsing/autolev/test-examples/ruletest2.py,sha256=jrJfb0Jk2FP4GS5pDa0UB5ph0ijEVd1X8meKeZrTVng,820
+sympy/parsing/autolev/test-examples/ruletest3.al,sha256=1TAaOe8GI8-yBWJddfIxwnvScHNmOjSzSaQn0RS_v5k,308
+sympy/parsing/autolev/test-examples/ruletest3.py,sha256=O3K3IQo-HCjAIOSkfz3bDlst7dVUiRwhOZ0q_3jb5LU,1574
+sympy/parsing/autolev/test-examples/ruletest4.al,sha256=qPGlPbdDRrzTDUBeWydAIa7mbjs2o3uX938QAsWJ7Qk,302
+sympy/parsing/autolev/test-examples/ruletest4.py,sha256=WHod5yzKF4TNbEf4Yfxmx9WnimA7NOXqtTjZXR8FsP0,682
+sympy/parsing/autolev/test-examples/ruletest5.al,sha256=VuiKjiFmLK3uEdho0m3pk-n0qm4SNLoLPMRJqjMJ4GY,516
+sympy/parsing/autolev/test-examples/ruletest5.py,sha256=WvUtno1D3BrmFNPYYIBKR_gOA-PaHoxLlSTNDX67dcQ,1991
+sympy/parsing/autolev/test-examples/ruletest6.al,sha256=-HwgTmh_6X3wHjo3PQi7378t8YdizRJClc5Eb5DmjhE,703
+sympy/parsing/autolev/test-examples/ruletest6.py,sha256=vEO0jMOD-KIevAcVexmpvac0MGjN7O_dNipOBJJNzF0,1473
+sympy/parsing/autolev/test-examples/ruletest7.al,sha256=wR9S9rTzO9fyKL6Ofgwzw8XCFCV_p2hBpYotC8TvADI,773
+sympy/parsing/autolev/test-examples/ruletest7.py,sha256=_XvMrMe5r9RLopTrIqMGLhaYvHL1qjteWz9CKcotCL8,1696
+sympy/parsing/autolev/test-examples/ruletest8.al,sha256=P7Nu3Pq2R1mKcuFRc9dRO5jJ1_e5fwWdtqYG8NHVVds,682
+sympy/parsing/autolev/test-examples/ruletest8.py,sha256=8tgbwJ-ir0wiOCsgIFCAu4uD8SieYRrLoLzEfae5YQY,2690
+sympy/parsing/autolev/test-examples/ruletest9.al,sha256=txtZ5RH2p1FvAe6etwetSCH8rLktnpk5z0W72sCOdAA,755
+sympy/parsing/autolev/test-examples/ruletest9.py,sha256=GtqV-Wq2GGJzfblMscAz-KXCzs0P_4XqvA3FIdlPe04,1965
+sympy/parsing/c/__init__.py,sha256=J9CvkNRY-qy6CA06GZYuwTuxdnqas6oUP2g0qLztGro,65
+sympy/parsing/c/__pycache__/__init__.cpython-310.pyc,,
+sympy/parsing/c/__pycache__/c_parser.cpython-310.pyc,,
+sympy/parsing/c/c_parser.py,sha256=o7UohvD8V6feJr74sIbx2NNAyZOLFNJDHtiUPg_rUeg,39331
+sympy/parsing/fortran/__init__.py,sha256=KraiVw2qxIgYeMRTFjs1vkMi-hqqDkxUBv8Rc2gwkCI,73
+sympy/parsing/fortran/__pycache__/__init__.cpython-310.pyc,,
+sympy/parsing/fortran/__pycache__/fortran_parser.cpython-310.pyc,, +sympy/parsing/fortran/fortran_parser.py,sha256=RpNQR3eNx5vgfzdt0nEZDCB56kF__SnYMaqWN3zla00,11483 +sympy/parsing/latex/LICENSE.txt,sha256=AHvDClj6QKmW53IEcSDeTq8x9REOT5w7X5P8374urKE,1075 +sympy/parsing/latex/LaTeX.g4,sha256=fG0ZUQPwYQOIbcyaPDAkGvcfGs3ZwwMB8ZnKW5yHUDY,5821 +sympy/parsing/latex/__init__.py,sha256=10TctFMpk3AolsniTJR5rQr19QXNqVTx-rl8ZFkHC4s,991 +sympy/parsing/latex/__pycache__/__init__.cpython-310.pyc,, +sympy/parsing/latex/__pycache__/_build_latex_antlr.cpython-310.pyc,, +sympy/parsing/latex/__pycache__/_parse_latex_antlr.cpython-310.pyc,, +sympy/parsing/latex/__pycache__/errors.cpython-310.pyc,, +sympy/parsing/latex/_antlr/__init__.py,sha256=TAb79senorEsoYLCLwUa8wg8AUCHzmmZ7tLdi0XGNaE,384 +sympy/parsing/latex/_antlr/__pycache__/__init__.cpython-310.pyc,, +sympy/parsing/latex/_antlr/__pycache__/latexlexer.cpython-310.pyc,, +sympy/parsing/latex/_antlr/__pycache__/latexparser.cpython-310.pyc,, +sympy/parsing/latex/_antlr/latexlexer.py,sha256=Y1hmY1VGL5FTSSlToTRQydPnyaLLNy1mDSWx76HaYwM,30502 +sympy/parsing/latex/_antlr/latexparser.py,sha256=ZvonpvTS3vLSOVpas88M3CfNnUhPUDsCCPPk4wBYUGE,123655 +sympy/parsing/latex/_build_latex_antlr.py,sha256=id_4pbcI4nAa0tHumN0lZX0Ubb-BaJ3czGwiQR_jZPE,2777 +sympy/parsing/latex/_parse_latex_antlr.py,sha256=3iUHktfORn60D5SBpRNjSSaxuKlmzEBI5-DilfkkRQ0,20525 +sympy/parsing/latex/errors.py,sha256=adSpvQyWjTLsbN_2KHJ4HuXpY7_U9noeWiG0lskYLgE,45 +sympy/parsing/mathematica.py,sha256=AX5q_9bDARtC0w3bFNmhNKGqe3X7NlprZEvMCbV_vMs,39282 +sympy/parsing/maxima.py,sha256=DhTnXRSAceijyA1OAm86c6TyW9-aeUVoZEELGu0oZtY,1835 +sympy/parsing/sym_expr.py,sha256=-hxarp961eyLtuwUhbg3D3qzy06HrEPZEYpGVcJzAv0,8895 +sympy/parsing/sympy_parser.py,sha256=QA9TRHZwqQ8kqfOPA4EeHfKz1dCqpBppRtVTE61IpO0,43814 +sympy/parsing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/parsing/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_ast_parser.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_autolev.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_c_parser.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_fortran_parser.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_implicit_multiplication_application.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_latex.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_latex_deps.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_mathematica.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_maxima.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_sym_expr.cpython-310.pyc,, +sympy/parsing/tests/__pycache__/test_sympy_parser.cpython-310.pyc,, +sympy/parsing/tests/test_ast_parser.py,sha256=lcT8w7mn6UEZ8T-xfA4TqG4Mt7JxY00oHhOW7JtHQfY,803 +sympy/parsing/tests/test_autolev.py,sha256=tQuUFa8YqVdsHPOcUhAwlMKB8Uk08HejDhDCda8lXs0,6647 +sympy/parsing/tests/test_c_parser.py,sha256=yIYdfnaHX9Z93-Cmf6x9C7eysQ-y3_lU-6CGRXN4WL8,154665 +sympy/parsing/tests/test_fortran_parser.py,sha256=SGbawrJ4a780TJAFVMONc7Y3Y8VYgVqsIHxVGaicbxE,11828 +sympy/parsing/tests/test_implicit_multiplication_application.py,sha256=nPzLKcAJJaoZgdLoq1_CXhiWKFBH--p4t6dq4I3sV9A,7448 +sympy/parsing/tests/test_latex.py,sha256=khNyIVANKnQFIE6hR3UdSqlzYdZWDtO0vs6TxhpWDUI,11503 +sympy/parsing/tests/test_latex_deps.py,sha256=oe5vm2eIKn05ZiCcXUaO8X6HCcRmN1qCuTsz6tB7Qrk,426 +sympy/parsing/tests/test_mathematica.py,sha256=ma9YM-Cti4hMhjZym5RMGaesxaWki6p29QROJ4oSs4E,13166 
+sympy/parsing/tests/test_maxima.py,sha256=iIwnFm0lYD0-JcraUIymogqEMN3ji0c-0JeNFFGTEDs,1987 +sympy/parsing/tests/test_sym_expr.py,sha256=-wNR7GwvJHVmPSZxSuAuoX1_FJk83O0tcDi09qYY6Jk,5668 +sympy/parsing/tests/test_sympy_parser.py,sha256=5__CszZfy8DAl5JzfsLGsDECRjdT20a3p9cwYBXvAh8,12253 +sympy/physics/__init__.py,sha256=F_yvUMCuBq3HR-3Ai6W4oktBsXRg8KdutFLwT9FFJlY,220 +sympy/physics/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/__pycache__/hydrogen.cpython-310.pyc,, +sympy/physics/__pycache__/matrices.cpython-310.pyc,, +sympy/physics/__pycache__/paulialgebra.cpython-310.pyc,, +sympy/physics/__pycache__/pring.cpython-310.pyc,, +sympy/physics/__pycache__/qho_1d.cpython-310.pyc,, +sympy/physics/__pycache__/secondquant.cpython-310.pyc,, +sympy/physics/__pycache__/sho.cpython-310.pyc,, +sympy/physics/__pycache__/wigner.cpython-310.pyc,, +sympy/physics/continuum_mechanics/__init__.py,sha256=moVrcsEw_a8db69dtuwE-aquZ1TAJc7JxHukrYnJuyM,89 +sympy/physics/continuum_mechanics/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/continuum_mechanics/__pycache__/beam.cpython-310.pyc,, +sympy/physics/continuum_mechanics/__pycache__/truss.cpython-310.pyc,, +sympy/physics/continuum_mechanics/beam.py,sha256=i3BcVzCsC9AUPjyAcPd5Lfwcpb_9bz9V-cO6N2WlkLU,148566 +sympy/physics/continuum_mechanics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/continuum_mechanics/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/continuum_mechanics/tests/__pycache__/test_beam.cpython-310.pyc,, +sympy/physics/continuum_mechanics/tests/__pycache__/test_truss.cpython-310.pyc,, +sympy/physics/continuum_mechanics/tests/test_beam.py,sha256=IubYZzOkQ9dBcyR_rLA9FxUkFZ_x1BX16MKUvyJaOkE,26879 +sympy/physics/continuum_mechanics/tests/test_truss.py,sha256=dsjtXQoBXcFDacKc55DbZST1L69XGKN0TMtCBnHN5hY,3368 +sympy/physics/continuum_mechanics/truss.py,sha256=C9JPSDutXBS4QFmdqcsClFCtdN9tdGauPD8TYQ4_NF0,28496 +sympy/physics/control/__init__.py,sha256=Z5cPVgXd8BAdxX9iqyLLVyk2n2ry_jiMBHo6crMeLFA,1027 +sympy/physics/control/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/control/__pycache__/control_plots.cpython-310.pyc,, +sympy/physics/control/__pycache__/lti.cpython-310.pyc,, +sympy/physics/control/control_plots.py,sha256=Q25egDhUs-xrlh5oy4ZBlnOqF5pJtQ1SRo28r5nnudY,32222 +sympy/physics/control/lti.py,sha256=EquvSYF2ifqnFfYsnoJuAsRrZHQIm7f6LwmZGbmbW-M,114652 +sympy/physics/control/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/control/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/control/tests/__pycache__/test_control_plots.cpython-310.pyc,, +sympy/physics/control/tests/__pycache__/test_lti.cpython-310.pyc,, +sympy/physics/control/tests/test_control_plots.py,sha256=EDTfKI08wacHtYFKf7HeBi43msqqAvMOhTWf-8RJu3k,15728 +sympy/physics/control/tests/test_lti.py,sha256=QPuNpHlSquTX14-r4YbhNfxh32x_D17jAxtO2aQn5GA,59908 +sympy/physics/hep/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/hep/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/hep/__pycache__/gamma_matrices.cpython-310.pyc,, +sympy/physics/hep/gamma_matrices.py,sha256=WlSHLUtMU7NrgLyKEvTntMSYxMZq1r_6o2kqUEAdPaA,24253 +sympy/physics/hep/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/hep/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/hep/tests/__pycache__/test_gamma_matrices.cpython-310.pyc,, +sympy/physics/hep/tests/test_gamma_matrices.py,sha256=iKqICj0bP7EK0sSuYFsPdPkDTbHGa6J_LMPZAzv1j4o,14722 
+sympy/physics/hydrogen.py,sha256=R2wnNi1xB-WTQ8Z9aPUhX9Z8mQ8TdhCM1JAZIkyXgjw,7594 +sympy/physics/matrices.py,sha256=jHfbWkzL2myFt-39kodQo5wPubBxNZKXlljuSxZL4bE,3836 +sympy/physics/mechanics/__init__.py,sha256=57XHPOZF3y2-dLcrfwECEgjFthUYeQncmft3GZYKyOY,2033 +sympy/physics/mechanics/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/body.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/functions.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/joint.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/jointsmethod.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/kane.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/lagrange.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/linearize.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/method.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/models.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/particle.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/rigidbody.cpython-310.pyc,, +sympy/physics/mechanics/__pycache__/system.cpython-310.pyc,, +sympy/physics/mechanics/body.py,sha256=eqQbmsPOZnad0aH326N_FfZZWtzs4IIvbugwfkLlHtQ,19088 +sympy/physics/mechanics/functions.py,sha256=GbhUZWZD0HqLGh03ojXfnATxM-oxM708AmFtCgOjJFE,25557 +sympy/physics/mechanics/joint.py,sha256=hTBI8wd7ylnRgR1hrW-Xg9pTiFHBNgA6j5MWfTJMzdU,82739 +sympy/physics/mechanics/jointsmethod.py,sha256=FmccW8429JLfg9-Gxc4oeekrPi2ig77gYZJ2x7qVzMA,8530 +sympy/physics/mechanics/kane.py,sha256=L-imRN4zBCtFXajjyQ4-2peMULqysCbVEUq69JpQbgA,30567 +sympy/physics/mechanics/lagrange.py,sha256=_BM2q2euBxiVj-5OVMOkuzu9D012MP5AC6LnOENwbX0,18338 +sympy/physics/mechanics/linearize.py,sha256=sEX52OQP-pJ_pIlw8oVv01oQPeHiPf0LCm1GMuIn1Yo,15615 +sympy/physics/mechanics/method.py,sha256=2vFRhA79ra4HR6AzVBHMr3oNncrcqgLLMRqdyif0DrI,660 +sympy/physics/mechanics/models.py,sha256=9q1g3I2xYpuTMi-v9geswEqxJWTP3RjcOquRfzMhHzM,6463 +sympy/physics/mechanics/particle.py,sha256=F-pPvcmfxdacZxSIwnaXJ-W9KslIEnCw7ljCLlxVk4Y,7577 +sympy/physics/mechanics/rigidbody.py,sha256=YTWj-awmWw-OZQQ6wn_HxrTnmSu0Hvhd1TJxRVU62LI,11192 +sympy/physics/mechanics/system.py,sha256=Un6ep47tygf1Vdp-8G2WS6uT-FCqOBRwrDUdonFd_vA,18671 +sympy/physics/mechanics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/mechanics/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_body.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_functions.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_joint.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_jointsmethod.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_kane.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_kane2.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_kane3.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_kane4.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_lagrange.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_lagrange2.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_linearize.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_method.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_models.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_particle.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_rigidbody.cpython-310.pyc,, +sympy/physics/mechanics/tests/__pycache__/test_system.cpython-310.pyc,, 
+sympy/physics/mechanics/tests/test_body.py,sha256=fV3dp94uFbE7ZHb7DkD0fJ1UgbSdc1NVVy0yRuYZfuk,11213 +sympy/physics/mechanics/tests/test_functions.py,sha256=W1k7uhYHs1Ayvtr4q8P_S8cUiwOuaz-UdE1svV4WpCQ,11033 +sympy/physics/mechanics/tests/test_joint.py,sha256=fordUBSC7clvKTuKCtb-KhrOGUonMF1w-91G-pawzKk,53035 +sympy/physics/mechanics/tests/test_jointsmethod.py,sha256=0soorl_p-tVwRx0jWreexWLXBk3v13ZnW9vJ0U6t6Pg,8935 +sympy/physics/mechanics/tests/test_kane.py,sha256=rFhtyVrr4Tifdwwgq-vedU8BneLPa_zVcUNWpHAiEvA,20599 +sympy/physics/mechanics/tests/test_kane2.py,sha256=3MweQ_qfbyc8WqcSvvj7iKQLRdMlki9S6uNyd8ZIDN0,19111 +sympy/physics/mechanics/tests/test_kane3.py,sha256=rc4BwlH3VGV21UH_s6I9y1CwHBwvdy3xvkEDS3lAJHQ,14432 +sympy/physics/mechanics/tests/test_kane4.py,sha256=a7CFmnz-MFbQbfop_tAhRUAHk7BJZEfa9PlcX2K8Y0Y,4722 +sympy/physics/mechanics/tests/test_lagrange.py,sha256=iuHomulBF8MafLeorKGaLHUEF8CvFhXcxEtN0hk1akM,10119 +sympy/physics/mechanics/tests/test_lagrange2.py,sha256=HCnDemnFD1r3DIT4oWnypcsZKvF1BA96_MMYHE7Q_xo,1413 +sympy/physics/mechanics/tests/test_linearize.py,sha256=G4XdGFp6lIUwNJ6qm77X24ZPKgGcyxYBuCv61WeROXM,11826 +sympy/physics/mechanics/tests/test_method.py,sha256=L7CnsvbQC-U7ijbSZdu7DEr03p88OLj4IPvFJ_3kCDo,154 +sympy/physics/mechanics/tests/test_models.py,sha256=X7lrxTIWuTP7GgpYyGVmOG48zG4UDWV99FACXFO5VMA,5091 +sympy/physics/mechanics/tests/test_particle.py,sha256=j66nmXM7R_TSxr2Z1xywQKD-al1z62I15ozPaywN1n0,2153 +sympy/physics/mechanics/tests/test_rigidbody.py,sha256=QvAAtofAqA4oQaYvxN1gK7QJf6TGrI3TqY5fHjbP200,5247 +sympy/physics/mechanics/tests/test_system.py,sha256=vRxvOH56wuWRTygmTcJJZAlB6Bw2Vlhcr9q6A526_WA,8713 +sympy/physics/optics/__init__.py,sha256=0UmqIt2-u8WwNkAqsnOVt9VlkB9K0CRIJYiQaltJ73w,1647 +sympy/physics/optics/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/optics/__pycache__/gaussopt.cpython-310.pyc,, +sympy/physics/optics/__pycache__/medium.cpython-310.pyc,, +sympy/physics/optics/__pycache__/polarization.cpython-310.pyc,, +sympy/physics/optics/__pycache__/utils.cpython-310.pyc,, +sympy/physics/optics/__pycache__/waves.cpython-310.pyc,, +sympy/physics/optics/gaussopt.py,sha256=xMoYUyPyh2ycyNj5gomy_0PkNKKHa9XRlE39mZUQaqI,20892 +sympy/physics/optics/medium.py,sha256=cys0tWGi1VCPWMTZuKadcN_bToz_bqKsDHSEVzuV3CE,7124 +sympy/physics/optics/polarization.py,sha256=mIrZiOVXetGtKkLxl8Llaf2Z9coWenf6JKrClh4W8yU,21434 +sympy/physics/optics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/optics/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/optics/tests/__pycache__/test_gaussopt.cpython-310.pyc,, +sympy/physics/optics/tests/__pycache__/test_medium.cpython-310.pyc,, +sympy/physics/optics/tests/__pycache__/test_polarization.cpython-310.pyc,, +sympy/physics/optics/tests/__pycache__/test_utils.cpython-310.pyc,, +sympy/physics/optics/tests/__pycache__/test_waves.cpython-310.pyc,, +sympy/physics/optics/tests/test_gaussopt.py,sha256=QMXJw_6mFCC3918b-pc_4b_zgO8Hsk7_SBvMupbEi5I,4222 +sympy/physics/optics/tests/test_medium.py,sha256=RxG7N3lzmCO_8hIoKyPnDKffmk8QFzA9yamu1_mr_dE,2194 +sympy/physics/optics/tests/test_polarization.py,sha256=81MzyA29HZckg_Ss-88-5o0g9augDqCr_LwcJIiXuA0,2605 +sympy/physics/optics/tests/test_utils.py,sha256=SjicjAptcZGwuX-ib_Lq7PlGONotvo2XJ4p3JA9iNVI,8553 +sympy/physics/optics/tests/test_waves.py,sha256=PeFfrl7MBkWBHdc796sDDYDuhGepat3DQk7PmyTXVnw,3397 +sympy/physics/optics/utils.py,sha256=qoSlzujMTHDxIZvBQPJ_cF2PxB-awyXVqCndriUd-PQ,22154 
+sympy/physics/optics/waves.py,sha256=Iw-9gGksvWhPmQ_VepmI90ekKyzHdPlq6U41wdM4ikI,10042 +sympy/physics/paulialgebra.py,sha256=1r_qDBbVyl836qIXlVDdoF89Z9wedGvWIkHAbwQaK-4,6002 +sympy/physics/pring.py,sha256=SCMGGIcEhVoD7dwhY7_NWL1iKwo7OfgKdmm2Ok_9Xl0,2240 +sympy/physics/qho_1d.py,sha256=ZXemUsa_b0rLtPVTUkgAkZQ1Ecu2eIZxaiNSSXW0PDk,2005 +sympy/physics/quantum/__init__.py,sha256=RA2xbM7GhFq3dVNTna3odlTJYHqNerxjNeZ1kwigHiw,1705 +sympy/physics/quantum/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/anticommutator.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/boson.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/cartesian.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/cg.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/circuitplot.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/circuitutils.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/commutator.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/constants.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/dagger.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/density.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/fermion.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/gate.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/grover.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/hilbert.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/identitysearch.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/innerproduct.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/matrixcache.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/matrixutils.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/operator.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/operatorordering.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/operatorset.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/pauli.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/piab.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/qapply.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/qasm.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/qexpr.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/qft.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/qubit.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/represent.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/sho1d.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/shor.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/spin.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/state.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/tensorproduct.cpython-310.pyc,, +sympy/physics/quantum/__pycache__/trace.cpython-310.pyc,, +sympy/physics/quantum/anticommutator.py,sha256=TH0mPF3Dk9mL5fa2heuampDpwWFxxh3HCcg4g2uNQ_E,4446 +sympy/physics/quantum/boson.py,sha256=cEH8dcPXunognApc69Y6TSJRMZ63P20No6tB2xGHynQ,6313 +sympy/physics/quantum/cartesian.py,sha256=9R9VDYLV1Xe-GkA9TQbj8PVlBLaD0fF6KXfHJ1ze5as,9092 +sympy/physics/quantum/cg.py,sha256=hPkgraNAWHIC-b0Pr0IwiY_gfR9pthQC6IuNI89J4dI,23331 +sympy/physics/quantum/circuitplot.py,sha256=SacQMhPyDhizKmGRNEs1vtXph8lR6bMn5bVJI4rJiXg,11799 +sympy/physics/quantum/circuitutils.py,sha256=mrQNUDbwM3LV1NZ1EqVpXyOY2mOXCBVZW7cQTiCxUaM,13882 +sympy/physics/quantum/commutator.py,sha256=7IiNnFYxxi9EfElCFtMLEQccb6nB-jIeq4x3IlIqzKs,7521 +sympy/physics/quantum/constants.py,sha256=20VRATCkSprSnGFR5ejvMEYlWwEcv1B-dE3RPqPTQ9k,1420 +sympy/physics/quantum/dagger.py,sha256=KOeHXb52hvR1IbeNwlNU30KPiD9xv7S1a2dowkQqBLM,2428 
+sympy/physics/quantum/density.py,sha256=vCH8c4Fu5lcrT0PsuBqEK7eWnyHtCRwVx4wSh3f07ME,9743 +sympy/physics/quantum/fermion.py,sha256=9umlSpm6pKoplH7hRRHbuwvkvdM98A9GGNZ6yeNJf_o,4506 +sympy/physics/quantum/gate.py,sha256=T_VkbtJEN0rbOB8wrlZFkI7NU1XJ2MGyEx9PX3GCV_4,42487 +sympy/physics/quantum/grover.py,sha256=Cu2EPTOWpfyxYMVOdGBZez8SBZ2i2QEUmHnTiPPSi-M,10454 +sympy/physics/quantum/hilbert.py,sha256=qrja92vF7BUeSyHOLKVX8-XKcPGT7QaQMWrqWXjRNus,19632 +sympy/physics/quantum/identitysearch.py,sha256=Zh_ji5J0YeAy2AezsQcHV9W2icWoaa3ZwTbfjCCQmJo,27607 +sympy/physics/quantum/innerproduct.py,sha256=K4tmyWYMlgzkTTXjs82PzEC8VU4jm2J6Qic4YmAM7SQ,4279 +sympy/physics/quantum/matrixcache.py,sha256=S6fPkkYmfX8ELBOc9EST-8XnQ1gtpSOBfd2KwLGKdYo,3587 +sympy/physics/quantum/matrixutils.py,sha256=D5ipMBRCh2NsxIy4F6ZLQAF4Y84-2rKKC-czCVZ22Ds,8213 +sympy/physics/quantum/operator.py,sha256=zxPohzuo4H_veqo_Lkws1mN5mKufKlK5JZrgpxQXABM,19311 +sympy/physics/quantum/operatorordering.py,sha256=smjToA0lj6he22d9R61EL2FSNXFz9oTIF8x5UOd4RNs,11597 +sympy/physics/quantum/operatorset.py,sha256=W8rYUrh167nkZcoXCTFscZ1ZvBT6WXkMfmKzRks3edE,9598 +sympy/physics/quantum/pauli.py,sha256=lzxWFHXqxKWRiYK99QCo9zuVG9eVXiB8vFya7TvrVxQ,17250 +sympy/physics/quantum/piab.py,sha256=Zjb2cRGniVDV6e35gjP4uEpI4w0C7YGQIEXReaq_z-E,1912 +sympy/physics/quantum/qapply.py,sha256=E6hH0w7pMHaXOixT3FWkcBJm56Yoi8B93wedgcH3XQY,7147 +sympy/physics/quantum/qasm.py,sha256=UWpcUIBgkK55SmEBZlpmz-1KGHZvW7dNeSVG8tHr44A,6288 +sympy/physics/quantum/qexpr.py,sha256=UD2gBfjYRnHcqKYk-Jhex8dOoxNProadx154vejvtB4,14005 +sympy/physics/quantum/qft.py,sha256=Iy6yd41lENuCeU5jLXY7O3E_Sc3SAHCN3X5bE0sQiiU,6352 +sympy/physics/quantum/qubit.py,sha256=OyVzGFycgwyn8ZvsCNYsuDmG801JurfKwlKxVDHIBCo,26007 +sympy/physics/quantum/represent.py,sha256=b_mEm3q-gZbIV5x5Vl6pzfyJytqlp_a98xpfse2AfgI,18707 +sympy/physics/quantum/sho1d.py,sha256=ZroR_FjxmjOmDcd0Fm04vWKTGCpvLaEu4NiuplKm708,20867 +sympy/physics/quantum/shor.py,sha256=nHT2m4msS5gyQLYPIo2X6XcF7y0pTRZYJUYxZG0YCUk,5504 +sympy/physics/quantum/spin.py,sha256=3h9uGC5vJcnu3qRzXnZr-nUNyHkC4AvIOB-rBmbliJ4,72948 +sympy/physics/quantum/state.py,sha256=ISVtxmQjQL28neAcvyLDD6QJtLAFPwotCBeArPmDuFc,30975 +sympy/physics/quantum/tensorproduct.py,sha256=uBpy2037T1bCxZsiFoIAzHQru2Yi2Om8PFDtdCq5Nas,14960 +sympy/physics/quantum/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/quantum/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_anticommutator.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_boson.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_cartesian.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_cg.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_circuitplot.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_circuitutils.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_commutator.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_constants.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_dagger.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_density.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_fermion.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_gate.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_grover.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_hilbert.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_identitysearch.cpython-310.pyc,, 
+sympy/physics/quantum/tests/__pycache__/test_innerproduct.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_matrixutils.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_operator.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_operatorordering.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_operatorset.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_pauli.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_piab.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_printing.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_qapply.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_qasm.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_qexpr.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_qft.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_qubit.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_represent.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_sho1d.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_shor.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_spin.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_state.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_tensorproduct.cpython-310.pyc,, +sympy/physics/quantum/tests/__pycache__/test_trace.cpython-310.pyc,, +sympy/physics/quantum/tests/test_anticommutator.py,sha256=ckWHKwQFiAMWcDaYSa_26vi_GIsvs32_0O62I5lGsr8,1304 +sympy/physics/quantum/tests/test_boson.py,sha256=BZjdrZ-F1QhyhDqfK4Zc1VEFBJi1PeiPjMpfBcHekfo,1676 +sympy/physics/quantum/tests/test_cartesian.py,sha256=b8eBLwmL8ize-a30TMDkoWuDym02PvBjr7ayfLwaR_I,4112 +sympy/physics/quantum/tests/test_cg.py,sha256=pw14QQ6XBTkK35021E_nDqcvXdOi4bLiPlkddyE865s,8878 +sympy/physics/quantum/tests/test_circuitplot.py,sha256=c3v9wUzLHUH-eBVGj6_broVhHkioNwpaaApTDAJEflU,2096 +sympy/physics/quantum/tests/test_circuitutils.py,sha256=GrJAWRQVH_l8EIHrj1ve2jtxske72IriQ3lo94fqrVQ,13187 +sympy/physics/quantum/tests/test_commutator.py,sha256=keBstGDpNITFRr06uVFrka_Lje56g6oFoJQEpZXmnYw,2727 +sympy/physics/quantum/tests/test_constants.py,sha256=KBmYPIF49Sq34lbzbFCZRYWSyIdhnR3AK3q-VbU6grU,338 +sympy/physics/quantum/tests/test_dagger.py,sha256=PR19goU60RXL3aU3hU2CJ3VyrlGeP6x_531nI9mqvm8,2009 +sympy/physics/quantum/tests/test_density.py,sha256=EyxiEgyc0nDSweJwI0JUwta7gZ81TVHCl7YDEosTrvI,9718 +sympy/physics/quantum/tests/test_fermion.py,sha256=bFaOWjPHv5HNR10Jvk4i9muJ3MQIyznPWZMtDCtKrZM,1135 +sympy/physics/quantum/tests/test_gate.py,sha256=7oBX1HoWnrYtHjABRoqv_wQDB9B829E99fdcJzaqawM,12496 +sympy/physics/quantum/tests/test_grover.py,sha256=uze62AG6H4x2MYJJA-EY3NtkqwvrDIQ2kONuvIRQiZ4,3640 +sympy/physics/quantum/tests/test_hilbert.py,sha256=IGP6rc2-b3we9dRDbpRniFAhQwp_TYtMfFzxusAprx0,2643 +sympy/physics/quantum/tests/test_identitysearch.py,sha256=3YGrXCsFLhLtN5MRyT5ZF8ELrSdkvDKTv6xKM4i2ims,17745 +sympy/physics/quantum/tests/test_innerproduct.py,sha256=37tT8p6MhHjAYeoay1Zyv7gCs-DeZQi4VdwUH2IffDE,1483 +sympy/physics/quantum/tests/test_matrixutils.py,sha256=3wmKKRhfRuwdQWitWE2mJEHr-TUKn6ixNb_wPWs8wRw,4116 +sympy/physics/quantum/tests/test_operator.py,sha256=BZNYANH2w2xfOkqFA3oIS_Kl1KnwnDUroV7d9lQ3IdY,8164 +sympy/physics/quantum/tests/test_operatorordering.py,sha256=CNMvvTNGNSIXPGLaYjxAOFKk-2Tn4yp3L9w-hc1IMnE,1402 +sympy/physics/quantum/tests/test_operatorset.py,sha256=DNfBeYBa_58kSG7PM5Ilo6xnzek8lSiAGX01uMFRYqI,2628 
+sympy/physics/quantum/tests/test_pauli.py,sha256=Bhsx_gj5cpYv4BhVJRQohxlKk_rcp4jHtSRlTP-m_xs,4940 +sympy/physics/quantum/tests/test_piab.py,sha256=8ndnzyIsjF4AOu_9k6Yqap_1XUDTbiGnv7onJdrZBWA,1086 +sympy/physics/quantum/tests/test_printing.py,sha256=wR45NMA2w242-qnAlMjyOPj2yvwDbCKuBDh_V2sekr8,30294 +sympy/physics/quantum/tests/test_qapply.py,sha256=uHw3Crt5Lv0t6TV9jxmNwPVbiWGzFMaLZ8TJZfB1-Mg,6022 +sympy/physics/quantum/tests/test_qasm.py,sha256=ZvMjiheWBceSmIM9LHOL5fiFUl6HsUo8puqdzywrhkc,2976 +sympy/physics/quantum/tests/test_qexpr.py,sha256=emcGEqQeCv-kVJxyfX66TZxahJ8pYznFLE1fyyzeZGc,1517 +sympy/physics/quantum/tests/test_qft.py,sha256=CQWIKZFSpkUe5X7AF27EqVwZ4l0Zqycl3bdYgVZj3Hs,1861 +sympy/physics/quantum/tests/test_qubit.py,sha256=LQNaOuvXc-glRifQBlsXattAQB-yKHvmNMw68_JoM_c,8957 +sympy/physics/quantum/tests/test_represent.py,sha256=rEc_cirIJvoU1xANuOTkMjJHdr6DluP4J9sWD2D8Xpc,5166 +sympy/physics/quantum/tests/test_sho1d.py,sha256=nc75ZE5XXtrc88OcfB5mAGh01Wpf3d4Rbsu8vLJPTC8,4684 +sympy/physics/quantum/tests/test_shor.py,sha256=3a3GCg6V5_mlJ2bltoXinGMGvlSxpq7GluapD_3SZaQ,666 +sympy/physics/quantum/tests/test_spin.py,sha256=LOIPNGWalfPLL7DNAaiLCp4J_G1mZpUYmTCNx3kjqgw,344807 +sympy/physics/quantum/tests/test_state.py,sha256=UjfOdwRzNXHK0AMhEaI431eMNjVUK7glqiGxOXJEC50,6741 +sympy/physics/quantum/tests/test_tensorproduct.py,sha256=UncgjQFeJX3BOdHy8UYbb_Lwit67CfNuwLaFYRmyKUI,4703 +sympy/physics/quantum/tests/test_trace.py,sha256=dbpTXcJArWRR_Hh5JTuy2GJIfgjVo6zS20o5mdVEGH4,3057 +sympy/physics/quantum/trace.py,sha256=2ZqN9IEsz3LKHTLV8ZDwTK0sM5PfwL0p2sYet0N7Gis,6397 +sympy/physics/secondquant.py,sha256=FvAm6mVUVVRxaYPzqn4qwhkZCvN8LA8xUFKjnkMpPdw,90400 +sympy/physics/sho.py,sha256=K8P9FAdZr6UfQKYZO9TlhDUqUd3YsMekXCsKy2HhaY0,2480 +sympy/physics/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_clebsch_gordan.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_hydrogen.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_paulialgebra.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_physics_matrices.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_pring.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_qho_1d.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_secondquant.cpython-310.pyc,, +sympy/physics/tests/__pycache__/test_sho.cpython-310.pyc,, +sympy/physics/tests/test_clebsch_gordan.py,sha256=HdmpjVHZ1JandoZrGwFb7YshkmEkcvt3jLLVxZ13UvA,8563 +sympy/physics/tests/test_hydrogen.py,sha256=kohRIR6JojE_GWYnlzLsMMgdhoKd8whazs0mq7cCTQc,4987 +sympy/physics/tests/test_paulialgebra.py,sha256=tyshEMsLNPR4iYzoAbPGZRZ-e_8t7GDP_xyjRyhepeQ,1477 +sympy/physics/tests/test_physics_matrices.py,sha256=Dha8iQRhzxLcl7TKSA6QP0pnEcBoqtj_Ob6tx01SMwI,2948 +sympy/physics/tests/test_pring.py,sha256=XScQQO9RhRrlqSII_ZyyOUpE-zs-7wphSFCZq2OuFnE,1261 +sympy/physics/tests/test_qho_1d.py,sha256=LD9WU-Y5lW7bVM7MyCkSGW9MU2FZhVjMB5Zk848_q1M,1775 +sympy/physics/tests/test_secondquant.py,sha256=VgG8NzcFmIkhFbKZpbjjzV4W5JOaJHGj9Ut8ugWM2UM,48450 +sympy/physics/tests/test_sho.py,sha256=aIs1f3eo6hb4ErRU8xrr_h_yhTmRx-fQgv9n27SfsLM,693 +sympy/physics/units/__init__.py,sha256=DVvWy9qNRm742NFGcBpybFY20ZK3BU7DWNbLMTXYiFo,12386 +sympy/physics/units/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/units/__pycache__/dimensions.cpython-310.pyc,, +sympy/physics/units/__pycache__/prefixes.cpython-310.pyc,, +sympy/physics/units/__pycache__/quantities.cpython-310.pyc,, 
+sympy/physics/units/__pycache__/unitsystem.cpython-310.pyc,, +sympy/physics/units/__pycache__/util.cpython-310.pyc,, +sympy/physics/units/definitions/__init__.py,sha256=F3RyZc1AjM2Ch5b27Tt-VYdZ1HAIWvhgtQQQTfMiN6w,7470 +sympy/physics/units/definitions/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/units/definitions/__pycache__/dimension_definitions.cpython-310.pyc,, +sympy/physics/units/definitions/__pycache__/unit_definitions.cpython-310.pyc,, +sympy/physics/units/definitions/dimension_definitions.py,sha256=5r_WDnyWFX0T8bTjDA6pnr5PqRKv5XGTm0LuJrZ6ffM,1745 +sympy/physics/units/definitions/unit_definitions.py,sha256=kldfMjhOFdJAbYgZiJPUFtyUVINovDf4XTTC0mkoiDU,14374 +sympy/physics/units/dimensions.py,sha256=B2jT7BEsyCSZmUxH6RYrP9gVGeXLn0nLhgMT9gFODW4,20911 +sympy/physics/units/prefixes.py,sha256=ENV04BUHeebXK2U8jf7ZQdYQ-dZUGm1K2m6BYwJYF2w,6224 +sympy/physics/units/quantities.py,sha256=r5E231CULmsSEM7Rh7zfcTPuR85_X0CwRCVU_nDsek0,4671 +sympy/physics/units/systems/__init__.py,sha256=jJuvdc15c83yl11IuvhyjijwOZ9m1JGgZOgKwKv2e2o,244 +sympy/physics/units/systems/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/cgs.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/length_weight_time.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/mks.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/mksa.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/natural.cpython-310.pyc,, +sympy/physics/units/systems/__pycache__/si.cpython-310.pyc,, +sympy/physics/units/systems/cgs.py,sha256=gXbX8uuZo7lcYIENA-CpAnyS9WVQy-vRisxlQm-198A,3702 +sympy/physics/units/systems/length_weight_time.py,sha256=DXIDSWdhjfxGLA0ldOziWhwQjzTAs7-VQTNCHzDvCgY,7004 +sympy/physics/units/systems/mks.py,sha256=Z3eX9yWK9BdvEosCROK2qRKtKFYOjtQ50Jk6vFT7AQY,1546 +sympy/physics/units/systems/mksa.py,sha256=U8cSI-maIuLJRvpKLBuZA8V19LDRYVc2I40Rao-wvjk,2002 +sympy/physics/units/systems/natural.py,sha256=43Odvmtxdpbz8UcW_xoRE9ArJVVdF7dgdAN2ByDAXx4,909 +sympy/physics/units/systems/si.py,sha256=YBPUuovW3-JBDZYuStXXRaC8cfzE3En3K5MjNy5pLJk,14478 +sympy/physics/units/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/units/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_dimensions.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_dimensionsystem.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_prefixes.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_quantities.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_unit_system_cgs_gauss.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_unitsystem.cpython-310.pyc,, +sympy/physics/units/tests/__pycache__/test_util.cpython-310.pyc,, +sympy/physics/units/tests/test_dimensions.py,sha256=lzkgGfEXMHxB8Izv7nRTN2uOEPh65LXPYaG8Kr5H05o,6122 +sympy/physics/units/tests/test_dimensionsystem.py,sha256=s2_2RAJwOaPOTvyIiAO9SYap374ytZqWbatWkLCnbSU,2717 +sympy/physics/units/tests/test_prefixes.py,sha256=IFeF1tq9SkyqJLOLy5h42oMW7PDJ1QKtvyu0EbN3rxY,2198 +sympy/physics/units/tests/test_quantities.py,sha256=_OmQ1qBPud8-lVesvVNhQLrwRh9qp7rXMSGzqTtqCr0,20055 +sympy/physics/units/tests/test_unit_system_cgs_gauss.py,sha256=JepTWt8yGdtv5dQ2AKUKb9fxpuYqLWOp0oOmzov9vfY,3173 +sympy/physics/units/tests/test_unitsystem.py,sha256=1Xh78_8hbv-yP4ICWI_dUrOnk3cimlvP_VhO-EXOa7Q,3254 +sympy/physics/units/tests/test_util.py,sha256=f2pOxVLArai5EwRAriPh9rQdxIyhFpZ4v7WEB0CI-SI,8465 
+sympy/physics/units/unitsystem.py,sha256=UXFcmQoI8Hl89v4ixEfh35g__o6AgQPzgvLJhCLIFtA,7618 +sympy/physics/units/util.py,sha256=dgMkwlaYWO2D1QwSpGKFfYluqzdN6TUp-aIgXo8-W1o,9602 +sympy/physics/vector/__init__.py,sha256=jZmrNB6ZfY7NOP8nx8GWcfI2Ixb2mv7lXuGHn63kyOw,985 +sympy/physics/vector/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/vector/__pycache__/dyadic.cpython-310.pyc,, +sympy/physics/vector/__pycache__/fieldfunctions.cpython-310.pyc,, +sympy/physics/vector/__pycache__/frame.cpython-310.pyc,, +sympy/physics/vector/__pycache__/functions.cpython-310.pyc,, +sympy/physics/vector/__pycache__/point.cpython-310.pyc,, +sympy/physics/vector/__pycache__/printing.cpython-310.pyc,, +sympy/physics/vector/__pycache__/vector.cpython-310.pyc,, +sympy/physics/vector/dyadic.py,sha256=qDsDiWZ8nTOVKKjST3MasskWUvrv8o8CZeLTXfJjp6Y,19538 +sympy/physics/vector/fieldfunctions.py,sha256=1tzyV2iH6-UIPJ6W4UhgOZHTGxAbnWhmdTxbz12Z528,8593 +sympy/physics/vector/frame.py,sha256=5wHaV4FIAC0XjvX5ziFmBwB2P2wKPk1Sipb6ao6STn0,52933 +sympy/physics/vector/functions.py,sha256=Fp3Fx0donNUPj9rkZ03xFC8HhUys4UvogK69ah2Sd3o,24583 +sympy/physics/vector/point.py,sha256=9hUKwsM_5npy9FuDSHe9eiOLQLfmZZE49rVxwEhPT2U,20446 +sympy/physics/vector/printing.py,sha256=iQmyZQib-9Oa7_suxwHplJ9HW198LPGmptDldwqRl20,11792 +sympy/physics/vector/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/physics/vector/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_dyadic.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_fieldfunctions.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_frame.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_functions.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_output.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_point.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_printing.cpython-310.pyc,, +sympy/physics/vector/tests/__pycache__/test_vector.cpython-310.pyc,, +sympy/physics/vector/tests/test_dyadic.py,sha256=09VKP_uSaiJny5LxNlkSMwU_LdQhZ6yGqoD1GG4dc2U,4292 +sympy/physics/vector/tests/test_fieldfunctions.py,sha256=FUjh18QzB6dXSau9iHutb36o28faSa7T9sB0icpja-M,5825 +sympy/physics/vector/tests/test_frame.py,sha256=sk4atyErDljoa9Q4YDDWoubBOxfkSXR3mKTmYAO_2vE,26102 +sympy/physics/vector/tests/test_functions.py,sha256=5gR01x9HlqM_DViSlu7Yf1m5NQWI2oqBe1a3dRkBcIc,20763 +sympy/physics/vector/tests/test_output.py,sha256=TFqso2YUb5zw4oX6H206Wu0XTwJZFKPY92gd68ktMN4,2631 +sympy/physics/vector/tests/test_point.py,sha256=B6Yk7K-ouyN-VBXycDJV4sOYrPyFf8a_Q-Ytx7vq1mo,12257 +sympy/physics/vector/tests/test_printing.py,sha256=kptiX3xy_xPSyg8f4xZ2jJnorynPvfTenOBtntsYXaY,10433 +sympy/physics/vector/tests/test_vector.py,sha256=Jm6DeizQxKY-CD7722--Ko073bcN4jJJ-geRoNkofs4,9458 +sympy/physics/vector/vector.py,sha256=o9Ov2GD6-_4eZwqpNkaB1DvCioSXAVtR0HFoRneNEEc,27533 +sympy/physics/wigner.py,sha256=4jYcv62gfHJGlJfYcbn06BFmNIs5JCiEBNnxUbg2Oyo,37605 +sympy/plotting/__init__.py,sha256=hAdOjai8-laj79rLJ2HZbiW1okXlz0p1ck-CoeNU6m8,526 +sympy/plotting/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/__pycache__/experimental_lambdify.cpython-310.pyc,, +sympy/plotting/__pycache__/plot.cpython-310.pyc,, +sympy/plotting/__pycache__/plot_implicit.cpython-310.pyc,, +sympy/plotting/__pycache__/textplot.cpython-310.pyc,, +sympy/plotting/experimental_lambdify.py,sha256=wIvB02vdrI-nEJX3TqInsf0v8705JI5lcVgMJsJbtO0,22879 
+sympy/plotting/intervalmath/__init__.py,sha256=fQV7sLZ9NHpZO5XGl2ZfqX56x-mdq-sYhtWEKLngHlU,479 +sympy/plotting/intervalmath/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/intervalmath/__pycache__/interval_arithmetic.cpython-310.pyc,, +sympy/plotting/intervalmath/__pycache__/interval_membership.cpython-310.pyc,, +sympy/plotting/intervalmath/__pycache__/lib_interval.cpython-310.pyc,, +sympy/plotting/intervalmath/interval_arithmetic.py,sha256=OibkI5I0i6_NpFd1HEl48d_R4PRWofUoOS4HYQBkVOc,15530 +sympy/plotting/intervalmath/interval_membership.py,sha256=1VpO1T7UjvPxcMySC5GhZl8-VM_DxIirSWC3ZGmxIAY,2385 +sympy/plotting/intervalmath/lib_interval.py,sha256=WY1qRtyub4MDJaZizw6cXQI5NMEIXBO9UEWPEI80aW8,14809 +sympy/plotting/intervalmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/plotting/intervalmath/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/intervalmath/tests/__pycache__/test_interval_functions.cpython-310.pyc,, +sympy/plotting/intervalmath/tests/__pycache__/test_interval_membership.cpython-310.pyc,, +sympy/plotting/intervalmath/tests/__pycache__/test_intervalmath.cpython-310.pyc,, +sympy/plotting/intervalmath/tests/test_interval_functions.py,sha256=gdIo5z54tIbG8hDaGd3I8rBDP67oetMZWWdM-uvt1ec,9862 +sympy/plotting/intervalmath/tests/test_interval_membership.py,sha256=D1KjcrLxAwOmDEUqA-8TCqkFWGtmeerR9KwmzS7tyjk,4216 +sympy/plotting/intervalmath/tests/test_intervalmath.py,sha256=ndBMczrs6xYMN5RGnyCL9yq7pNUxrXHTSU1mdUsp5tU,9034 +sympy/plotting/plot.py,sha256=eTKGJmFyTycCNb6CquLGutB9d92PdlllxW1Wn0W6Q-k,92139 +sympy/plotting/plot_implicit.py,sha256=2kRJ0YRrsDKad8Q34UXdy4lOVGKh6LvL6LokPVDZN8A,15683 +sympy/plotting/pygletplot/__init__.py,sha256=DM7GURQbdSfcddHz23MxOShatBFc26tP_sd3G8pGCQE,3732 +sympy/plotting/pygletplot/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/color_scheme.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/managed_window.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_axes.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_camera.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_controller.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_curve.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_interval.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_mode.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_mode_base.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_modes.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_object.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_rotation.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_surface.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/plot_window.cpython-310.pyc,, +sympy/plotting/pygletplot/__pycache__/util.cpython-310.pyc,, +sympy/plotting/pygletplot/color_scheme.py,sha256=NgPUamkldygfrIPj0LvC_1AzhscVtg18FSudElvFYB8,12522 +sympy/plotting/pygletplot/managed_window.py,sha256=N7AKtM7ELfIJLie6zvI-J6-OQRBnMZu6AL1USz7hFEk,3072 +sympy/plotting/pygletplot/plot.py,sha256=s-5AJB0KelHs9WGoFIVIdYrOoMXfdpnM5-G2cF8xzDQ,13352 +sympy/plotting/pygletplot/plot_axes.py,sha256=Q9YN8W0Hd1PeflHLvOvSZ-hxeLU4Kq3nUFLYDC0x0E8,8655 +sympy/plotting/pygletplot/plot_camera.py,sha256=yfkGg7TF3yPhhRUDhvPMT1uJgSboTwgAOtKOJdP7d8E,4001 +sympy/plotting/pygletplot/plot_controller.py,sha256=MroJJSPCbBDT8gGs_GdqpV_KHsllMNJpxx0MU3vKJV8,6941 
+sympy/plotting/pygletplot/plot_curve.py,sha256=YwKA2lYC7IwCOQJaOVnww8AAG4P36cArgbC1iLV9OFI,2838 +sympy/plotting/pygletplot/plot_interval.py,sha256=doqr2wxnrED4MJDlkxQ07GFvaagX36HUb77ly_vIuKQ,5431 +sympy/plotting/pygletplot/plot_mode.py,sha256=Djq-ewVms_JoSriDpolDhhtttBJQdJO8BD4E0nyOWcQ,14156 +sympy/plotting/pygletplot/plot_mode_base.py,sha256=3z3WjeN7TTslHJevhr3X_7HRHPgUleYSngu6285lR6k,11502 +sympy/plotting/pygletplot/plot_modes.py,sha256=gKzJShz6OXa6EHKar8SuHWrELVznxg_s2d5IBQkkeYE,5352 +sympy/plotting/pygletplot/plot_object.py,sha256=qGtzcKup4It1CqZ2jxA7FnorCua4S9I-B_7I3SHBjcQ,330 +sympy/plotting/pygletplot/plot_rotation.py,sha256=K8MyudYRS2F-ku5blzkWg3q3goMDPUsXqzmHLDU2Uqc,1447 +sympy/plotting/pygletplot/plot_surface.py,sha256=C0q9tzDmxzC1IpWiNKY4llzcopx6dhotGOLpK1N9m3s,3803 +sympy/plotting/pygletplot/plot_window.py,sha256=5boC2Fkmk46-gWGqWzdTkPmTMNHHOpA0CnB9q946Hwc,4643 +sympy/plotting/pygletplot/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/plotting/pygletplot/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/pygletplot/tests/__pycache__/test_plotting.cpython-310.pyc,, +sympy/plotting/pygletplot/tests/test_plotting.py,sha256=NisjR-yuBRJfQvjcb20skTR3yid2U3MhKHW6sy8RE10,2720 +sympy/plotting/pygletplot/util.py,sha256=mzQQgDDbp04B03KyJrossLp8Yq72RJzjp-3ArfjbMH8,4621 +sympy/plotting/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/plotting/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/plotting/tests/__pycache__/test_experimental_lambdify.cpython-310.pyc,, +sympy/plotting/tests/__pycache__/test_plot.cpython-310.pyc,, +sympy/plotting/tests/__pycache__/test_plot_implicit.cpython-310.pyc,, +sympy/plotting/tests/__pycache__/test_textplot.cpython-310.pyc,, +sympy/plotting/tests/test_experimental_lambdify.py,sha256=EYshdXA5tAGWolaDX-nHAolp7xIJN4Oqb1Uc1C1IhJI,3127 +sympy/plotting/tests/test_plot.py,sha256=HWledOPr2xKq3XFGr458Lc5c0wgf2e0IFa4j63bfdH0,25204 +sympy/plotting/tests/test_plot_implicit.py,sha256=gXXMvVCIlp3HeN12Ej636RnhNEmV3i5WnDA48rjRPOg,5804 +sympy/plotting/tests/test_region_and.png,sha256=EV0Lm4HtQPk_6eIWtPY4TPcQk-O7tkpdZIuLmFjGRaA,6864 +sympy/plotting/tests/test_region_not.png,sha256=3O_9_nPW149FMULEcT5RqI2-k2H3nHELbfJADt2cO8k,7939 +sympy/plotting/tests/test_region_or.png,sha256=5Bug09vyog-Cu3mky7pbtFjew5bMvbpe0ZXWsgDKfy4,8809 +sympy/plotting/tests/test_region_xor.png,sha256=kucVWBA9A98OpcR4did5aLXUyoq4z0O4C3PM6dliBSw,10002 +sympy/plotting/tests/test_textplot.py,sha256=VurTGeMjUfBLpLdoMqzJK9gbcShNb7f1OrAcRNyrtag,12761 +sympy/plotting/textplot.py,sha256=M3TEzIDV6l6CpMpPZcAVrO-Y_pYbRRCsbuPMGAaQEXs,4921 +sympy/polys/__init__.py,sha256=2ZG4bdqNChU1niEsfBNC57G9B51TLYxiDy5WG5_2kMc,5545 +sympy/polys/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/__pycache__/appellseqs.cpython-310.pyc,, +sympy/polys/__pycache__/compatibility.cpython-310.pyc,, +sympy/polys/__pycache__/constructor.cpython-310.pyc,, +sympy/polys/__pycache__/densearith.cpython-310.pyc,, +sympy/polys/__pycache__/densebasic.cpython-310.pyc,, +sympy/polys/__pycache__/densetools.cpython-310.pyc,, +sympy/polys/__pycache__/dispersion.cpython-310.pyc,, +sympy/polys/__pycache__/distributedmodules.cpython-310.pyc,, +sympy/polys/__pycache__/domainmatrix.cpython-310.pyc,, +sympy/polys/__pycache__/euclidtools.cpython-310.pyc,, +sympy/polys/__pycache__/factortools.cpython-310.pyc,, +sympy/polys/__pycache__/fglmtools.cpython-310.pyc,, +sympy/polys/__pycache__/fields.cpython-310.pyc,, +sympy/polys/__pycache__/galoistools.cpython-310.pyc,, 
+sympy/polys/__pycache__/groebnertools.cpython-310.pyc,, +sympy/polys/__pycache__/heuristicgcd.cpython-310.pyc,, +sympy/polys/__pycache__/modulargcd.cpython-310.pyc,, +sympy/polys/__pycache__/monomials.cpython-310.pyc,, +sympy/polys/__pycache__/multivariate_resultants.cpython-310.pyc,, +sympy/polys/__pycache__/orderings.cpython-310.pyc,, +sympy/polys/__pycache__/orthopolys.cpython-310.pyc,, +sympy/polys/__pycache__/partfrac.cpython-310.pyc,, +sympy/polys/__pycache__/polyclasses.cpython-310.pyc,, +sympy/polys/__pycache__/polyconfig.cpython-310.pyc,, +sympy/polys/__pycache__/polyerrors.cpython-310.pyc,, +sympy/polys/__pycache__/polyfuncs.cpython-310.pyc,, +sympy/polys/__pycache__/polymatrix.cpython-310.pyc,, +sympy/polys/__pycache__/polyoptions.cpython-310.pyc,, +sympy/polys/__pycache__/polyquinticconst.cpython-310.pyc,, +sympy/polys/__pycache__/polyroots.cpython-310.pyc,, +sympy/polys/__pycache__/polytools.cpython-310.pyc,, +sympy/polys/__pycache__/polyutils.cpython-310.pyc,, +sympy/polys/__pycache__/rationaltools.cpython-310.pyc,, +sympy/polys/__pycache__/ring_series.cpython-310.pyc,, +sympy/polys/__pycache__/rings.cpython-310.pyc,, +sympy/polys/__pycache__/rootisolation.cpython-310.pyc,, +sympy/polys/__pycache__/rootoftools.cpython-310.pyc,, +sympy/polys/__pycache__/solvers.cpython-310.pyc,, +sympy/polys/__pycache__/specialpolys.cpython-310.pyc,, +sympy/polys/__pycache__/sqfreetools.cpython-310.pyc,, +sympy/polys/__pycache__/subresultants_qq_zz.cpython-310.pyc,, +sympy/polys/agca/__init__.py,sha256=fahpWoG_0LgoqOXBnDBJS16Jj1fE1_VKG7edM3qZ2HE,130 +sympy/polys/agca/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/agca/__pycache__/extensions.cpython-310.pyc,, +sympy/polys/agca/__pycache__/homomorphisms.cpython-310.pyc,, +sympy/polys/agca/__pycache__/ideals.cpython-310.pyc,, +sympy/polys/agca/__pycache__/modules.cpython-310.pyc,, +sympy/polys/agca/extensions.py,sha256=v3VmKWXQeyPuwNGyizfR6ZFb4GkRZ97xREHawuLWqpg,9168 +sympy/polys/agca/homomorphisms.py,sha256=gaMNV96pKUuYHZ8Bd7QOs27J1IbbJgkEjyWcTLe8GFI,21937 +sympy/polys/agca/ideals.py,sha256=8rh6iQt26zF0qKzHlfqGXKZzKuGY6Y5t9hBNVGG9v5M,10891 +sympy/polys/agca/modules.py,sha256=UZBnmvsQTHRkSVGdst6nksp9a07ZYD65eArjL91n3-Q,46946 +sympy/polys/agca/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/agca/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/agca/tests/__pycache__/test_extensions.cpython-310.pyc,, +sympy/polys/agca/tests/__pycache__/test_homomorphisms.cpython-310.pyc,, +sympy/polys/agca/tests/__pycache__/test_ideals.cpython-310.pyc,, +sympy/polys/agca/tests/__pycache__/test_modules.cpython-310.pyc,, +sympy/polys/agca/tests/test_extensions.py,sha256=i3IHQNXQByFMCvjjyd_hwwJSCiUj0z1rRwS9WFK2AFc,6455 +sympy/polys/agca/tests/test_homomorphisms.py,sha256=m0hFmcTzvZ8sZbbnWeENwzKyufpE9zWwZR-WCI4kdpU,4224 +sympy/polys/agca/tests/test_ideals.py,sha256=w76qXO-_HN6LQbV7l3h7gJZsM-DZ2io2X-kPWiHYRNw,3788 +sympy/polys/agca/tests/test_modules.py,sha256=HdfmcxdEVucEbtfmzVq8i_1wGojT5b5DE5VIfbTMx3k,13552 +sympy/polys/appellseqs.py,sha256=hWeDKsKnJuAuPN_5IU6m1okurAq9xMt3LQgMehcvBKQ,8305 +sympy/polys/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/benchmarks/__pycache__/bench_galoispolys.cpython-310.pyc,, +sympy/polys/benchmarks/__pycache__/bench_groebnertools.cpython-310.pyc,, +sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc,, 
+sympy/polys/benchmarks/bench_galoispolys.py,sha256=8RtN9ZQga2oxscVPPkMGB29Dz8UbskMS2szYtqZ69u0,1502 +sympy/polys/benchmarks/bench_groebnertools.py,sha256=YqGDCzewRszCye_GnneDXMRNB38ORSpVu_Jn0ELIySo,803 +sympy/polys/benchmarks/bench_solvers.py,sha256=gLrZguh6pE0E4_vM2GeOS5bHnrcSUQXqD0Qz9tItfmo,446778 +sympy/polys/compatibility.py,sha256=OkpZiIrD2u_1YB7dE2NJmhpt1UZoBNoX2JBY3q1Uixo,57743 +sympy/polys/constructor.py,sha256=4hqADMZrcLOsnzVebcZxnn3LJ7HdPIHReq0Qalf91EY,11371 +sympy/polys/densearith.py,sha256=6lkYHNpTPp2qq8qKBNiK9V-xNqLg0MYcoi_ksKaNBcg,34108 +sympy/polys/densebasic.py,sha256=H9DimmE5zLuEpzyYvTWBViBJTe5bbLj-1RefaAy2XXk,35922 +sympy/polys/densetools.py,sha256=q75QA1e0rH9TpVbTGIwRgeisNFt-7HiRcdPUEdHYN2E,25902 +sympy/polys/dispersion.py,sha256=s6GIYnGA6U9jhGP7YXQQS8G3byG4-kPbr55BR6p-iz4,5740 +sympy/polys/distributedmodules.py,sha256=t8pLIgDQs_dMecGXwybVYoLavofEy2DXhFS8N5gj5SU,21827 +sympy/polys/domainmatrix.py,sha256=FmNqklNFQR1WrQYtP2r7jypw2IQadNKGP14EaUaxUqI,310 +sympy/polys/domains/__init__.py,sha256=T6qPNkU1EJ6D5BnvyJSXJv4zeJ5MUT5RLsovMkkXS9E,1872 +sympy/polys/domains/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/domains/__pycache__/algebraicfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/characteristiczero.cpython-310.pyc,, +sympy/polys/domains/__pycache__/complexfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/compositedomain.cpython-310.pyc,, +sympy/polys/domains/__pycache__/domain.cpython-310.pyc,, +sympy/polys/domains/__pycache__/domainelement.cpython-310.pyc,, +sympy/polys/domains/__pycache__/expressiondomain.cpython-310.pyc,, +sympy/polys/domains/__pycache__/expressionrawdomain.cpython-310.pyc,, +sympy/polys/domains/__pycache__/field.cpython-310.pyc,, +sympy/polys/domains/__pycache__/finitefield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/fractionfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/gaussiandomains.cpython-310.pyc,, +sympy/polys/domains/__pycache__/gmpyfinitefield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/gmpyintegerring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/gmpyrationalfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/groundtypes.cpython-310.pyc,, +sympy/polys/domains/__pycache__/integerring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/modularinteger.cpython-310.pyc,, +sympy/polys/domains/__pycache__/mpelements.cpython-310.pyc,, +sympy/polys/domains/__pycache__/old_fractionfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/old_polynomialring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/polynomialring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/pythonfinitefield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/pythonintegerring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/pythonrational.cpython-310.pyc,, +sympy/polys/domains/__pycache__/pythonrationalfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/quotientring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/rationalfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/realfield.cpython-310.pyc,, +sympy/polys/domains/__pycache__/ring.cpython-310.pyc,, +sympy/polys/domains/__pycache__/simpledomain.cpython-310.pyc,, +sympy/polys/domains/algebraicfield.py,sha256=hg2F7SBrc0I-uqRa90ehtHiF6bCo_AB98XDHRRcGFZw,21556 +sympy/polys/domains/characteristiczero.py,sha256=vHYRUXPrfJzDF8wrd1KSFqG8WzwfITP_eweA-SHPVYA,382 +sympy/polys/domains/complexfield.py,sha256=2GjeNMebTXxLHDkKYqbrP-hZqBXHoc_Uv7kk7xIyPcw,4620 +sympy/polys/domains/compositedomain.py,sha256=wgw_yKwC5gHYWxRHEbVDeHOKQycFkZH0ZxhVES0AR04,1042 
+sympy/polys/domains/domain.py,sha256=KOj3-sDzLox86n3Av2Vl6nExWszyWXkJz0-lDpXDwJ4,38006 +sympy/polys/domains/domainelement.py,sha256=IrG-Mzv_VlCAmE-hmJVH_d77TrsfyaGGfJVmU8FFvlY,860 +sympy/polys/domains/expressiondomain.py,sha256=rk2Vky-C5sQiOtkWbtxh1s5_aOALGCREzq-R6qxVZ-I,6924 +sympy/polys/domains/expressionrawdomain.py,sha256=cXarD2jXi97FGNiqNiDqQlX0g764EW2M1PEbrveImnY,1448 +sympy/polys/domains/field.py,sha256=tyOjEqABaOXXkaBEL0qLqyG4g5Ktnd782B_6xTCfia8,2591 +sympy/polys/domains/finitefield.py,sha256=yFU8-FvoDxGQ9Yo-mKlOqnB-91ctpz_TT0zLRmx-iQI,6025 +sympy/polys/domains/fractionfield.py,sha256=pKR3dfOOXqBIwf3jvRnaqgA-t1YYWdubCuz3yNnxepU,5945 +sympy/polys/domains/gaussiandomains.py,sha256=qkbqSXzumxwQq7QGAyvNsgJZlzF5MbvN2O9nz2li-kQ,17975 +sympy/polys/domains/gmpyfinitefield.py,sha256=C_Nd9GubSMBJmIe5vs_C2IuBT8YGFL4xgK4oixNCOrk,444 +sympy/polys/domains/gmpyintegerring.py,sha256=U6Ph1_5Ez5bXN4JcF2Tsq1FUDEwYsGx0nUT-gZDvO5U,3017 +sympy/polys/domains/gmpyrationalfield.py,sha256=dZjrfcWaUA-BHUtutzLOWPlOSNLYzBqSFeukER6L_bA,3178 +sympy/polys/domains/groundtypes.py,sha256=bHPHdmpFRBWe86TNMSsE6m5grvE0bQWLWnRGRBBxMpQ,1615 +sympy/polys/domains/integerring.py,sha256=T2MvIiEI3OPFoOQ5Ep3HgZhNU1evP-Wxu0oDVG7oJa8,6085 +sympy/polys/domains/modularinteger.py,sha256=bAUskiiX1j-n9SLx79jUCPOuO9mDNbzUcuijRcI7Hg4,5094 +sympy/polys/domains/mpelements.py,sha256=MxymxwlGBA3Px2FFyzISEtAnkVoxeq-bJM1fk2jkEts,4616 +sympy/polys/domains/old_fractionfield.py,sha256=6qVb4Zzfq8ArxDyghXwW5Vvw4SattdIt0HUx4WcnD8U,6178 +sympy/polys/domains/old_polynomialring.py,sha256=_Rengtf5vN3w9GJAsDFcN3yKbWjYqkTbsPdxbtbplnE,14914 +sympy/polys/domains/polynomialring.py,sha256=kStXSAtq1b5Tk3vrEze7_E8UMn8bF91Goh7hVzhtax0,6153 +sympy/polys/domains/pythonfinitefield.py,sha256=RYwDRg1zVLLGtJvVXvWhwUZjC91g8pXTwAjuQoWezks,460 +sympy/polys/domains/pythonintegerring.py,sha256=qUBqWBtP_faY-m2tJA07JQyCTdh27tXVBDD7vsKNUn4,2929 +sympy/polys/domains/pythonrational.py,sha256=M3VUGODh3MLElePjYtjt9b02ReMThw-XXpuQTkohgNs,548 +sympy/polys/domains/pythonrationalfield.py,sha256=x8BPkGKj0WPuwJzN2py5l9aAjHaY4djv65c4tzUTr3Y,2295 +sympy/polys/domains/quotientring.py,sha256=LBUIIpN3y3QPS6pFYWwqpca5ShoWDyaZbZ6PwDm_SmA,5866 +sympy/polys/domains/rationalfield.py,sha256=-4rLYoh3IhsURx09OtLR3A29NLDi_RO-QzWO3RGoy8Q,4869 +sympy/polys/domains/realfield.py,sha256=Wt5_y7HTDe8u1qGalhNhTT7Rw3CQiVkmgduQ7jcpD9c,3782 +sympy/polys/domains/ring.py,sha256=p66U2X58acSHLHxOTU6aJZ0Umdcu1qiGIUDtV8iJCD0,3236 +sympy/polys/domains/simpledomain.py,sha256=_K-Zz8Opf505r3eHSrbPAlnGiGSjY_O4Cwa4OTeOSoY,369 +sympy/polys/domains/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/domains/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/domains/tests/__pycache__/test_domains.cpython-310.pyc,, +sympy/polys/domains/tests/__pycache__/test_polynomialring.cpython-310.pyc,, +sympy/polys/domains/tests/__pycache__/test_quotientring.cpython-310.pyc,, +sympy/polys/domains/tests/test_domains.py,sha256=1PsckHIBXMQFm-sgSDMjiUor2c-000iEZhqqPV9pfR4,43846 +sympy/polys/domains/tests/test_polynomialring.py,sha256=gW82jcxL2J5nKrA4iDCuk88K1bqpfAG7z32Y9191mKU,3312 +sympy/polys/domains/tests/test_quotientring.py,sha256=BYoq1CqI76RDSm0xQdp1v7Dv1n5sdcmes-b_y_AfW-0,1459 +sympy/polys/euclidtools.py,sha256=h8qC0ZsXf-ZKPLIMBaLV2aSCHDuXLQBczKZcU-J2BaE,41221 +sympy/polys/factortools.py,sha256=AghhwHVn_wJsEBBo-THmMIKT9zr-gBJlkLTctJrT_eY,38457 +sympy/polys/fglmtools.py,sha256=KYZuP4CxAN3KP6If3hM53HKM4S87rNU2HecwbYjWfOE,4302 
+sympy/polys/fields.py,sha256=HEXUOH-bhYkTTXyev87LZPsyK3-aeqCmGRgErFiJzhA,21245 +sympy/polys/galoistools.py,sha256=cuwAArjtyoV4wfaQtX8fs4mz4ZXLuc6yKvHObyXgnw8,52133 +sympy/polys/groebnertools.py,sha256=NhK-XcFR9e4chDDJJ-diXb7XYuw9zcixFA_riomThPM,23342 +sympy/polys/heuristicgcd.py,sha256=rD3intgKCtAAMH3sqlgqbJL1XSq9QjfeG_MYzwCOek0,3732 +sympy/polys/matrices/__init__.py,sha256=ZaPJMi8l22d3F3rudS4NqzSt0xwxbs3uwnQwlhhR91o,397 +sympy/polys/matrices/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/_typing.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/ddm.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/dense.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/domainmatrix.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/domainscalar.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/eigen.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/exceptions.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/linsolve.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/lll.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/normalforms.cpython-310.pyc,, +sympy/polys/matrices/__pycache__/sdm.cpython-310.pyc,, +sympy/polys/matrices/_typing.py,sha256=ZMxO82uprk9lCq4ClHL-pg6_wOmmnLozg0sQhJrjbbk,319 +sympy/polys/matrices/ddm.py,sha256=a-NJkOmGtm0P8Y88e9frpxRwap-gGZluG07oDReeyTg,13586 +sympy/polys/matrices/dense.py,sha256=LcFY1OAEvIaXzdToD84VvU_DZmNwRSiZt3PA-6YCwMQ,8718 +sympy/polys/matrices/domainmatrix.py,sha256=KeXk7Q0vTweGAWZduZHo2u0RUl2g2EnPeCXgz-16vrQ,47889 +sympy/polys/matrices/domainscalar.py,sha256=zosOQfLeKsMpAv1sm-JHPneGmMTeELvAloNxKMkZ8Uo,3643 +sympy/polys/matrices/eigen.py,sha256=pvICWI8_r_usa0EFqlbz7I8ASzKMK2j2gn-65CmTSPU,2983 +sympy/polys/matrices/exceptions.py,sha256=ay3Lv21X3QqszysBN71xdr9KGQuC5kDBl90a2Sjx6pM,1351 +sympy/polys/matrices/linsolve.py,sha256=fuuS_NvFFw7vP7KEtkfursOtgJmnIWSv9PEZv56ovOE,7548 +sympy/polys/matrices/lll.py,sha256=8vWLPm3SaFDY5pAwawzb2paF29hmJBucVdxwqGEzcAk,3556 +sympy/polys/matrices/normalforms.py,sha256=SkrGcuvfi27Bb3UeU_HHtCU4HrPSZSz1Azh5p4TqZ68,13105 +sympy/polys/matrices/sdm.py,sha256=Y_GV0aMlJDDa452OA72EwxvwKQAA3NaZRGVRwqwbKTI,35571 +sympy/polys/matrices/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/matrices/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_ddm.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_dense.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_domainmatrix.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_domainscalar.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_eigen.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_linsolve.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_lll.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_normalforms.cpython-310.pyc,, +sympy/polys/matrices/tests/__pycache__/test_sdm.cpython-310.pyc,, +sympy/polys/matrices/tests/test_ddm.py,sha256=3tFhjkA1alE827Qiw9mAPlWkSgV3Sesrqeh-NxHXsA4,16640 +sympy/polys/matrices/tests/test_dense.py,sha256=Ig_SJ86pogur9AEfcetO_L01fy1WFhe-E9g9ngVTlxs,9483 +sympy/polys/matrices/tests/test_domainmatrix.py,sha256=IjRa6uCfAu1hm6XrN1fUUaAA2GeVxi5IgVaf4vZc4Lk,32371 +sympy/polys/matrices/tests/test_domainscalar.py,sha256=9HQL95XlxyXHNDf_UBN9t1da_9syRNZGOb7IKkmjn-U,3624 +sympy/polys/matrices/tests/test_eigen.py,sha256=T1lYZeW-0NwDxDOG6ZJLr-OICfxY2wa0fVHV2V6EXSk,3200 +sympy/polys/matrices/tests/test_linsolve.py,sha256=G1LCDkB3BDUuDzQuUxn4jCjqUSbCwMX_lfkVXDLe-k0,3334 
+sympy/polys/matrices/tests/test_lll.py,sha256=Zg7rNTlywHgrhr9OYpRj5yW6t2JPzJvwcclCvRNc7xw,6480 +sympy/polys/matrices/tests/test_normalforms.py,sha256=_4Cm3EJxHh3TEwF278uB7WQZweFWFsx3j0zc2AZFgDI,3036 +sympy/polys/matrices/tests/test_sdm.py,sha256=H0oNZkNmwpP8i6UpysnkD7yave0E3YU3Z8dKGobSbOA,14000 +sympy/polys/modulargcd.py,sha256=vE57ZJv1iJNKHcRbFJBgG6Jytudweq3wyDB90yxtFCc,58664 +sympy/polys/monomials.py,sha256=R2o7vpjdZdpp57u-PrKw1REk_Cr9uoNcum1a8DnDHZg,18925 +sympy/polys/multivariate_resultants.py,sha256=G9NCKrb5MBoUshiB_QD86w6MwQAxLwOmc-_HFO_ZXdE,15265 +sympy/polys/numberfields/__init__.py,sha256=ZfhC9MyfGfGUz_DT_rXasB-M_P2zUiZXOJUNh_Gtm8c,538 +sympy/polys/numberfields/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/basis.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/exceptions.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/galois_resolvents.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/galoisgroups.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/minpoly.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/modules.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/primes.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/resolvent_lookup.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/subfield.cpython-310.pyc,, +sympy/polys/numberfields/__pycache__/utilities.cpython-310.pyc,, +sympy/polys/numberfields/basis.py,sha256=IPA6cSwz-53ClQwo-wkmRzfx9pRX4iBhiggdLMVSgJ0,8261 +sympy/polys/numberfields/exceptions.py,sha256=IN36PiHvWvH5YOtWmU0EHSPiKhGryPezcOawdQmesMo,1668 +sympy/polys/numberfields/galois_resolvents.py,sha256=iGuCtXU5ZsoyHZVIbj7eh3ry_zhdAtUaV30Df7pT8WM,24858 +sympy/polys/numberfields/galoisgroups.py,sha256=_ORI7MYUyWhBuDsRL9W0olW5piJLkRNFsbRoJPPkryk,20665 +sympy/polys/numberfields/minpoly.py,sha256=uMMy3Ddui5_oNUBS55JNLF5xAZywfJzUjINmWRw3_EU,27716 +sympy/polys/numberfields/modules.py,sha256=pK69MtEb5BcrSWU9E9jtpVxGhEcR-5XB8_qatpskFVk,69117 +sympy/polys/numberfields/primes.py,sha256=9UHrJrIDPhAcNtqrDcqXIm9Z-Ch69W_gKGOBfDKduro,23967 +sympy/polys/numberfields/resolvent_lookup.py,sha256=qfLNKOz_WjtXwpVlfzy8EkD4gw12epx9npE9HsjyIdg,40411 +sympy/polys/numberfields/subfield.py,sha256=_s8u4a1y1L4HhoKEpoemSvNrXdW0Mh4YvrUOozq_lvc,16480 +sympy/polys/numberfields/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/numberfields/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_basis.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_galoisgroups.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_minpoly.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_modules.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_numbers.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_primes.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_subfield.cpython-310.pyc,, +sympy/polys/numberfields/tests/__pycache__/test_utilities.cpython-310.pyc,, +sympy/polys/numberfields/tests/test_basis.py,sha256=96BJ7e4oPDKXyvlRrUkiQxmHyjRGpOkAC7R3ln-jgNE,4580 +sympy/polys/numberfields/tests/test_galoisgroups.py,sha256=3LFuMbV92VBFlqqEqjh37oQvmG8cgZ0pFxDCXUoYRL4,5036 +sympy/polys/numberfields/tests/test_minpoly.py,sha256=IA0WH56vMXbSQpiml78jZes1M1XZSHDRARv5tM4VGTQ,22590 +sympy/polys/numberfields/tests/test_modules.py,sha256=GU4166j_hMlB22uWxxIjV_ON8RsyvpaN7Ly3eK8_m8Y,22926 +sympy/polys/numberfields/tests/test_numbers.py,sha256=M0vZIBnjPBHV4vFUnPBILaqiR_cgSuU50kFB-v7l1gA,5988 
+sympy/polys/numberfields/tests/test_primes.py,sha256=JhcAkaQMgjkOSziQ2jZApJ8b8oviil5cUy0hfFqNmZg,9779 +sympy/polys/numberfields/tests/test_subfield.py,sha256=_aCbvukrahv-QyCwNT7EpTYC1u53yUlMhfGqV5GzW3Y,12215 +sympy/polys/numberfields/tests/test_utilities.py,sha256=T3YfFouXZNcBG2AfLEQ77Uqy-_TTufGTUsysmzUHNuA,3655 +sympy/polys/numberfields/utilities.py,sha256=aQBm_rgKxjHOCTktOYJ-aI5Cpb59IBvWJiyZCowcM-I,13081 +sympy/polys/orderings.py,sha256=IFieyj4LkFa7NDiGTZD3VwUY7mSN3GEjThKk0z5WJ1s,8500 +sympy/polys/orthopolys.py,sha256=Kjx3fSoLDpX-bXUlgkPQdOK_TutIidI0MHmJ-6cviKM,8526 +sympy/polys/partfrac.py,sha256=KzReYNMyYfgXUM-UFj67eQU7MQk6EsbfhVuf4_Tl_u0,14665 +sympy/polys/polyclasses.py,sha256=byf1JS2pYGCZXGvzaxnBC18r--jTf0OFqOjJxWy6z_U,54564 +sympy/polys/polyconfig.py,sha256=mgfFpp9SU159tA_PM2o04WZyzMoWfOtWZugRcHnP42c,1598 +sympy/polys/polyerrors.py,sha256=xByI-fqIHVYsYRm63NmHXlSSRCwSI9vZUoO-1Mf5Wlk,4744 +sympy/polys/polyfuncs.py,sha256=OEZpdYeHQADBJYqMw8JAyN4sw-jsJ6lzVH6m-CCoK8g,8547 +sympy/polys/polymatrix.py,sha256=83_9L66dbzVv0UfbPR3OTKtxZZ6sMaeOifMBPUDBeiM,9749 +sympy/polys/polyoptions.py,sha256=BqXFyhKVDoFRJlSSBb_jxOkWPzM2MpQ67BKiQR852A8,21721 +sympy/polys/polyquinticconst.py,sha256=mYLFWSBq3H3Y0I8cx76Z_xauLx1YeViC4xF6yWsSTPQ,96035 +sympy/polys/polyroots.py,sha256=etxwQFngxSLRgjRJ8AzPc28CCQm56xx9CRlp4MPwhl4,36995 +sympy/polys/polytools.py,sha256=H8xrnAGUu8Df_HStGD2wVpI-cKOhqEYlEECJ9ep3PHM,194263 +sympy/polys/polyutils.py,sha256=gGwRUZXAFv132f96uONc6Ybfh8xyyP9pAouNY6fX-uQ,16519 +sympy/polys/rationaltools.py,sha256=gkLu0YvsSJ2b04AOK7MV_rjp1m6exLkdqClOjrbBboo,2848 +sympy/polys/ring_series.py,sha256=qBKirsiZpM5x0ix4V5ntm7inynnahYCfVSgHZRCpccc,57766 +sympy/polys/rings.py,sha256=rparZxHTHV9j7Av3XUnAE2CSn1WglhXveO13IcuDljE,72970 +sympy/polys/rootisolation.py,sha256=vOvKe1Vi2uklmMB4qNy_EczSRzelMUqPB3o7qYdiWR0,64527 +sympy/polys/rootoftools.py,sha256=_rwgSXUkgg0bUsp949GiSz6ouoxuyysclg-fKGxRlYA,41040 +sympy/polys/solvers.py,sha256=CWrzPJNlosjhxScXzIHYZQwCjsLnkAgAeIgYrY92gbc,13519 +sympy/polys/specialpolys.py,sha256=B2vijl75zgUKUTY1HCqjB9BTDFf3FM8ugwkKGTB83XA,11038 +sympy/polys/sqfreetools.py,sha256=2Gdv9t9TNgdbnc-7XrpEhgYJfSvacHUyuE1aOWo9DXU,11464 +sympy/polys/subresultants_qq_zz.py,sha256=TDVS9-rEBXK88m4mAixuvPFMAXmn3MwKaSsGmq9oUCo,88261 +sympy/polys/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/polys/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_appellseqs.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_constructor.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_densearith.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_densebasic.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_densetools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_dispersion.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_distributedmodules.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_euclidtools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_factortools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_fields.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_galoistools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_groebnertools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_heuristicgcd.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_injections.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_modulargcd.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_monomials.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_multivariate_resultants.cpython-310.pyc,, 
+sympy/polys/tests/__pycache__/test_orderings.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_orthopolys.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_partfrac.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polyclasses.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polyfuncs.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polymatrix.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polyoptions.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polyroots.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polytools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_polyutils.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_pythonrational.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_rationaltools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_ring_series.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_rings.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_rootisolation.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_rootoftools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_solvers.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_specialpolys.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_sqfreetools.cpython-310.pyc,, +sympy/polys/tests/__pycache__/test_subresultants_qq_zz.cpython-310.pyc,, +sympy/polys/tests/test_appellseqs.py,sha256=YTERuRr30QtfxYR0erXvJG8D-INe9RaMFAF0ZM-H4Ks,3820 +sympy/polys/tests/test_constructor.py,sha256=U1LBjA881oG4A8oMXqZe0sZ42pmH7YpR_VSJjBNZz-w,6378 +sympy/polys/tests/test_densearith.py,sha256=1YBmEJTtPRWj4l39HMkFD6ffkU8h3pIs7lz-k_9XGYk,40428 +sympy/polys/tests/test_densebasic.py,sha256=vcoTscGRB1bef9UhclHcsKnBJp9baexjQ-enXq1-pKM,21477 +sympy/polys/tests/test_densetools.py,sha256=QM1Yt0hOHBnUTvdn14aFRUdfMQE9P2q1Hpzeud-n-ds,24572 +sympy/polys/tests/test_dispersion.py,sha256=8JfwjSNy7X74qJODMaVp1GSLprFiRDVt6XrYc_-omgQ,3183 +sympy/polys/tests/test_distributedmodules.py,sha256=dXmjhozX5Yzb7DsrtbdFTqAxi9Z1UZNJvGxj-vHM7cM,7639 +sympy/polys/tests/test_euclidtools.py,sha256=vEyj48eIjm6-KRQtThNfI4ic_VDNB6l7jMouxJAF9HE,19482 +sympy/polys/tests/test_factortools.py,sha256=MXOJfhjrLAu-UCyXg6YRMYAc7nkw6SAfkY66_RKG9Es,24560 +sympy/polys/tests/test_fields.py,sha256=vrdg27319R3Zro_idhQVxIeomN9P6mU3jHyX7HZKeMU,10245 +sympy/polys/tests/test_galoistools.py,sha256=btKRaqckjvyGOhCvIfwLtRDVG2Qiwo6CTnoPW8h4S9E,28130 +sympy/polys/tests/test_groebnertools.py,sha256=ZWHBcCCOVNwDxuJWg1WPo0krTHx1m1wTPi2cOYPsAT4,18584 +sympy/polys/tests/test_heuristicgcd.py,sha256=wsAKgOKuLYra14qMS8EUt_Pda_SoBfP90X2-Tv1WG7A,4031 +sympy/polys/tests/test_injections.py,sha256=EONGggBUNWaVSwi817CzLBYJgkTehFq8-m-Qdqes984,1286 +sympy/polys/tests/test_modulargcd.py,sha256=GE-24EnWOAQVYwgBb5PJzySX6EEJQs-q3HRFBWsXkTE,9042 +sympy/polys/tests/test_monomials.py,sha256=bY057IDFyVs864jcJ46ZITLv57xMKNfBVwBC-mnzJLA,10988 +sympy/polys/tests/test_multivariate_resultants.py,sha256=DJu8CcZ3xwx8njpjDeSOyhyxeqZYmhfb7dkSCU-ll7Y,9501 +sympy/polys/tests/test_orderings.py,sha256=bdsIsqJTFJCVyZNRMAGVDXVk79ldw9rmAGejS_lwKP0,4254 +sympy/polys/tests/test_orthopolys.py,sha256=UpJwPlmqZ3IZtWhaLcfhR5EyKj49_VpruRlI2dK_Awk,6379 +sympy/polys/tests/test_partfrac.py,sha256=78xlrvzvON2047j_DeQ0E8BBZg6Z1koJzksj5rQah9A,7096 +sympy/polys/tests/test_polyclasses.py,sha256=uUjLcfKrfW-EBB6N9ofESJgw4_QacKWN1fLa0etn6iY,13321 +sympy/polys/tests/test_polyfuncs.py,sha256=VbgCgCRE06dtSY9I9GSdPH9T52ETYYoxk4J3N1WBtd4,4520 +sympy/polys/tests/test_polymatrix.py,sha256=pl2VrN_d2XGOVHvvAnaNQzkdFTdQgjt9ePgo41soBRs,7353 
+sympy/polys/tests/test_polyoptions.py,sha256=z9DUdt8K3lYkm4IyLH1Cv-TKe76HP-EyaRkZVsfWb6U,12416 +sympy/polys/tests/test_polyroots.py,sha256=LUh1A92dy93Ou2t2_650ujTqvC3DQK0qpl3QO7VZCrk,26809 +sympy/polys/tests/test_polytools.py,sha256=855XWTO3k68OALdT-PpsZ8ZfQepTsUEhDxU8dYyF1SE,126200 +sympy/polys/tests/test_polyutils.py,sha256=Qs3QQl0WYmTnkYE2ovTxdLeu6DYnWO_OoUmLwNDZzSw,11547 +sympy/polys/tests/test_pythonrational.py,sha256=vYMlOTuYvf-15P0nKTFm-uRrhUc-nCFEkqYFAPLxg08,4143 +sympy/polys/tests/test_rationaltools.py,sha256=wkvjzNP1IH-SdubNk5JJ7OWcY-zNF6z3t32kfp9Ncs0,2397 +sympy/polys/tests/test_ring_series.py,sha256=SCUiciL10XGGjxFuM6ulzA460XAUVRykW3HLb8RNsc0,24662 +sympy/polys/tests/test_rings.py,sha256=g3hl2fMJ6-X7-k9n3IBdOAtyqONbjYwTizlrFpWTR4M,45393 +sympy/polys/tests/test_rootisolation.py,sha256=x-n-T-Con-8phelNa05BPszkC_UCW1C0yAOwz658I60,32724 +sympy/polys/tests/test_rootoftools.py,sha256=psVf3YA1MMkeuVvn-IpmF_rc3AEhh8U4U09h6dEY9u0,21531 +sympy/polys/tests/test_solvers.py,sha256=LZwjEQKKpFdCr4hMaU0CoN650BqU-arsACJNOF7lOmk,13655 +sympy/polys/tests/test_specialpolys.py,sha256=vBEDCC82ccGvxsETR5xr3yQ70Ho_HUqv1Q970vWf44M,4995 +sympy/polys/tests/test_sqfreetools.py,sha256=QJdMLVvQOiPm8ZYr4OESV71d5Ag9QcK1dMUkYv3pY5o,4387 +sympy/polys/tests/test_subresultants_qq_zz.py,sha256=ro6-F0vJrR46syl5Q0zuXfXQzEREtlkWAeRV9xJE31Y,13138 +sympy/printing/__init__.py,sha256=ws2P2KshXpwfnij4zaU3lVzIFQOh7nSjLbrB50cVFcU,2264 +sympy/printing/__pycache__/__init__.cpython-310.pyc,, +sympy/printing/__pycache__/aesaracode.cpython-310.pyc,, +sympy/printing/__pycache__/c.cpython-310.pyc,, +sympy/printing/__pycache__/codeprinter.cpython-310.pyc,, +sympy/printing/__pycache__/conventions.cpython-310.pyc,, +sympy/printing/__pycache__/cxx.cpython-310.pyc,, +sympy/printing/__pycache__/defaults.cpython-310.pyc,, +sympy/printing/__pycache__/dot.cpython-310.pyc,, +sympy/printing/__pycache__/fortran.cpython-310.pyc,, +sympy/printing/__pycache__/glsl.cpython-310.pyc,, +sympy/printing/__pycache__/gtk.cpython-310.pyc,, +sympy/printing/__pycache__/jscode.cpython-310.pyc,, +sympy/printing/__pycache__/julia.cpython-310.pyc,, +sympy/printing/__pycache__/lambdarepr.cpython-310.pyc,, +sympy/printing/__pycache__/latex.cpython-310.pyc,, +sympy/printing/__pycache__/llvmjitcode.cpython-310.pyc,, +sympy/printing/__pycache__/maple.cpython-310.pyc,, +sympy/printing/__pycache__/mathematica.cpython-310.pyc,, +sympy/printing/__pycache__/mathml.cpython-310.pyc,, +sympy/printing/__pycache__/numpy.cpython-310.pyc,, +sympy/printing/__pycache__/octave.cpython-310.pyc,, +sympy/printing/__pycache__/precedence.cpython-310.pyc,, +sympy/printing/__pycache__/preview.cpython-310.pyc,, +sympy/printing/__pycache__/printer.cpython-310.pyc,, +sympy/printing/__pycache__/pycode.cpython-310.pyc,, +sympy/printing/__pycache__/python.cpython-310.pyc,, +sympy/printing/__pycache__/rcode.cpython-310.pyc,, +sympy/printing/__pycache__/repr.cpython-310.pyc,, +sympy/printing/__pycache__/rust.cpython-310.pyc,, +sympy/printing/__pycache__/smtlib.cpython-310.pyc,, +sympy/printing/__pycache__/str.cpython-310.pyc,, +sympy/printing/__pycache__/tableform.cpython-310.pyc,, +sympy/printing/__pycache__/tensorflow.cpython-310.pyc,, +sympy/printing/__pycache__/theanocode.cpython-310.pyc,, +sympy/printing/__pycache__/tree.cpython-310.pyc,, +sympy/printing/aesaracode.py,sha256=aVXDMh_YDRsDwPbZMt8X73jjv4DW8g15M1M4TdNlqXQ,18227 +sympy/printing/c.py,sha256=dQ2ucrIGZGgYB6hS4gLIzFKDEYpfABNbP54lS7H6AIQ,26942 +sympy/printing/codeprinter.py,sha256=RkV88Z-SSCGkWJXuc_7pe2zoB-hRheBtJDDPEyK5acQ,35350 
+sympy/printing/conventions.py,sha256=k6YRWHfvbLHJp1uKgQX-ySiOXSsXH8QJxC9fymYmcSM,2580 +sympy/printing/cxx.py,sha256=CtkngKi4o_z5XMbmzpa1eC1uUR9SCbuOIli9Zsnh4Rc,5737 +sympy/printing/defaults.py,sha256=YitLfIRfFH8ltNd18Y6YtBgq5H2te0wFKlHuIO4cvo8,135 +sympy/printing/dot.py,sha256=W0J798ZxBdlJercffBGnNDTp7J2tMdIYQkE_KIiyi3s,8274 +sympy/printing/fortran.py,sha256=JeDXvo6dL0-yG2nk9oiTmgBiWJZrjeZURsMcrFuSayo,28568 +sympy/printing/glsl.py,sha256=fYURb8NYRAxmbMQleFs-X2IWQ7uk5xHkJVhgskrFsbU,20537 +sympy/printing/gtk.py,sha256=ptnwYxJr5ox3LG4TCDbRIgxsCikaVvEzWBaqIpITUXc,466 +sympy/printing/jscode.py,sha256=EkGUqMH3qBAbLVbSSuYi4ZQ89G4xUImDT2nTAf3nn9E,12131 +sympy/printing/julia.py,sha256=iJqOPrHhqJjAc6UnT_8R7A5NFcn6ImE3mOTLS7X0bUY,23553 +sympy/printing/lambdarepr.py,sha256=BCx4eSdG8MQ8ZSUV1lWEd3CzbZ4IiMid-TTxPoV6FHU,8305 +sympy/printing/latex.py,sha256=ImSA8Ri3-30szn-FgMC4xTkrjnq9qlGisUhZtUiTyYE,121722 +sympy/printing/llvmjitcode.py,sha256=wa32lF5254AOPnbV9F5OvQTd1HOk0rfN-HUekcN1HmI,17164 +sympy/printing/maple.py,sha256=yEGhEsE_WkG4M6PpRdURw-FbsG-eVLL8d2-d3CUpkHk,10588 +sympy/printing/mathematica.py,sha256=9R-wXu1SR7Rp5hDFHdrRA0CPpADI58qeGoSxbAMpYP0,12701 +sympy/printing/mathml.py,sha256=BZNSIr05Hf3i2qBeNq0rGGEtHsChD2p8lfqg6GpRU5M,75290 +sympy/printing/numpy.py,sha256=X-MKcpT1u6Z6qaFKs6N17TQnzZMaeSMeKpJEru6Mhvo,19776 +sympy/printing/octave.py,sha256=31BmnCU-CCqllApOBJp5EPQCRO7hjU7hvYTqYxerPYg,25621 +sympy/printing/precedence.py,sha256=dK6ueqV6OOXg0qY9L-goOgbQarqVRygIYK5FQGTBPR8,5268 +sympy/printing/pretty/__init__.py,sha256=pJTe-DO4ctTlnjg1UvqyoeBY50B5znFjcGvivXRhM2U,344 +sympy/printing/pretty/__pycache__/__init__.cpython-310.pyc,, +sympy/printing/pretty/__pycache__/pretty.cpython-310.pyc,, +sympy/printing/pretty/__pycache__/pretty_symbology.cpython-310.pyc,, +sympy/printing/pretty/__pycache__/stringpict.cpython-310.pyc,, +sympy/printing/pretty/pretty.py,sha256=Yom39Yqxqb7mO0FxSRqsOmxSUvrwCaORdE4e_78YGIk,105281 +sympy/printing/pretty/pretty_symbology.py,sha256=nfBI-cLYLBP9VuZxb7DSWtFIg3vgDphNfV-uBtFDMIE,20208 +sympy/printing/pretty/stringpict.py,sha256=NuWPIg1wLFMu39Cxf09pgVKix_oY7zAWrPOBWVd_5Jc,19097 +sympy/printing/pretty/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/printing/pretty/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc,, +sympy/printing/pretty/tests/test_pretty.py,sha256=IC7BOUZ01_-WrqBvn3nEAL89UezKzucS8dNDAvDzAHY,184797 +sympy/printing/preview.py,sha256=FwN0_q52iU6idLNZNXo002gPNpVw_9xrxLifFnK_ssw,14104 +sympy/printing/printer.py,sha256=0-hGTS9IPEqqP3s2sW7cZWyBe6opGa1FzyIRhND6FkA,14479 +sympy/printing/pycode.py,sha256=L6SbgH4ulnqTKVvAUtaKCATX4XYLNK-rs2UAgVe-1Rw,24290 +sympy/printing/python.py,sha256=sJcUWJYaWX41EZVkhUmZqpLA2ITcYU65Qd1UKZXMdFo,3367 +sympy/printing/rcode.py,sha256=mgWYYacqkLiBblV60CRH1G6FC9FkZ0LOfAYs1NgxOHA,14282 +sympy/printing/repr.py,sha256=p9G_EeK2WkI__6LFEtWyL1KFHJLL1KTFUJsp7N5n6vk,11649 +sympy/printing/rust.py,sha256=OD9xYBoTk-yRhhtbCaxyceg1lsnCaUclp_NWW4uaNYY,21377 +sympy/printing/smtlib.py,sha256=sJ0-_Ns2vH45b5oEXIPJtIOG9lvCEqHlJRQzQoiVC44,19445 +sympy/printing/str.py,sha256=OEX6W7wBj1aJIiq39qFxstyWJxkAp08RzOLolXObeIM,33260 +sympy/printing/tableform.py,sha256=-1d1cwmnprJKPXpViTbQxpwy3wT7K8KjPD5HCyjbDGk,11799 +sympy/printing/tensorflow.py,sha256=KHdJMHMBOaJkHO8_uBfYRHeBW2VIziv_YYqIV30D-dA,7906 +sympy/printing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/printing/tests/__pycache__/__init__.cpython-310.pyc,, 
+sympy/printing/tests/__pycache__/test_aesaracode.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_c.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_codeprinter.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_conventions.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_cupy.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_cxx.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_dot.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_fortran.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_glsl.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_gtk.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_jax.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_jscode.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_julia.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_lambdarepr.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_latex.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_llvmjit.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_maple.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_mathematica.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_mathml.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_numpy.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_octave.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_precedence.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_preview.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_pycode.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_python.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_rcode.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_repr.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_rust.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_smtlib.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_str.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_tableform.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_tensorflow.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_theanocode.cpython-310.pyc,, +sympy/printing/tests/__pycache__/test_tree.cpython-310.pyc,, +sympy/printing/tests/test_aesaracode.py,sha256=s0pu_J7hfDJ4HttXP6cFM6fSUU1rHgga1SeAIdbNbAo,21016 +sympy/printing/tests/test_c.py,sha256=OrK5CxLbppwiOX2L-Whh9h7GC9XXueNkWhhF5ODaCnA,30804 +sympy/printing/tests/test_codeprinter.py,sha256=Bdh1RcusYzR7lTQ8s3Sik7zw_INivUcW2AS4dA0OCtg,1410 +sympy/printing/tests/test_conventions.py,sha256=yqPpU3F0WcbxImPBBAHd3YEZpkFGfcq_TLK4WN_gtP4,5257 +sympy/printing/tests/test_cupy.py,sha256=-hO52M1RJSQe0qSVSl6B1LudZIgaBMme0Nkd6dQGr6g,1858 +sympy/printing/tests/test_cxx.py,sha256=900VUfUpS55zfllYGQcpjdC4Wmcg4T8TV94Mr430NZc,2490 +sympy/printing/tests/test_dot.py,sha256=TSAtgGIgK_JbY-RMbQgUvnAI87SJqeJOqzcLjAobhKM,4648 +sympy/printing/tests/test_fortran.py,sha256=8L2zwZX8_QuNwcx24swcQUTvXYTO-5i-YrPL1hTRUVI,35518 +sympy/printing/tests/test_glsl.py,sha256=cfog9fp_EOFm_piJwqUcSvAIJ78bRwkFjecwr3ocCak,28421 +sympy/printing/tests/test_gtk.py,sha256=94gp1xRlPrFiALQGuqHnmh9xKrMxR52RQVkN0MXbUdA,500 +sympy/printing/tests/test_jax.py,sha256=B5GVZV9UxKeOmb4lzJHDkQXRbWQiLLD7w7Ze3sDrWHQ,10536 +sympy/printing/tests/test_jscode.py,sha256=ObahZne9lQbBiXyJZLohjQGdHsG2CnWCFOB8KbFOAqQ,11369 +sympy/printing/tests/test_julia.py,sha256=U7R9zOckGWy99f5StDFE9lMXkcEmMkGHzYj1UM1xzgc,13875 +sympy/printing/tests/test_lambdarepr.py,sha256=YU_lAQpiNHKJpBjZmgXr-unzOwS6Ss-u8sS2D_u-Mq0,6947 
+sympy/printing/tests/test_latex.py,sha256=m8UBxuluF0fEYoLSOMM79VtwhEzkqIiouu6vsaZ1G4c,135670 +sympy/printing/tests/test_llvmjit.py,sha256=EGPeRisM60_TIVgnk7PTLSm5F-Aod_88zLjHPZwfyZ8,5344 +sympy/printing/tests/test_maple.py,sha256=te2l-yWWfklFHnaw-F2ik8q2dqES2cxrnE1voJxMGL0,13135 +sympy/printing/tests/test_mathematica.py,sha256=vijg7xfoelywL-ZhNuXFfDjM1FgaW_4liTBx1wzpkWk,10954 +sympy/printing/tests/test_mathml.py,sha256=x4IckrMxOlSzt6CxGFpHdN2l6OXl7zrcxIHwn-KxeS8,96209 +sympy/printing/tests/test_numpy.py,sha256=7fGncgPzvUbSjtltsu-kwiCFPv9tJlv2zPLRFo3ZkNw,10360 +sympy/printing/tests/test_octave.py,sha256=xIFRIXtTHcuU6ZhBW8Ht_KjUPewJoCEQ0b5GVVRyP7g,18728 +sympy/printing/tests/test_precedence.py,sha256=CS4L-WbI2ZuWLgbGATtF41--h0iGkfuE6dK5DYYiC5g,2787 +sympy/printing/tests/test_preview.py,sha256=dSVxiGqdNR6gbF40V4J2tGhQ-T4RDvSyGypHvYcPDYM,988 +sympy/printing/tests/test_pycode.py,sha256=nFeQHGQ9l-R2X_Q1snMFZP4KQ0M35V48P_j9kdahW4Q,15894 +sympy/printing/tests/test_python.py,sha256=HN7JkzQcKSnB6718i7kaEJZ5pYMqu56z1mSmHQGzY4k,8128 +sympy/printing/tests/test_rcode.py,sha256=PqYfr3akhhBcmswU3QLSFNyrmNTc92irTn0Wf_2jdv4,13779 +sympy/printing/tests/test_repr.py,sha256=sj3bAdBShn0itw2yYsAuDOuRPfKQSKJy2R8cPlLdDnY,12689 +sympy/printing/tests/test_rust.py,sha256=eZTYJ3zN5LEt8tl5KhADg1HwcrofhSQswagP_zcxoMw,11504 +sympy/printing/tests/test_smtlib.py,sha256=b4Ou4bTp8E_fFzlg6vQRpWowhxR-9SB88qA_yShXjhk,20934 +sympy/printing/tests/test_str.py,sha256=m-fw28ThIk0AcCz2_0HKgUNIwe9m3YGndcb4bJ28Leo,42262 +sympy/printing/tests/test_tableform.py,sha256=Ff5l1QL2HxN32WS_TdFhUAVqzop8YoWY3Uz1TThvVIM,5692 +sympy/printing/tests/test_tensorflow.py,sha256=p-Jx4Umby9k5t5umhus-0hkuTJN7C5kEbJL_l2KdyJA,15643 +sympy/printing/tests/test_theanocode.py,sha256=E36Fj72HxMK0e1pKTkoTpv9wI4UvwHdVufo-JA6dYq0,21394 +sympy/printing/tests/test_tree.py,sha256=_8PGAhWMQ_A0f2DQLdDeMrpxY19889P5Ih9H41RZn8s,6080 +sympy/printing/theanocode.py,sha256=3RxlOR4bRjMHOta6kvBk_ZuxKM3LZvPO8WYuxrtd38g,19028 +sympy/printing/tree.py,sha256=GxEF1WIflPNShlOrZc8AZch2I6GxDlbpImHqX61_P5o,3872 +sympy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/release.py,sha256=iyi5eR6SKGqbP1Fp0_6p-darg5riTqiLfREzuz7g8UE,21 +sympy/sandbox/__init__.py,sha256=IaEVOYHaZ97OHEuto1UGthFuO35c0uvAZFZU23YyEaU,189 +sympy/sandbox/__pycache__/__init__.cpython-310.pyc,, +sympy/sandbox/__pycache__/indexed_integrals.cpython-310.pyc,, +sympy/sandbox/indexed_integrals.py,sha256=svh4xDIa8nGpDeH4TeRb49gG8miMvXpCzEarbor58EE,2141 +sympy/sandbox/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/sandbox/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/sandbox/tests/__pycache__/test_indexed_integrals.cpython-310.pyc,, +sympy/sandbox/tests/test_indexed_integrals.py,sha256=UK2E2wg9EMwda4Vwpzyj3rmXs6ni33HqcbyaqAww6ww,1179 +sympy/series/__init__.py,sha256=DYG9oisjzYeS55dIUpQpbAFcoDz7Q81fZJw36PRGu14,766 +sympy/series/__pycache__/__init__.cpython-310.pyc,, +sympy/series/__pycache__/acceleration.cpython-310.pyc,, +sympy/series/__pycache__/approximants.cpython-310.pyc,, +sympy/series/__pycache__/aseries.cpython-310.pyc,, +sympy/series/__pycache__/formal.cpython-310.pyc,, +sympy/series/__pycache__/fourier.cpython-310.pyc,, +sympy/series/__pycache__/gruntz.cpython-310.pyc,, +sympy/series/__pycache__/kauers.cpython-310.pyc,, +sympy/series/__pycache__/limits.cpython-310.pyc,, +sympy/series/__pycache__/limitseq.cpython-310.pyc,, +sympy/series/__pycache__/order.cpython-310.pyc,, +sympy/series/__pycache__/residues.cpython-310.pyc,, 
+sympy/series/__pycache__/sequences.cpython-310.pyc,, +sympy/series/__pycache__/series.cpython-310.pyc,, +sympy/series/__pycache__/series_class.cpython-310.pyc,, +sympy/series/acceleration.py,sha256=9VTCOEOgIyOvcwjY5ZT_c4kWE-f_bL79iz_T3WGis94,3357 +sympy/series/approximants.py,sha256=tE-hHuoW62QJHDA3WhRlXaTkokCAODs1vXgjirhOYiQ,3181 +sympy/series/aseries.py,sha256=cHVGRQaza4ayqI6ji6OHNkdQEMV7Bko4f4vug2buEQY,255 +sympy/series/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/series/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/series/benchmarks/__pycache__/bench_limit.cpython-310.pyc,, +sympy/series/benchmarks/__pycache__/bench_order.cpython-310.pyc,, +sympy/series/benchmarks/bench_limit.py,sha256=2PtdeeJtD6qyEvt9HFNvyTnMM8phFZRjscgnb4fHndU,173 +sympy/series/benchmarks/bench_order.py,sha256=iC8sQJ0lLlTgiXltAyLzSCQ-3490cf-c6NFiIU44JSk,207 +sympy/series/formal.py,sha256=CtRziTUItAd8G9z__jJ9s7dRIHAOdeHajdPmNB3HRgY,51772 +sympy/series/fourier.py,sha256=dzVo4VZ8OkD9YSbBEYQudpcHcEdVMG7LfnIRTMd4Lzg,22885 +sympy/series/gruntz.py,sha256=Iex_MRKqixBX7cehe-Wro-4fNreoXBsFIjcoUvsijG8,24544 +sympy/series/kauers.py,sha256=PzD0MATMNjLjPi9GW5GQGL6Uqc2UT-uPwnzhi7TkJH8,1720 +sympy/series/limits.py,sha256=D_lAe-Y0V1n5W3JztWs34tUasTTFgNqQi4MuPZc5oJk,12820 +sympy/series/limitseq.py,sha256=WM1Lh3RXhSZM1gQaJrhWnUtYEgJunLujIEw1gmtVhYw,7752 +sympy/series/order.py,sha256=bKvLPG0QwPl3a7Qw-SMQEjkpyaTxxye7pvC27-jvt80,19255 +sympy/series/residues.py,sha256=k46s_fFfIHdJZqfst-B_-X1R-SAWs_rR9MQH7a9JLtg,2213 +sympy/series/sequences.py,sha256=S2_GtHiPY9q2BpzbVgJsD4pBf_e4yWveEwluX9rSHF4,35589 +sympy/series/series.py,sha256=crSkQK1wA6FQAKI1islG6rpAzvWlz1gZZPx2Awp43Qg,1861 +sympy/series/series_class.py,sha256=033NJ5Re8AS4eq-chmfct3-Lz2vBqdFqXtnrbxswTx0,2918 +sympy/series/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/series/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_approximants.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_aseries.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_demidovich.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_formal.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_fourier.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_gruntz.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_kauers.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_limits.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_limitseq.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_lseries.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_nseries.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_order.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_residues.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_sequences.cpython-310.pyc,, +sympy/series/tests/__pycache__/test_series.cpython-310.pyc,, +sympy/series/tests/test_approximants.py,sha256=KViHMW1dPXn7xaPYhtTQ9L_WtLLkoIic6yfFnwZ8Q70,1012 +sympy/series/tests/test_aseries.py,sha256=LblW4hBDVhigX9YvNc_HFvMm8nJMSTAT9PcUK3p-9HU,2371 +sympy/series/tests/test_demidovich.py,sha256=JGYacqJMEqHS6oT2AYs9d7iutIEb32PkJs9EJqOHxcQ,4947 +sympy/series/tests/test_formal.py,sha256=k2rqySJg6WnPSwcDyQBG7041bJxXdiYZt-KSs_IAso0,22495 +sympy/series/tests/test_fourier.py,sha256=Dknk64RWGNO8kXmpy2RRIbT8b-0CjL_35QcBugReW38,5891 +sympy/series/tests/test_gruntz.py,sha256=CRRAlU0JLygDL7pHnxfILSDAQ6UbJfaKZrClAdGB1iE,16060 +sympy/series/tests/test_kauers.py,sha256=Z85FhfXOOVki0HNGeK5BEBZOpkuB6SnKK3FqfK1-aLQ,1102 
+sympy/series/tests/test_limits.py,sha256=yMw_5X2GLXybVHHMnQ0H0Nx8sXWPYK9EH8boSZBOYwo,44263 +sympy/series/tests/test_limitseq.py,sha256=QjEF99sYEDqfY7ULz1qjQTo6e0lIRUCflEOBgiDYRVA,5691 +sympy/series/tests/test_lseries.py,sha256=GlQvlBlD9wh02PPBP6zU83wmhurvGUFTuCRp44B4uI4,1875 +sympy/series/tests/test_nseries.py,sha256=uzhzYswSOe9Gh_nWKeO69tvGPMLd-9tqk4HBYX8JIm4,18284 +sympy/series/tests/test_order.py,sha256=BGB1j0vmSMS8lGwSVmBOc9apI1NM82quFwF2Hhr2bDE,16500 +sympy/series/tests/test_residues.py,sha256=pT9xzPqtmfKGSbLLAxgDVZLTSy3TOxyfq3thTJs2VLw,3178 +sympy/series/tests/test_sequences.py,sha256=Oyq32yQZnGNQDS2uJ3by3bZ-y4G9c9BFfdQTcVuW2RM,11161 +sympy/series/tests/test_series.py,sha256=rsSCpDWpZQGMo0RfrkCS5XOl--wVFmIyZcaYUoaFXdc,15478 +sympy/sets/__init__.py,sha256=3vjCm4v2esbpsVPY0ROwTXMETxns_66bG4FCIFZ96oM,1026 +sympy/sets/__pycache__/__init__.cpython-310.pyc,, +sympy/sets/__pycache__/conditionset.cpython-310.pyc,, +sympy/sets/__pycache__/contains.cpython-310.pyc,, +sympy/sets/__pycache__/fancysets.cpython-310.pyc,, +sympy/sets/__pycache__/ordinals.cpython-310.pyc,, +sympy/sets/__pycache__/powerset.cpython-310.pyc,, +sympy/sets/__pycache__/setexpr.cpython-310.pyc,, +sympy/sets/__pycache__/sets.cpython-310.pyc,, +sympy/sets/conditionset.py,sha256=mBxxVHIFt9UfddAyvwfd-uVsM5fisNUSvBdNWH5QN_A,7825 +sympy/sets/contains.py,sha256=1jXxAFsl2ivXlT9SsGOM7s1uvS2UKEuWzNYA_bTtS6U,1234 +sympy/sets/fancysets.py,sha256=kVDkGbp316dFdR5GMWLtreltBFot8G39XM_xLvG1TkU,48118 +sympy/sets/handlers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/sets/handlers/__pycache__/__init__.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/add.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/comparison.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/functions.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/intersection.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/issubset.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/mul.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/power.cpython-310.pyc,, +sympy/sets/handlers/__pycache__/union.cpython-310.pyc,, +sympy/sets/handlers/add.py,sha256=_ucFvxuDv9wsmKxGkCDUERtYk3I_tQxjZjY3ZkroWs0,1863 +sympy/sets/handlers/comparison.py,sha256=WfT_vLrOkvPqRg2mf7gziVs_6cLg0kOTEFv-Nb1zIvo,1601 +sympy/sets/handlers/functions.py,sha256=jYSFqFNH6mXbKFPgvIAIGY8BhbLPo1dAvcNg4MxmCaI,8381 +sympy/sets/handlers/intersection.py,sha256=rIdRTqFQzbsa0NGepzWmfoKhAd87aEqxONdOgujR_0A,16633 +sympy/sets/handlers/issubset.py,sha256=azka_5eOaUro3r3v72PmET0oY8-aaoJkzVEK7kuqXCA,4739 +sympy/sets/handlers/mul.py,sha256=XFbkOw4PDQumaOEUlHeQLvjhIom0f3iniSYv_Kau-xw,1842 +sympy/sets/handlers/power.py,sha256=84N3dIus7r09XV7PF_RiEpFRw1y5tOGD34WKzSM9F-4,3186 +sympy/sets/handlers/union.py,sha256=lrAdydqExnALUjM0dnoM-7JAZqtbgLb46Y2GGmFtQdw,4225 +sympy/sets/ordinals.py,sha256=GSyaBq7BHJC3pvgoCDoUKZQ0IE2VXyHtx6_g5OS64W4,7641 +sympy/sets/powerset.py,sha256=vIGnSYKngEPEt6V-6beDOXAOY9ugDLJ8fXOx5H9JJck,2913 +sympy/sets/setexpr.py,sha256=jMOQigDscLTrFPXvHqo1ODVRG9BqC4yn38Ej4m6WPa0,3019 +sympy/sets/sets.py,sha256=Ma1U85BlQq_VwQZzu5aVVrqK9h0f7iwsltfOleqRnUE,79027 +sympy/sets/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/sets/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_conditionset.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_contains.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_fancysets.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_ordinals.cpython-310.pyc,, 
+sympy/sets/tests/__pycache__/test_powerset.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_setexpr.cpython-310.pyc,, +sympy/sets/tests/__pycache__/test_sets.cpython-310.pyc,, +sympy/sets/tests/test_conditionset.py,sha256=4FdbXxobY286r5UtrCbcQPqaFIycsdlbtNO2vJmzsEI,11352 +sympy/sets/tests/test_contains.py,sha256=SYiiiedUpAevS0I2gBQ8JEWrhRBmGsvOAxjGLPRe_gg,1559 +sympy/sets/tests/test_fancysets.py,sha256=GsRbQGZK_KAGp9aIBs6TLWlLzDNJvzkrzjzdUFMhRb8,51685 +sympy/sets/tests/test_ordinals.py,sha256=L4DYc6ByQMDwJGFzJC3YhfSrVk5auW7pf4QYpJ5xY7w,2637 +sympy/sets/tests/test_powerset.py,sha256=nFvDGlhAf0wG-pZnPkgJjfwDHrTwdro3MYIinwyxn94,4805 +sympy/sets/tests/test_setexpr.py,sha256=E--SjYVzrmau0EbD8g4NTqp6aLD8qHzIuI7sAfuWxpY,14797 +sympy/sets/tests/test_sets.py,sha256=9Upkysel9pewUn77Rowv0Ct8jKduZgW2lutpGKBnQj4,66659 +sympy/simplify/__init__.py,sha256=MH1vkwHq0J5tNm7ss8V6v-mjrDGUXwfOsariIwfi38c,1274 +sympy/simplify/__pycache__/__init__.cpython-310.pyc,, +sympy/simplify/__pycache__/combsimp.cpython-310.pyc,, +sympy/simplify/__pycache__/cse_main.cpython-310.pyc,, +sympy/simplify/__pycache__/cse_opts.cpython-310.pyc,, +sympy/simplify/__pycache__/epathtools.cpython-310.pyc,, +sympy/simplify/__pycache__/fu.cpython-310.pyc,, +sympy/simplify/__pycache__/gammasimp.cpython-310.pyc,, +sympy/simplify/__pycache__/hyperexpand.cpython-310.pyc,, +sympy/simplify/__pycache__/hyperexpand_doc.cpython-310.pyc,, +sympy/simplify/__pycache__/powsimp.cpython-310.pyc,, +sympy/simplify/__pycache__/radsimp.cpython-310.pyc,, +sympy/simplify/__pycache__/ratsimp.cpython-310.pyc,, +sympy/simplify/__pycache__/simplify.cpython-310.pyc,, +sympy/simplify/__pycache__/sqrtdenest.cpython-310.pyc,, +sympy/simplify/__pycache__/traversaltools.cpython-310.pyc,, +sympy/simplify/__pycache__/trigsimp.cpython-310.pyc,, +sympy/simplify/combsimp.py,sha256=XZOyP8qxowsXNbrtdUiinUFTUau4DZvivmd--Cw8Jnk,3605 +sympy/simplify/cse_main.py,sha256=4TJ15SSMyLa1rBp3FswVpkSmUDsu3uMxBkaUlyU9xZM,31349 +sympy/simplify/cse_opts.py,sha256=ZTCaOdOrgtifWxQmFzyngrLq9uwzByBdiSS5mE-DDoE,1618 +sympy/simplify/epathtools.py,sha256=YEeS5amYseT1nC4bHqyyemrjAE1qlhWz0ISXJk5I8Xo,10173 +sympy/simplify/fu.py,sha256=fgEyS5xWwvEUDWDkA7nco9k96NDxmjf3AHrP6Yc1zsg,61835 +sympy/simplify/gammasimp.py,sha256=n-TDIl7W_8RPSvpRTk8XiRSvYDBpzh55xxxWBpdXrfI,18609 +sympy/simplify/hyperexpand.py,sha256=TCqQwNyLflSgkGbuhVAohoXcMr1Dc9OgdXzeROC78Go,84437 +sympy/simplify/hyperexpand_doc.py,sha256=E8AD0mj8ULtelDSUkmJKJY7kYm5fVfCL4QH_DX65qEw,521 +sympy/simplify/powsimp.py,sha256=ThrrYTEIwQnd1cOfw-_p6ydRb1e2-7K5CU7dJpXTx-Y,26577 +sympy/simplify/radsimp.py,sha256=rE5fKX7Rf744zH_ybaTdytGNDPmGtEnd8oD9btuM_cU,41028 +sympy/simplify/ratsimp.py,sha256=s8K5jmxvPoYw8DVIpW0-h-brHlWi3a3Xj7DQoKJUjl8,7686 +sympy/simplify/simplify.py,sha256=VNAkKbQc_Mr4wxKTNfhOP4US4FccKMNI07Avj4axcQc,72902 +sympy/simplify/sqrtdenest.py,sha256=Ee1_NGJmWMG2fn2906PpyC79W-dZQdsSLNjkiT4gi1Q,21635 +sympy/simplify/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/simplify/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_combsimp.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_cse.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_epathtools.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_fu.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_function.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_gammasimp.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_hyperexpand.cpython-310.pyc,, 
+sympy/simplify/tests/__pycache__/test_powsimp.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_radsimp.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_ratsimp.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_rewrite.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_simplify.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_sqrtdenest.cpython-310.pyc,, +sympy/simplify/tests/__pycache__/test_trigsimp.cpython-310.pyc,, +sympy/simplify/tests/test_combsimp.py,sha256=O95WSxCvo2fDQs-UlarAcSf0_8M3PuTR76lhREDoNA8,2958 +sympy/simplify/tests/test_cse.py,sha256=pXDjx2yrL1YlT0ddzUJnZn3a1zD-Ch6I1C4TPtK9Nlk,25299 +sympy/simplify/tests/test_epathtools.py,sha256=ugsQlfuK6POiixdeit63QovsVAlG5JyCaPlPp0j35LE,3525 +sympy/simplify/tests/test_fu.py,sha256=Xqv8OyB_z3GrDUa9YdxyY98vq_XrwiMKzwMpqKx8XFQ,18651 +sympy/simplify/tests/test_function.py,sha256=gzdcSFObuDzVFJDdAgmERtZJvG38WNSmclPAdG8OaPQ,2199 +sympy/simplify/tests/test_gammasimp.py,sha256=32cPRmtG-_Mz9g02lmmn-PWDD3J_Ku6sxLxIUU7WqxE,5320 +sympy/simplify/tests/test_hyperexpand.py,sha256=tkrRq3zeOjXlH88kGiPgPHC3TTr5Y4BboC3bqDssKJc,40851 +sympy/simplify/tests/test_powsimp.py,sha256=CG5H_xSbtwZakjLzL-EEg-T9j2GOUylCU5YgLsbHm2A,14313 +sympy/simplify/tests/test_radsimp.py,sha256=7GjCVKP_nyS8s36Oxwmw6TiPRY0fG3aZP9Rd3oSksTY,18789 +sympy/simplify/tests/test_ratsimp.py,sha256=uRq7AGI957LeLOmYIXMqKkstQylK09xMYJRUflT8a-s,2210 +sympy/simplify/tests/test_rewrite.py,sha256=LZj4V6a95GJj1o3NlKRoHMk7sWGPASFlw24nsm4z43k,1127 +sympy/simplify/tests/test_simplify.py,sha256=7t9yEQCj53nrir-lItM0BSKZPgueDpul3H-Bsp-Bcu8,41565 +sympy/simplify/tests/test_sqrtdenest.py,sha256=4zRtDQVGpKRRBYSAnEF5pSM0AR_fAMumONu2Ocb3tqg,7470 +sympy/simplify/tests/test_trigsimp.py,sha256=vG5PDTDNOuFypT7H9DSMjIollPqkKdNhWv5FBj6vFnE,19949 +sympy/simplify/traversaltools.py,sha256=pn_t9Yrk_SL1X0vl-zVR6yZaxkY25D4MwTBv4ywnD1Y,409 +sympy/simplify/trigsimp.py,sha256=CasB3mOMniKbNiBDJU-SjyIFxNCKIWkgFLEsbOYlRSA,46856 +sympy/solvers/__init__.py,sha256=cqnpjbmL0YQNal_aQ-AFeCNkU1eHCpC17uaJ-Jo8COQ,2210 +sympy/solvers/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/__pycache__/bivariate.cpython-310.pyc,, +sympy/solvers/__pycache__/decompogen.cpython-310.pyc,, +sympy/solvers/__pycache__/deutils.cpython-310.pyc,, +sympy/solvers/__pycache__/inequalities.cpython-310.pyc,, +sympy/solvers/__pycache__/pde.cpython-310.pyc,, +sympy/solvers/__pycache__/polysys.cpython-310.pyc,, +sympy/solvers/__pycache__/recurr.cpython-310.pyc,, +sympy/solvers/__pycache__/solvers.cpython-310.pyc,, +sympy/solvers/__pycache__/solveset.cpython-310.pyc,, +sympy/solvers/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/solvers/benchmarks/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/benchmarks/__pycache__/bench_solvers.cpython-310.pyc,, +sympy/solvers/benchmarks/bench_solvers.py,sha256=ZVK2TIW0XjWRDBex054ymmVlSBQw-RIBhEL1wS2ZAmU,288 +sympy/solvers/bivariate.py,sha256=yrlo0AoY_MtXHP1j0qKV4UgAhSXBBpvHHRnDJuCFsC8,17869 +sympy/solvers/decompogen.py,sha256=dWQla7hp7A4RqI2a0qRNQLWNPEuur68lD3dVTyktdBU,3757 +sympy/solvers/deutils.py,sha256=6dCIoZqX8mFz77SpT1DOM_I5yvdwU1tUMnTbA2vjYME,10309 +sympy/solvers/diophantine/__init__.py,sha256=I1p3uj3kFQv20cbsZ34K5rNCx1_pDS7JwHUCFstpBgs,128 +sympy/solvers/diophantine/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc,, +sympy/solvers/diophantine/diophantine.py,sha256=oU1NhMmD2Eyzl_H5mMZw90-rxxU4A4MnwvrDswukk-8,120229 
+sympy/solvers/diophantine/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/solvers/diophantine/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/diophantine/tests/__pycache__/test_diophantine.cpython-310.pyc,, +sympy/solvers/diophantine/tests/test_diophantine.py,sha256=mB79JLU5qe-9EM33USi8LmNLJjKrNuZ8TpPxaBz7gVw,42265 +sympy/solvers/inequalities.py,sha256=2IZlzDBYx8lWmW_7PVnIpTw6_FuYFsJLKvYna3nurA4,33098 +sympy/solvers/ode/__init__.py,sha256=I7RKwCcaoerflUm5i3ZDJgBIOnkhBjb83BCHcVcFqfM,468 +sympy/solvers/ode/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/hypergeometric.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/lie_group.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/nonhomogeneous.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/ode.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/riccati.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/single.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/subscheck.cpython-310.pyc,, +sympy/solvers/ode/__pycache__/systems.cpython-310.pyc,, +sympy/solvers/ode/hypergeometric.py,sha256=kizvLgjzX1VUZ1n84uT6tlOs_8NfQBW1JZVo0fJLkdM,10048 +sympy/solvers/ode/lie_group.py,sha256=tGCy_KAMuKa4gb4JR084Qy0VKu9qU1BoYBgreDX5D9Q,39242 +sympy/solvers/ode/nonhomogeneous.py,sha256=SyQVXK3BB1gEZlcK1q5LueWvpyo-U600tdnpV_87QbE,18231 +sympy/solvers/ode/ode.py,sha256=Zt6XrqtQTEPa5a7lj-r0HJ8tZoS-lJNgt8J_3kHrqyg,145088 +sympy/solvers/ode/riccati.py,sha256=Ma2sEij9Ns3onj35F7PMOLAXsFG4NAcPjP-Qp5Spt4s,30748 +sympy/solvers/ode/single.py,sha256=UtDMHdaKSYKCOfanLiwG3tAzqov5eG51fV_5dGq_agI,109468 +sympy/solvers/ode/subscheck.py,sha256=CIPca_qTxL9z5oaD2e2NrgME0eVQgF9PabZndcVqHZM,16130 +sympy/solvers/ode/systems.py,sha256=jjhV_7GdP-kpqM8Kk3xlR1Dss5rvWCC839wguTnFLhI,71526 +sympy/solvers/ode/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/solvers/ode/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_lie_group.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_ode.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_riccati.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_single.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_subscheck.cpython-310.pyc,, +sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc,, +sympy/solvers/ode/tests/test_lie_group.py,sha256=vg1yy_-a5x1Xm2IcVkEi5cD2uA5wE5gjqpfBwkV1vZc,5319 +sympy/solvers/ode/tests/test_ode.py,sha256=WsDeiS1cxO4NCDNJa99NMAqysPsOrKTQ0c6aY_u2vjc,48311 +sympy/solvers/ode/tests/test_riccati.py,sha256=-2C79UTh6WGwT8GjQ_YwdzlBrQU45f-NT7y0s1vdo8c,29352 +sympy/solvers/ode/tests/test_single.py,sha256=RV6Dl3MjY1dOQwNZk7hveZUzz8Gft6plRuIr7FmG58c,99983 +sympy/solvers/ode/tests/test_subscheck.py,sha256=Gzwc9h9n6zlNOhJ8Qh6fQDeB8ghaRmgv3ktBAfPJx-U,12468 +sympy/solvers/ode/tests/test_systems.py,sha256=Lkq84sR3pSw75d_pTAkm2_0gY45pCTKWmKmrO2zbov8,129359 +sympy/solvers/pde.py,sha256=FRFnEbD7ZJOcy8-q1LZ5NvYRt4Fu4Avf5Xe6Xk6pWoo,35659 +sympy/solvers/polysys.py,sha256=SQw-W8d5VHBfF81EYVFbcSSVUrsIHG9a9YzbkUaKIqc,13202 +sympy/solvers/recurr.py,sha256=DyssZuOyemoC6J1cWq635O7zkg1WLHrR7KGoM-gNy0g,25389 +sympy/solvers/solvers.py,sha256=bVtrpSn5jmko1ik6_JXD2rYW5ZRNKnboT0OiBDRbFRw,136170 +sympy/solvers/solveset.py,sha256=KySAjWzQfiEnVpXRHSCGh8Gq2ObJWOZf7OMmssZR5qU,141021 +sympy/solvers/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/solvers/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_constantsimp.cpython-310.pyc,, 
+sympy/solvers/tests/__pycache__/test_decompogen.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_inequalities.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_numeric.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_pde.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_polysys.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_recurr.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc,, +sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc,, +sympy/solvers/tests/test_constantsimp.py,sha256=9Feugsg9jD2BwQiG4EFpb9fORyst6JdBmZqq2GaOgH8,8707 +sympy/solvers/tests/test_decompogen.py,sha256=7GUsDQQZtYbZIK0p0UxsOuNEJxEt4IHeOSsem_k-k0U,2943 +sympy/solvers/tests/test_inequalities.py,sha256=MuSP5v1kFL7eH_CSqOPhl6xDd1GuwRBWcZQSCwBy6Bg,20688 +sympy/solvers/tests/test_numeric.py,sha256=EeqGECpAsHoaXulCsOEJ6zAFn5i8iDy52Uo67awFAII,4738 +sympy/solvers/tests/test_pde.py,sha256=UGP3uWjF8pKQgfPifmdfvS5URVmzSg6m2NkS7LGzmio,9257 +sympy/solvers/tests/test_polysys.py,sha256=P1Jk79CAYB85L-O3KRJKpsqvwVJgqqJ_u44NigGWsaA,6873 +sympy/solvers/tests/test_recurr.py,sha256=-OeghSg16GFN70y_RUXC6CF6VU_b7NXaKDbejtRSocg,11418 +sympy/solvers/tests/test_solvers.py,sha256=hbJtihDVJQfRngUOBSz4OtV8HIkojkg528UNGtVAmr8,104484 +sympy/solvers/tests/test_solveset.py,sha256=YXl1lfZ1xnYrk_Dt4DY1gZuY9a0A5V462TPgqNfIPXk,134515 +sympy/stats/__init__.py,sha256=aNs_difmTw7e2GIfLGaPLpS-mXlttrrB3TVFPDSdGwU,8471 +sympy/stats/__pycache__/__init__.cpython-310.pyc,, +sympy/stats/__pycache__/compound_rv.cpython-310.pyc,, +sympy/stats/__pycache__/crv.cpython-310.pyc,, +sympy/stats/__pycache__/crv_types.cpython-310.pyc,, +sympy/stats/__pycache__/drv.cpython-310.pyc,, +sympy/stats/__pycache__/drv_types.cpython-310.pyc,, +sympy/stats/__pycache__/error_prop.cpython-310.pyc,, +sympy/stats/__pycache__/frv.cpython-310.pyc,, +sympy/stats/__pycache__/frv_types.cpython-310.pyc,, +sympy/stats/__pycache__/joint_rv.cpython-310.pyc,, +sympy/stats/__pycache__/joint_rv_types.cpython-310.pyc,, +sympy/stats/__pycache__/matrix_distributions.cpython-310.pyc,, +sympy/stats/__pycache__/random_matrix.cpython-310.pyc,, +sympy/stats/__pycache__/random_matrix_models.cpython-310.pyc,, +sympy/stats/__pycache__/rv.cpython-310.pyc,, +sympy/stats/__pycache__/rv_interface.cpython-310.pyc,, +sympy/stats/__pycache__/stochastic_process.cpython-310.pyc,, +sympy/stats/__pycache__/stochastic_process_types.cpython-310.pyc,, +sympy/stats/__pycache__/symbolic_multivariate_probability.cpython-310.pyc,, +sympy/stats/__pycache__/symbolic_probability.cpython-310.pyc,, +sympy/stats/compound_rv.py,sha256=SO1KXJ0aHGbD5y9QA8o6qOHbio3ua8wyO2Rsh0Hnw48,7965 +sympy/stats/crv.py,sha256=VK7jvYiQH523ar6QvLzV_k67u0ghcCrrWlBgt3cMdaw,20979 +sympy/stats/crv_types.py,sha256=TDANQNWz_fcSq7RzyMzxEKeidlHEmzdhunmxnuGlZNk,120259 +sympy/stats/drv.py,sha256=ewxYnUlCyvaF5ceMpziiz4e6FAgknzP5cC1ZVvQ_YLE,11995 +sympy/stats/drv_types.py,sha256=q7MjAtpLjO2nFxnQOKfw_Ipf2-gYzlavbqrEcUjMQlw,19288 +sympy/stats/error_prop.py,sha256=a-H6GZEidsiP_4-iNw7nSD99AMyN6DNHsSl0IUZGIAs,3315 +sympy/stats/frv.py,sha256=C4FHAVuckxdVnXGlmT957At5xdOLVYvH76KgL44TR38,16876 +sympy/stats/frv_types.py,sha256=MP1byJwusjZKRmzsy0fMBRkzScurG2-q58puaF6TF0U,23224 +sympy/stats/joint_rv.py,sha256=DcixlO2Ml4gnwMmZk2VTegiHVq88DkLdQlOTQ57SQtc,15963 +sympy/stats/joint_rv_types.py,sha256=Yx_TL9Xx862SZo8MofErvVh-fptL9UTzalDUbnW26Lg,30633 +sympy/stats/matrix_distributions.py,sha256=3OricwEMM_NU8b2lJxoiSTml7kvqrNQ6IUIn9Xy_DsY,21953 
+sympy/stats/random_matrix.py,sha256=NmzLC5JMDWI2TvH8tY6go8lYyHmqcZ-B7sSIO7z7oAk,1028 +sympy/stats/random_matrix_models.py,sha256=7i5XAUYxt-ekmP5KDMaytUlmCvxglEspoWbswSf82tE,15328 +sympy/stats/rv.py,sha256=r8G52PBmkfrVJtHUWEw1dPiBSrwTYagRdyzAweftjqk,54464 +sympy/stats/rv_interface.py,sha256=8KeUP2YG_1g4OYPrwSdZyq4R0mOO52qqBX-D225WbUg,13939 +sympy/stats/sampling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/stats/sampling/__pycache__/__init__.cpython-310.pyc,, +sympy/stats/sampling/__pycache__/sample_numpy.cpython-310.pyc,, +sympy/stats/sampling/__pycache__/sample_pymc.cpython-310.pyc,, +sympy/stats/sampling/__pycache__/sample_scipy.cpython-310.pyc,, +sympy/stats/sampling/sample_numpy.py,sha256=B4ZC7ZBrSD6ICQT468rOy-xrOgQDuecsHa0zJesAeYE,4229 +sympy/stats/sampling/sample_pymc.py,sha256=9g-n04aXSFc6F7FJ5zTYtHHL6W8-26g1nrgtamJc3Hw,2995 +sympy/stats/sampling/sample_scipy.py,sha256=ysqpDy8bp1RMH0g5FFgMmp2SQuXGFkcSH7JDZEpiZ8w,6329 +sympy/stats/sampling/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/stats/sampling/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/stats/sampling/tests/__pycache__/test_sample_continuous_rv.cpython-310.pyc,, +sympy/stats/sampling/tests/__pycache__/test_sample_discrete_rv.cpython-310.pyc,, +sympy/stats/sampling/tests/__pycache__/test_sample_finite_rv.cpython-310.pyc,, +sympy/stats/sampling/tests/test_sample_continuous_rv.py,sha256=Gh8hFN1hFFsthEv9wP2ZdgghQfaEnE8n7HlmyXXhN1E,5708 +sympy/stats/sampling/tests/test_sample_discrete_rv.py,sha256=jd2qnr4ABqpFcJrGcUpnTsN1z1d1prVvwUkG965oFeA,3319 +sympy/stats/sampling/tests/test_sample_finite_rv.py,sha256=dWwrFePw8eX2rBheAXi1AVxr_gqBD63VZKfW81hNoQc,3061 +sympy/stats/stochastic_process.py,sha256=pDz0rbKXTiaNmMmmz70dP3F_KWL_XhoCKFHYBNt1QeU,2312 +sympy/stats/stochastic_process_types.py,sha256=S2y3qCs7AO1EkQltN_OYkB4PsamQqcIjcPu_181wFqY,88608 +sympy/stats/symbolic_multivariate_probability.py,sha256=4wwyTYywD3TQ43Isv5KDtg-7jCyF-SW5xR5JeeqEfFM,10446 +sympy/stats/symbolic_probability.py,sha256=m0-p5hTGU2Ey7uBQrB7LSPgTvS0C8Fr-SA9d2BAX6Mk,23019 +sympy/stats/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/stats/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_compound_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_continuous_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_discrete_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_error_prop.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_finite_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_joint_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_matrix_distributions.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_mix.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_random_matrix.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_rv.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_stochastic_process.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_symbolic_multivariate.cpython-310.pyc,, +sympy/stats/tests/__pycache__/test_symbolic_probability.cpython-310.pyc,, +sympy/stats/tests/test_compound_rv.py,sha256=2927chbHTThA34Ki-ji319QT7ajQ1ueC640Mga-18ZA,6263 +sympy/stats/tests/test_continuous_rv.py,sha256=j3SFC2-4a6X2JObL3JU8znQkRXOGxz2a9XPlGPoBku0,55665 +sympy/stats/tests/test_discrete_rv.py,sha256=kr3MjfI02cPvQrQISwmsIDEEh2gpMnzZsjMd5TOhAl0,10676 +sympy/stats/tests/test_error_prop.py,sha256=xKAkw3F5XJ72xiDREI7PkyReWNVW_89CD_mjOY_diDY,1933 
+sympy/stats/tests/test_finite_rv.py,sha256=JHYgY4snFF5t9qcnQfKaN5zaGsO7_SuNR7Tq234W4No,20413 +sympy/stats/tests/test_joint_rv.py,sha256=W28rCRYczv5Jax7k-bj7OveT-y-AP4q-kRR0-LNaWX0,18653 +sympy/stats/tests/test_matrix_distributions.py,sha256=9daJUiSGaLq34TeZfB-xPqC8xz6vECGrm0DdBZaQPyY,8857 +sympy/stats/tests/test_mix.py,sha256=Cplnw06Ki96Y_4fx6Bu7lUXjxoIfX7tNJasm9SOz5wQ,3991 +sympy/stats/tests/test_random_matrix.py,sha256=CiD1hV25MGHwTfHGaoaehGD3iJ4lqNYi-ZiwReO6CVk,5842 +sympy/stats/tests/test_rv.py,sha256=Bp7UwffIMO7oc8UnFV11yYGcXUjSa0NhsuOgQaNRMt8,12959 +sympy/stats/tests/test_stochastic_process.py,sha256=ufbFxlJ6El6YH7JDztMlrOjXKzrOvEyLGK30j1_lNjw,39335 +sympy/stats/tests/test_symbolic_multivariate.py,sha256=0qXWQUjBU6N5yiNO09B3QB8RfAiLBSCJ0R5n0Eo2-lQ,5576 +sympy/stats/tests/test_symbolic_probability.py,sha256=k5trScMiwSgl9dzJt30BV-t0KuYcyD-s9HtT2-hVhQ0,9398 +sympy/strategies/__init__.py,sha256=XaTAPqDoi6527juvR8LLN1mv6ZcslDrGloTTBMjJzxA,1402 +sympy/strategies/__pycache__/__init__.cpython-310.pyc,, +sympy/strategies/__pycache__/core.cpython-310.pyc,, +sympy/strategies/__pycache__/rl.cpython-310.pyc,, +sympy/strategies/__pycache__/tools.cpython-310.pyc,, +sympy/strategies/__pycache__/traverse.cpython-310.pyc,, +sympy/strategies/__pycache__/tree.cpython-310.pyc,, +sympy/strategies/__pycache__/util.cpython-310.pyc,, +sympy/strategies/branch/__init__.py,sha256=xxbMwR2LzLcQWsH9ss8ddE99VHFJTY-cYiR6xhO3tj0,356 +sympy/strategies/branch/__pycache__/__init__.cpython-310.pyc,, +sympy/strategies/branch/__pycache__/core.cpython-310.pyc,, +sympy/strategies/branch/__pycache__/tools.cpython-310.pyc,, +sympy/strategies/branch/__pycache__/traverse.cpython-310.pyc,, +sympy/strategies/branch/core.py,sha256=QiXSa7uhvmUBTLyUwBQHrYkWlOceKh5p4kVD90VnCKM,2759 +sympy/strategies/branch/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/strategies/branch/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/strategies/branch/tests/__pycache__/test_core.cpython-310.pyc,, +sympy/strategies/branch/tests/__pycache__/test_tools.cpython-310.pyc,, +sympy/strategies/branch/tests/__pycache__/test_traverse.cpython-310.pyc,, +sympy/strategies/branch/tests/test_core.py,sha256=23KQWJxC_2T1arwMAkt9pY1ZtG59avlxTZcVTn81UPI,2246 +sympy/strategies/branch/tests/test_tools.py,sha256=4BDkqVqrTlsivQ0PldQr6PjVZsAikc39tSxGAQA3ir8,942 +sympy/strategies/branch/tests/test_traverse.py,sha256=6rikMnZdamSzww1sSiM-aQwqa4lQrpM-DpOU9XCbiOQ,1322 +sympy/strategies/branch/tools.py,sha256=tvv3IjmQGNYbo-slCbbDf_rylZd537wvLcpdBtT-bbY,357 +sympy/strategies/branch/traverse.py,sha256=7iBViQdNpKu-AHoFED7_C9KBSyYcQBfLGopEJQbNtvk,799 +sympy/strategies/core.py,sha256=nsH6LZgyc_aslv4Na5XvJMEizC6uSzscRlVW91k1pu4,3956 +sympy/strategies/rl.py,sha256=I2puD2khbCmO3e9_ngUnclLgk1c-xBHeUf-bZu5haLM,4403 +sympy/strategies/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +sympy/strategies/tests/__pycache__/__init__.cpython-310.pyc,, +sympy/strategies/tests/__pycache__/test_core.cpython-310.pyc,, +sympy/strategies/tests/__pycache__/test_rl.cpython-310.pyc,, +sympy/strategies/tests/__pycache__/test_tools.cpython-310.pyc,, +sympy/strategies/tests/__pycache__/test_traverse.cpython-310.pyc,, +sympy/strategies/tests/__pycache__/test_tree.cpython-310.pyc,, +sympy/strategies/tests/test_core.py,sha256=42XHlv1hN1S1QPEf2r9pddZ2EQL6o4FEPQvfo-UmXcw,2152 +sympy/strategies/tests/test_rl.py,sha256=wm0L6pdvddBgRcwhpiSk-nCgyzVGickfnOCkmHWS0j4,1949 +sympy/strategies/tests/test_tools.py,sha256=UdMojFIn3f1b2x2iRGv1Wfnwdso-Kl57GTyjCU_DjzQ,875 
+sympy/strategies/tests/test_traverse.py,sha256=jWuZhYEt-F18_rxEMhn6OgGQ1GNs-dM_GFZ2F5nHs2I,2082 +sympy/strategies/tests/test_tree.py,sha256=9NL948rt6i9tYU6CQz9VNxE6l1begQs-MxP2euzE3Sc,2400 +sympy/strategies/tools.py,sha256=ERASzEP2SP-EcJ8p-4XyREYB15q3t81x1cyamJ-M880,1368 +sympy/strategies/traverse.py,sha256=DhPnBJ5Rw_xzhGiBtSciTyV-H2zhlxgjYVjrNH-gLyk,1183 +sympy/strategies/tree.py,sha256=ggnP9l3NIpJsssBMVKr4-yM_m8uCkrkm191ZC6MfZjc,3770 +sympy/strategies/util.py,sha256=2fbR813IY4IYco5mBoGJLu5z88OhXmwuIxgOO9IvZO4,361 +sympy/tensor/__init__.py,sha256=VMNXCRSayigQT6a3cvf5M_M-wdV-KSil_JbAmHcuUQc,870 +sympy/tensor/__pycache__/__init__.cpython-310.pyc,, +sympy/tensor/__pycache__/functions.cpython-310.pyc,, +sympy/tensor/__pycache__/index_methods.cpython-310.pyc,, +sympy/tensor/__pycache__/indexed.cpython-310.pyc,, +sympy/tensor/__pycache__/tensor.cpython-310.pyc,, +sympy/tensor/__pycache__/toperators.cpython-310.pyc,, +sympy/tensor/array/__init__.py,sha256=lTT1EwV5tb3WAvmmS_mIjhCSWSLiB0NNPW4n9_3fu0k,8244 +sympy/tensor/array/__pycache__/__init__.cpython-310.pyc,, +sympy/tensor/array/__pycache__/array_comprehension.cpython-310.pyc,, +sympy/tensor/array/__pycache__/array_derivatives.cpython-310.pyc,, +sympy/tensor/array/__pycache__/arrayop.cpython-310.pyc,, +sympy/tensor/array/__pycache__/dense_ndim_array.cpython-310.pyc,, +sympy/tensor/array/__pycache__/mutable_ndim_array.cpython-310.pyc,, +sympy/tensor/array/__pycache__/ndim_array.cpython-310.pyc,, +sympy/tensor/array/__pycache__/sparse_ndim_array.cpython-310.pyc,, +sympy/tensor/array/array_comprehension.py,sha256=01PTIbkAGaq0CDcaI_2KsaMnYm1nxQ8sFAiHHcc__gw,12262 +sympy/tensor/array/array_derivatives.py,sha256=BWQC43h2WieqJgaCqhLV39BXN22Gb6zcy_BXerdVixA,4811 +sympy/tensor/array/arrayop.py,sha256=UYKdKQZgDsXtDopymWS8QM7FZcxR1O0D_cbt-Kjx7yM,18395 +sympy/tensor/array/dense_ndim_array.py,sha256=Ie8qVMJyp2Tsq7aVhmZpPX8X-KTlF9uaxkQfTzCZ9z8,6433 +sympy/tensor/array/expressions/__init__.py,sha256=OUMJjZY7HtWJL0ygqkdWC8LdCqibJZhHCfYeXu-eB4E,7045 +sympy/tensor/array/expressions/__pycache__/__init__.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/array_expressions.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/arrayexpr_derivatives.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/conv_array_to_indexed.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/conv_array_to_matrix.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/conv_indexed_to_array.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/conv_matrix_to_array.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/from_array_to_indexed.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/from_array_to_matrix.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/from_indexed_to_array.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/from_matrix_to_array.cpython-310.pyc,, +sympy/tensor/array/expressions/__pycache__/utils.cpython-310.pyc,, +sympy/tensor/array/expressions/array_expressions.py,sha256=Gc0ADM3i-6sFoQTsgRHs7dRpmdH0XYVj8z9iS80vEoQ,77022 +sympy/tensor/array/expressions/arrayexpr_derivatives.py,sha256=W9-bY2LL83lLSNHXItzqjOgvf-HIDbUXPoVw8uOymcg,6249 +sympy/tensor/array/expressions/conv_array_to_indexed.py,sha256=BIwlQr7RKC8bZN3mR8ICC5TYOC9uasYcV0Zc1VNKmiE,445 +sympy/tensor/array/expressions/conv_array_to_matrix.py,sha256=85YZBTZI4o9dJtKDJXXug_lJVLG8dT_22AT7l7DKoyE,416 +sympy/tensor/array/expressions/conv_indexed_to_array.py,sha256=EyW52TplBxIx25mUDvI_5Tzc8LD6Mnp6XNW9wIw9pH4,254 
+sympy/tensor/array/expressions/conv_matrix_to_array.py,sha256=XYyqt0NsQSrgNpEkr8xTGeUhR7ZYeNljVFfVEF1K7vA,250
+sympy/tensor/array/expressions/from_array_to_indexed.py,sha256=3YIcsAzWVWQRJYQS90uPvSl2dM7ZqLV_qt7E9-uYU28,3936
+sympy/tensor/array/expressions/from_array_to_matrix.py,sha256=OHkMM_yOLP6C1aAIZB-lPbz4AYS9i2shhFXGFBi9_Lc,41355
+sympy/tensor/array/expressions/from_indexed_to_array.py,sha256=RUcKemmrwuK5RFRr19YSPVMCOkZfLAWlbbB56u8Wi0g,11187
+sympy/tensor/array/expressions/from_matrix_to_array.py,sha256=yIY1RupF9-FVV3jZLsqWxZ1ckoE1-HkQyM8cQIm4_Gs,3929
+sympy/tensor/array/expressions/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/tensor/array/expressions/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/tensor/array/expressions/tests/__pycache__/test_array_expressions.cpython-310.pyc,,
+sympy/tensor/array/expressions/tests/__pycache__/test_arrayexpr_derivatives.cpython-310.pyc,,
+sympy/tensor/array/expressions/tests/__pycache__/test_as_explicit.cpython-310.pyc,,
+sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_indexed.cpython-310.pyc,,
+sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_matrix.cpython-310.pyc,,
+sympy/tensor/array/expressions/tests/__pycache__/test_convert_indexed_to_array.cpython-310.pyc,,
+sympy/tensor/array/expressions/tests/__pycache__/test_convert_matrix_to_array.cpython-310.pyc,,
+sympy/tensor/array/expressions/tests/__pycache__/test_deprecated_conv_modules.cpython-310.pyc,,
+sympy/tensor/array/expressions/tests/test_array_expressions.py,sha256=QUAdxQ9TvBpDEAZoJpLSWwbqjmuflPe3xBRP30lFZr0,31262
+sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py,sha256=lpC4ly6MJLDRBcVt3GcP3H6ke9bI-o3VULw0xyF5QbY,2470
+sympy/tensor/array/expressions/tests/test_as_explicit.py,sha256=nOjFKXCqYNu2O7Szc1TD1x1bsUchPRAG3nGlNGEd1Yg,2568
+sympy/tensor/array/expressions/tests/test_convert_array_to_indexed.py,sha256=6yNxGXH6BX5607FTjMkwR2t9wNVlEhV8JMSh4UIWux8,2500
+sympy/tensor/array/expressions/tests/test_convert_array_to_matrix.py,sha256=2vkSep9CPKYrQQS0u8Ayn_sc7yek1zwzjjCWK5cfYe8,29311
+sympy/tensor/array/expressions/tests/test_convert_indexed_to_array.py,sha256=RVEG_qUsXiBH9gHtWp2-9pMC4J2aLc4iUdzBFM0QyTw,8615
+sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py,sha256=G2g5E0l-FABwYyQowbKKvLcEI8NViJXaYLW3eUEcvjw,4595
+sympy/tensor/array/expressions/tests/test_deprecated_conv_modules.py,sha256=DG8IoUtxCy2acWjUHUUKu4bRsTxXbeFLFjKMLA2GdLY,1216
+sympy/tensor/array/expressions/utils.py,sha256=Rn58boHHUEoBZFtinDpruLWFBkNBwgkVQ4c9m7Nym1o,3939
+sympy/tensor/array/mutable_ndim_array.py,sha256=M0PTt8IOIcVXqQPWe2N50sm4Eq2bodRXV4Vkd08crXk,277
+sympy/tensor/array/ndim_array.py,sha256=_UYVi2vd1zI0asXN7B53e0mp2plgVT5xvB71A_L63Ao,19060
+sympy/tensor/array/sparse_ndim_array.py,sha256=4nD_Hg-JdC_1mYQTohmKFfL5M1Ugdq0fpnDUILkTtq8,6387
+sympy/tensor/array/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/tensor/array/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/tensor/array/tests/__pycache__/test_array_comprehension.cpython-310.pyc,,
+sympy/tensor/array/tests/__pycache__/test_array_derivatives.cpython-310.pyc,,
+sympy/tensor/array/tests/__pycache__/test_arrayop.cpython-310.pyc,,
+sympy/tensor/array/tests/__pycache__/test_immutable_ndim_array.cpython-310.pyc,,
+sympy/tensor/array/tests/__pycache__/test_mutable_ndim_array.cpython-310.pyc,,
+sympy/tensor/array/tests/__pycache__/test_ndim_array.cpython-310.pyc,,
+sympy/tensor/array/tests/__pycache__/test_ndim_array_conversions.cpython-310.pyc,,
+sympy/tensor/array/tests/test_array_comprehension.py,sha256=32n8ZKV4_5DeJ0F7fM_Xo0i0mx6m9w3uWUI2a6OXhzY,4750
+sympy/tensor/array/tests/test_array_derivatives.py,sha256=3O2nD4_d1TFP75qcGJ8XD4DwfPblFzKhY6fAgNQ9KJ0,1609
+sympy/tensor/array/tests/test_arrayop.py,sha256=WahGcUnArsAo9eaMqGT7_AjKons0WgFzLOWTtNvnSEI,25844
+sympy/tensor/array/tests/test_immutable_ndim_array.py,sha256=9ji_14szn-qoL6DQ5muzIFNaXefT7n55PFigXoFwk50,15823
+sympy/tensor/array/tests/test_mutable_ndim_array.py,sha256=rFFa0o0AJYgPNnpqijl91Vb9EW2kgHGQc6cu9f1fIvY,13070
+sympy/tensor/array/tests/test_ndim_array.py,sha256=KH-9LAME3ldVIu5n7Vd_Xr36dN4frCdiF9qZdBWETu0,2232
+sympy/tensor/array/tests/test_ndim_array_conversions.py,sha256=CUGDCbCcslACy3Ngq-zoig9JnO4yHTw3IPcKy0FnRpw,648
+sympy/tensor/functions.py,sha256=3jkzxjMvHHsWchz-0wvuOSFvkNqnoG5knknPCEsZ1bk,4166
+sympy/tensor/index_methods.py,sha256=dcX9kNKLHi_XXkFHBPS-fcM-PaeYKkX80jmzxC0siiQ,15434
+sympy/tensor/indexed.py,sha256=dLic-2CMpPXItLsJCjIUrRDEio-mH2Dcu3H0NgRo3Do,24660
+sympy/tensor/tensor.py,sha256=MEUQJM7NA40rzlZTV1D5PBR_SdIf7K3bVT2ixzqkYKw,165096
+sympy/tensor/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/tensor/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/tensor/tests/__pycache__/test_functions.cpython-310.pyc,,
+sympy/tensor/tests/__pycache__/test_index_methods.cpython-310.pyc,,
+sympy/tensor/tests/__pycache__/test_indexed.cpython-310.pyc,,
+sympy/tensor/tests/__pycache__/test_printing.cpython-310.pyc,,
+sympy/tensor/tests/__pycache__/test_tensor.cpython-310.pyc,,
+sympy/tensor/tests/__pycache__/test_tensor_element.cpython-310.pyc,,
+sympy/tensor/tests/__pycache__/test_tensor_operators.cpython-310.pyc,,
+sympy/tensor/tests/test_functions.py,sha256=rBBHjJIUA2oR83UgEJ_GIASDWfTZXDzOllmcO90XYDU,1552
+sympy/tensor/tests/test_index_methods.py,sha256=Pu951z4yYYMOXBKcNteH63hTAxmNX8702nSQH_pciFE,7112
+sympy/tensor/tests/test_indexed.py,sha256=pCvqmScU0oQxx44qm9T3MkKIXKgVFRDkSHLDhSNqOIY,16157
+sympy/tensor/tests/test_printing.py,sha256=sUx_rChNTWFKPNwVl296QXO-d4-yemDJnkEHFislsmc,424
+sympy/tensor/tests/test_tensor.py,sha256=JybH2AAbEGNob44I6vl7uiiy_VpmR4O4gKCZOfwDPWE,75044
+sympy/tensor/tests/test_tensor_element.py,sha256=1dF96FtqUGaJzethw23vJIj3H5KdxsU1Xyd4DU54EB4,908
+sympy/tensor/tests/test_tensor_operators.py,sha256=sOwu-U28098Lg0iV_9RfYxvJ8wAd5Rk6_vAivWdkc9Q,17945
+sympy/tensor/toperators.py,sha256=fniTUpdYz0OvtNnFgrHINedX86FxVcxfKj9l_l1p9Rw,8840
+sympy/testing/__init__.py,sha256=YhdM87Kfsci8340HmKrXVmA4y0z_VeUN5QQbwAOvEbg,139
+sympy/testing/__pycache__/__init__.cpython-310.pyc,,
+sympy/testing/__pycache__/matrices.cpython-310.pyc,,
+sympy/testing/__pycache__/pytest.cpython-310.pyc,,
+sympy/testing/__pycache__/quality_unicode.cpython-310.pyc,,
+sympy/testing/__pycache__/randtest.cpython-310.pyc,,
+sympy/testing/__pycache__/runtests.cpython-310.pyc,,
+sympy/testing/__pycache__/tmpfiles.cpython-310.pyc,,
+sympy/testing/matrices.py,sha256=VWBPdjIUYNHE7fdbYcmQwQTYcIWpOP9tFn9A0rGCBmE,216
+sympy/testing/pytest.py,sha256=VsbyFXAwDHWc69AxJZBml7U_Mun6kS5NutziSH6l-RE,13142
+sympy/testing/quality_unicode.py,sha256=aJma-KtrKgusUL1jz5IADz7q6vc70rsfbT9NtxJDeV4,3318
+sympy/testing/randtest.py,sha256=IKDFAm8b72Z1OkT7vpgnZjaW5LsSU_wf6g35sCkq9I0,562
+sympy/testing/runtests.py,sha256=QbirfrvKseYmrM2kLjHHhNGNgO6DsHJS1ncuH5PnPT4,88921
+sympy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/testing/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/testing/tests/__pycache__/diagnose_imports.cpython-310.pyc,,
+sympy/testing/tests/__pycache__/test_code_quality.cpython-310.pyc,,
+sympy/testing/tests/__pycache__/test_deprecated.cpython-310.pyc,,
+sympy/testing/tests/__pycache__/test_module_imports.cpython-310.pyc,,
+sympy/testing/tests/__pycache__/test_pytest.cpython-310.pyc,,
+sympy/testing/tests/diagnose_imports.py,sha256=ZtSLMYNT1-RUvPlCUpYzj97aE3NafvGgp0UzRXOPd0Q,9694
+sympy/testing/tests/test_code_quality.py,sha256=JTVznHG1HKBmy3Or4_gFjBlAi0L1BJ2wjgZLUu5zBa0,19237
+sympy/testing/tests/test_deprecated.py,sha256=wQZHs4wDNuK4flaKKLsJW6XRMtrVjMv_5rUP3WspgPA,183
+sympy/testing/tests/test_module_imports.py,sha256=5w6F6JW6K7lgpbB4X9Tj0Vw8AcNVlfaSuvbwKXJKD6c,1459
+sympy/testing/tests/test_pytest.py,sha256=iKO10Tvua1Xem6a22IWH4SDrpFfr-bM-rXx039Ua7YA,6778
+sympy/testing/tmpfiles.py,sha256=bF8ktKC9lDhS65gahB9hOewsZ378UkhLgq3QHiqWYXU,1042
+sympy/this.py,sha256=XfOkN5EIM2RuDxSm_q6k_R_WtkIoSy6PXWKp3aAXvoc,550
+sympy/unify/__init__.py,sha256=Upa9h7SSr9W1PXo0WkNESsGsMZ85rcWkeruBtkAi3Fg,293
+sympy/unify/__pycache__/__init__.cpython-310.pyc,,
+sympy/unify/__pycache__/core.cpython-310.pyc,,
+sympy/unify/__pycache__/rewrite.cpython-310.pyc,,
+sympy/unify/__pycache__/usympy.cpython-310.pyc,,
+sympy/unify/core.py,sha256=-BCNPPMdfZuhhIWqyn9pYJoO8yFPGDX78Hn2551ABuE,7037
+sympy/unify/rewrite.py,sha256=Emr8Uoum3gxKpMDqFHJIjx3xChArUIN6XIy6NPfCS8I,1798
+sympy/unify/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/unify/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/unify/tests/__pycache__/test_rewrite.cpython-310.pyc,,
+sympy/unify/tests/__pycache__/test_sympy.cpython-310.pyc,,
+sympy/unify/tests/__pycache__/test_unify.cpython-310.pyc,,
+sympy/unify/tests/test_rewrite.py,sha256=BgA8zmdz9Nw-Xbu4-w3UABeWypqLvmy9VzL744EmYtE,2002
+sympy/unify/tests/test_sympy.py,sha256=UCItZJNAx9dG5F7O27pyXUF1-e6aOwkZ-cVdB6SZFZc,5922
+sympy/unify/tests/test_unify.py,sha256=4TlgchV6NWuBekJx9RGlMjx3-UwonzgIYXDytb7sBRU,3029
+sympy/unify/usympy.py,sha256=6Kxx96FXSdqXimLseVK_FkYwy2vqWhNnxMVPMRShvy4,3964
+sympy/utilities/__init__.py,sha256=nbQhzII8dw5zd4hQJ2SUyriK5dOrqf-bbjy10XKQXPw,840
+sympy/utilities/__pycache__/__init__.cpython-310.pyc,,
+sympy/utilities/__pycache__/autowrap.cpython-310.pyc,,
+sympy/utilities/__pycache__/codegen.cpython-310.pyc,,
+sympy/utilities/__pycache__/decorator.cpython-310.pyc,,
+sympy/utilities/__pycache__/enumerative.cpython-310.pyc,,
+sympy/utilities/__pycache__/exceptions.cpython-310.pyc,,
+sympy/utilities/__pycache__/iterables.cpython-310.pyc,,
+sympy/utilities/__pycache__/lambdify.cpython-310.pyc,,
+sympy/utilities/__pycache__/magic.cpython-310.pyc,,
+sympy/utilities/__pycache__/matchpy_connector.cpython-310.pyc,,
+sympy/utilities/__pycache__/memoization.cpython-310.pyc,,
+sympy/utilities/__pycache__/misc.cpython-310.pyc,,
+sympy/utilities/__pycache__/pkgdata.cpython-310.pyc,,
+sympy/utilities/__pycache__/pytest.cpython-310.pyc,,
+sympy/utilities/__pycache__/randtest.cpython-310.pyc,,
+sympy/utilities/__pycache__/runtests.cpython-310.pyc,,
+sympy/utilities/__pycache__/source.cpython-310.pyc,,
+sympy/utilities/__pycache__/timeutils.cpython-310.pyc,,
+sympy/utilities/__pycache__/tmpfiles.cpython-310.pyc,,
+sympy/utilities/_compilation/__init__.py,sha256=uYUDPbwrMTbGEMVuago32EN_ix8fsi5M0SvcLOtwMOk,751
+sympy/utilities/_compilation/__pycache__/__init__.cpython-310.pyc,,
+sympy/utilities/_compilation/__pycache__/availability.cpython-310.pyc,,
+sympy/utilities/_compilation/__pycache__/compilation.cpython-310.pyc,,
+sympy/utilities/_compilation/__pycache__/runners.cpython-310.pyc,,
+sympy/utilities/_compilation/__pycache__/util.cpython-310.pyc,,
+sympy/utilities/_compilation/availability.py,sha256=ybxp3mboH5772JHTWKBN1D-cs6QxATQiaL4zJVV4RE0,2884
+sympy/utilities/_compilation/compilation.py,sha256=t6UrVUHDrk7im_mYXx8s7ZkyUEkllhx38u7AAk5Z1P8,21675
+sympy/utilities/_compilation/runners.py,sha256=mb8_rvyx68qekMx8yZZyBH5G7bX94QG6W3lJ17rBmGU,8974
+sympy/utilities/_compilation/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/utilities/_compilation/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/utilities/_compilation/tests/__pycache__/test_compilation.cpython-310.pyc,,
+sympy/utilities/_compilation/tests/test_compilation.py,sha256=MORW8RsdmQTgFpYR7PLRQ35gxFYup3ejQu0byiIxmig,1735
+sympy/utilities/_compilation/util.py,sha256=3ZVUy732fHXFm6oK2EE13M-tztpG5G5vy4FcJ-V3SwY,7928
+sympy/utilities/autowrap.py,sha256=MNoV81PCxJvlk9_aG87jUpWkGhn03WCCk0SPG54nRoc,41123
+sympy/utilities/codegen.py,sha256=WbFTgzQPlCf-0O-gk8X-r9pxMnz4j8roObFsCThVl4Q,81495
+sympy/utilities/decorator.py,sha256=RTwHzeF1N9WMe6apBkYM2vaJcDoP683Ze548S3T_NN8,10925
+sympy/utilities/enumerative.py,sha256=pYpty2YDgvF5LBrmiAVyiqpiqhfFeYTfQfS7sTQMNks,43621
+sympy/utilities/exceptions.py,sha256=g9fgLCjrkuYk-ImX_V42ve2XIayK01mWmlXKOIVmW_8,10571
+sympy/utilities/iterables.py,sha256=VpGyggsMbqd2CL2TRSX1Iozp1G4VMIPNS7FMME-hPAw,90920
+sympy/utilities/lambdify.py,sha256=2DLVtqwhws_PAPVzxS5nh7YVfICAdGKxYGVNQ9p9mrg,55149
+sympy/utilities/magic.py,sha256=ofrwi1-xwMWb4VCQOEIwe4J1QAwxOscigDq26uSn3iY,400
+sympy/utilities/matchpy_connector.py,sha256=045re8zEDdr70Ey39OWRq0xnM6OsKBISiu9SB4nJ90g,10068
+sympy/utilities/mathml/__init__.py,sha256=3AG_eTJ4I7071riTqesIi1A3bykCeIUES2CTEYxfrPI,2299
+sympy/utilities/mathml/__pycache__/__init__.cpython-310.pyc,,
+sympy/utilities/mathml/data/mmlctop.xsl,sha256=fi3CTNyg-mSscOGYBXLJv8veE_ItR_YTFMJ4jmjp6aE,114444
+sympy/utilities/mathml/data/mmltex.xsl,sha256=haX7emZOfD6_nbn5BjK93F-C85mSS8KogAbIBsW1aBA,137304
+sympy/utilities/mathml/data/simple_mmlctop.xsl,sha256=lhL-HXG_FfsJZhjeHbD7Ou8RnUaStI0-5VFcggsogjA,114432
+sympy/utilities/memoization.py,sha256=ZGOUUmwJCNRhHVZjTF4j65WjQ6VUoCeC1E8DkjryU00,1429
+sympy/utilities/misc.py,sha256=7N6LNt5N9eR2AK-_jmdOXXKhyhbW4kLRY8O5wYw3VgI,16007
+sympy/utilities/pkgdata.py,sha256=jt-hKL0xhxnDJDI9C2IXtH_QgYYtfq9fX9kJ3E7iang,1788
+sympy/utilities/pytest.py,sha256=F9TGNtoNvQUdlt5HYU084ITNmc7__7MBCSLLulBlM_Y,435
+sympy/utilities/randtest.py,sha256=aYUX_mgmQyfRdMjEOWaHM506CZ6WUK0eFuew0vFTwRs,430
+sympy/utilities/runtests.py,sha256=hYnDNiFNnDjQcXG04_3lzPFbUz6i0AUZ2rZ_RECVoDo,446
+sympy/utilities/source.py,sha256=ShIXRNtplSEfZNi5VDYD3yi6305eRz4TmchEOEvcicw,1127
+sympy/utilities/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/utilities/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_autowrap.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_codegen.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_codegen_julia.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_codegen_octave.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_codegen_rust.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_decorator.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_deprecated.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_enumerative.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_exceptions.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_iterables.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_lambdify.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_matchpy_connector.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_mathml.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_misc.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_pickling.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_source.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_timeutils.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc,,
+sympy/utilities/tests/__pycache__/test_xxe.cpython-310.pyc,,
+sympy/utilities/tests/test_autowrap.py,sha256=NW20YQiJgEofZ0xr4Ggocix4fAsBmnyankmbxPf54Fk,14603
+sympy/utilities/tests/test_codegen.py,sha256=PLuSicBhnspClTiSeKCJgKd1NyU0qBkDRvQMrwm_gLc,55496
+sympy/utilities/tests/test_codegen_julia.py,sha256=kb3soJ1L7lTfZkYJKytfY_aKoHt6fkNjWhYblebzThw,18543
+sympy/utilities/tests/test_codegen_octave.py,sha256=_yd9uGKHZzwUFpderSa9E2cYqt8JMcEtBuN6U7_7bJ0,17833
+sympy/utilities/tests/test_codegen_rust.py,sha256=wJh6YmDfq8haGjJDniDaVUsDIKEj3rT_OB4r6uLI77Y,12323
+sympy/utilities/tests/test_decorator.py,sha256=VYUvzUrVI7I7MK0YZxLLEmEu4pV5dqaB1CLEJ8Ocav4,3705
+sympy/utilities/tests/test_deprecated.py,sha256=LRrZ2UxuXnK6Jwxl8vT0EdLT-q-7jLkTC69U9JjuYYU,489
+sympy/utilities/tests/test_enumerative.py,sha256=aUw6nbSzBp8h_pk35YZ_uzRncRoLYStblodeiDRFk6I,6089
+sympy/utilities/tests/test_exceptions.py,sha256=OKRa2yuHMtnVcnisu-xcaedi2RKsH9QrgU9exgoOK30,716
+sympy/utilities/tests/test_iterables.py,sha256=fPlgquV8GaZEIAjCwxE5DnXjGJUQlt6PGR7yj-gBLJ8,34905
+sympy/utilities/tests/test_lambdify.py,sha256=COnloXr7-MetPh-YonB1h6sEy5UkzBYWTdNuEGuduew,59594
+sympy/utilities/tests/test_matchpy_connector.py,sha256=dUfDfIdofKYufww29jV8mVQmglU1AnG2uEyREpNY7V0,4506
+sympy/utilities/tests/test_mathml.py,sha256=-6z1MRYEH4eYQi2_wt8zmdjwtt5Cn483zqsvD-o_r70,836
+sympy/utilities/tests/test_misc.py,sha256=TxjUNCosyCR5w1iJ6o77yKB4WBLyirVhOaALGYdkN9k,4726
+sympy/utilities/tests/test_pickling.py,sha256=JxsZSIVrXrscDwZ0Bvx4DkyLSEIyXUzoO96qrOx-5tU,23301
+sympy/utilities/tests/test_source.py,sha256=ObjrJxZFVhLgXjVmFHUy7bti9UPPgOh5Cptw8lHW9mM,289
+sympy/utilities/tests/test_timeutils.py,sha256=sCRC6BCSho1e9n4clke3QXHx4a3qYLru-bddS_sEmFA,337
+sympy/utilities/tests/test_wester.py,sha256=6_o3Dm4fT3R-TZEinuel2VFdZth0BOgPTPFYSEIcDX0,94546
+sympy/utilities/tests/test_xxe.py,sha256=xk1j0Dd96wsGYKRNDzXTW0hTQejGCfiZcEhYcYiqojg,66
+sympy/utilities/timeutils.py,sha256=DUtQYONkJnWjU2FvAbvxuRMkGmXpLMeaiOcH7R9Os9o,1968
+sympy/utilities/tmpfiles.py,sha256=yOjbs90sEtVc00YZyveyblT8zkwj4o70_RmuEKdKq_s,445
+sympy/vector/__init__.py,sha256=8a4cSQ1sJ5uirdMoHnV7SWXU3zJPKt_0ojona8C-p1Y,1909
+sympy/vector/__pycache__/__init__.cpython-310.pyc,,
+sympy/vector/__pycache__/basisdependent.cpython-310.pyc,,
+sympy/vector/__pycache__/coordsysrect.cpython-310.pyc,,
+sympy/vector/__pycache__/deloperator.cpython-310.pyc,,
+sympy/vector/__pycache__/dyadic.cpython-310.pyc,,
+sympy/vector/__pycache__/functions.cpython-310.pyc,,
+sympy/vector/__pycache__/implicitregion.cpython-310.pyc,,
+sympy/vector/__pycache__/integrals.cpython-310.pyc,,
+sympy/vector/__pycache__/operators.cpython-310.pyc,,
+sympy/vector/__pycache__/orienters.cpython-310.pyc,,
+sympy/vector/__pycache__/parametricregion.cpython-310.pyc,,
+sympy/vector/__pycache__/point.cpython-310.pyc,,
+sympy/vector/__pycache__/scalar.cpython-310.pyc,,
+sympy/vector/__pycache__/vector.cpython-310.pyc,,
+sympy/vector/basisdependent.py,sha256=BTTlFGRnZIvpvK_WEK4Tk_WZXEXYGosx9fWTuMO4M0o,11553
+sympy/vector/coordsysrect.py,sha256=1JV4GBgG99JKIWo2snYMMgIJCdob3XcwYqq9s8d6fA8,36859
+sympy/vector/deloperator.py,sha256=4BJNjmI342HkVRmeQkqauqvibKsf2HOuzknQTfQMkpg,3191
+sympy/vector/dyadic.py,sha256=IOyrgONyGDHPtG0RINcMgetAVMSOmYI5a99s-OwXBTA,8571
+sympy/vector/functions.py,sha256=auLfE1Su2kLtkRvlB_7Wol8O0_sqei1hojun3pkDRYI,15552
+sympy/vector/implicitregion.py,sha256=WrCIFuh_KZ6iEA7FZzYanZoUQuJ4gNBP3NeNKMxC0l0,16155
+sympy/vector/integrals.py,sha256=x8DrvKXPznE05JgnZ7I3IWLWrvFl9SEghGaFmHrBaE4,6837
+sympy/vector/operators.py,sha256=mI6d0eIxVcoDeH5PrhtPTzhxX_RXByX_4hjXeBTeq88,9521
+sympy/vector/orienters.py,sha256=EtWNWfOvAuy_wipam9SA7_muKSrsP-43UPRCCz56sb0,11798
+sympy/vector/parametricregion.py,sha256=3YyY0fkFNelR6ldi8XYRWpkFEvqY5-rFg_vT3NFute0,5932
+sympy/vector/point.py,sha256=ozYlInnlsmIpKBEr5Ui331T1lnAB5zS2_pHYh9k_eMs,4516
+sympy/vector/scalar.py,sha256=Z2f2wiK7BS73ctYTyNvn3gB74mXZuENpScLi_M1SpYg,1962
+sympy/vector/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sympy/vector/tests/__pycache__/__init__.cpython-310.pyc,,
+sympy/vector/tests/__pycache__/test_coordsysrect.cpython-310.pyc,,
+sympy/vector/tests/__pycache__/test_dyadic.cpython-310.pyc,,
+sympy/vector/tests/__pycache__/test_field_functions.cpython-310.pyc,,
+sympy/vector/tests/__pycache__/test_functions.cpython-310.pyc,,
+sympy/vector/tests/__pycache__/test_implicitregion.cpython-310.pyc,,
+sympy/vector/tests/__pycache__/test_integrals.cpython-310.pyc,,
+sympy/vector/tests/__pycache__/test_operators.cpython-310.pyc,,
+sympy/vector/tests/__pycache__/test_parametricregion.cpython-310.pyc,,
+sympy/vector/tests/__pycache__/test_printing.cpython-310.pyc,,
+sympy/vector/tests/__pycache__/test_vector.cpython-310.pyc,,
+sympy/vector/tests/test_coordsysrect.py,sha256=q9n9OIG_CpD4KQN20dzwRZIXoMv7VSgp8fHmVnkZfr0,19595
+sympy/vector/tests/test_dyadic.py,sha256=f1R-BL_63VBbc0XgEX_LYzV_3OupYd4hp5RzRk6dAbI,4949
+sympy/vector/tests/test_field_functions.py,sha256=v9l8Ex8K2MsPGxqAPhpEgu6WAo6wS6qvdWLKQMxgE4A,14094
+sympy/vector/tests/test_functions.py,sha256=Bs2sekdDJyw_wrUpG7vZQGH0y0S4C4AbxGSpeU_8C2s,8050
+sympy/vector/tests/test_implicitregion.py,sha256=wVilD5H-MhHiW58QT6P5U7uT79JdKHm9D7JgZoi6BE4,4028
+sympy/vector/tests/test_integrals.py,sha256=BVRhrr_JeAsCKv_E-kA2jaXB8ZXTfj7nkNgT5o-XOJc,5093
+sympy/vector/tests/test_operators.py,sha256=KexUWvc_Nwp2HWrEbhxiO7MeaFxYlckrp__Tkwg-wmU,1613
+sympy/vector/tests/test_parametricregion.py,sha256=OfKapF9A_g9X6JxgYc0UfxIhwXzRERzaj-EijQCJONw,4009
+sympy/vector/tests/test_printing.py,sha256=3BeW55iQ4qXdfDTFqptE2ufJPJIBOzdfIYVx84n_EwA,7708
+sympy/vector/tests/test_vector.py,sha256=Mo88Jgmy3CuSQz25WSH34EnZSs_JBY7E-OKPO2SjhPc,7861
+sympy/vector/vector.py,sha256=pikmeLwkdW_6ed-Xo_U0_a2Om5TGSlfE4PijkRsJllc,17911
diff --git a/llmeval-env/lib/python3.10/site-packages/sympy-1.12.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/sympy-1.12.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..1f37c02f2eb2e26b306202feaccb31e522b8b169
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/sympy-1.12.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.40.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
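
Note on the metadata above: per PEP 376/PEP 427, each RECORD row is `path,sha256=<digest>,size`, where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with the trailing `=` padding stripped; rows for `__pycache__/*.pyc` files leave the hash and size fields empty because those files are generated after install. The sketch below is a minimal illustration, not part of this diff: `verify_record` is a hypothetical helper, and the example paths mirror this tree.

    import base64
    import csv
    import hashlib
    from pathlib import Path

    def verify_record(site_packages: str, record: str) -> None:
        """Check every hashed RECORD row against the file on disk."""
        root = Path(site_packages)
        with open(record, newline="") as fh:
            for path, digest, size in csv.reader(fh):
                if not digest:  # .pyc rows (and RECORD itself) carry no hash
                    continue
                algo, _, expected = digest.partition("=")
                data = (root / path).read_bytes()
                # urlsafe base64 of the raw digest, with '=' padding stripped,
                # matches the encoding used in the RECORD entries above
                actual = base64.urlsafe_b64encode(
                    hashlib.new(algo, data).digest()
                ).rstrip(b"=").decode("ascii")
                assert actual == expected, f"hash mismatch for {path}"
                assert int(size) == len(data), f"size mismatch for {path}"

    # e.g. (paths assumed from this diff):
    # verify_record(
    #     "llmeval-env/lib/python3.10/site-packages",
    #     "llmeval-env/lib/python3.10/site-packages/sympy-1.12.dist-info/RECORD",
    # )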