applied-ai-018 committed
Commit 93502d3 · verified · 1 parent: 0c481ef

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +3 -0
  2. env-llmeval/bin/python +3 -0
  3. env-llmeval/lib/python3.10/site-packages/__editable__.lm_eval-0.4.2.pth +3 -0
  4. env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py +101 -0
  21. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py +267 -0
  22. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/update.py +63 -0
  23. env-llmeval/lib/python3.10/site-packages/accelerate/utils/__init__.py +225 -0
  24. env-llmeval/lib/python3.10/site-packages/accelerate/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/accelerate/utils/__pycache__/memory.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/accelerate/utils/__pycache__/offload.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/accelerate/utils/bnb.py +467 -0
  30. env-llmeval/lib/python3.10/site-packages/accelerate/utils/constants.py +72 -0
  31. env-llmeval/lib/python3.10/site-packages/accelerate/utils/dataclasses.py +1717 -0
  32. env-llmeval/lib/python3.10/site-packages/accelerate/utils/deepspeed.py +271 -0
  33. env-llmeval/lib/python3.10/site-packages/accelerate/utils/environment.py +274 -0
  34. env-llmeval/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py +209 -0
  35. env-llmeval/lib/python3.10/site-packages/accelerate/utils/imports.py +385 -0
  36. env-llmeval/lib/python3.10/site-packages/accelerate/utils/launch.py +624 -0
  37. env-llmeval/lib/python3.10/site-packages/accelerate/utils/megatron_lm.py +1435 -0
  38. env-llmeval/lib/python3.10/site-packages/accelerate/utils/memory.py +158 -0
  39. env-llmeval/lib/python3.10/site-packages/accelerate/utils/modeling.py +1800 -0
  40. env-llmeval/lib/python3.10/site-packages/accelerate/utils/offload.py +213 -0
  41. env-llmeval/lib/python3.10/site-packages/accelerate/utils/operations.py +851 -0
  42. env-llmeval/lib/python3.10/site-packages/accelerate/utils/other.py +366 -0
  43. env-llmeval/lib/python3.10/site-packages/accelerate/utils/random.py +122 -0
  44. env-llmeval/lib/python3.10/site-packages/accelerate/utils/rich.py +24 -0
  45. env-llmeval/lib/python3.10/site-packages/accelerate/utils/torch_xla.py +51 -0
  46. env-llmeval/lib/python3.10/site-packages/accelerate/utils/tqdm.py +37 -0
  47. env-llmeval/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py +84 -0
  48. env-llmeval/lib/python3.10/site-packages/accelerate/utils/versions.py +56 -0
  49. env-llmeval/lib/python3.10/site-packages/numpy/__config__.py +162 -0
  50. env-llmeval/lib/python3.10/site-packages/numpy/__init__.cython-30.pxd +1050 -0
.gitattributes CHANGED
@@ -117,3 +117,6 @@ llmeval-env/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 f
 llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/bin/python3 filter=lfs diff=lfs merge=lfs -text
 env-llmeval/bin/python3.10 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/bin/python filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
env-llmeval/bin/python ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45692c3da2492563eabf0a8f5dc18d20dc9c34ffe3a18202563e00bae684be91
+size 5904904
env-llmeval/lib/python3.10/site-packages/__editable__.lm_eval-0.4.2.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b87d24b5f41e9dfa2760bdd38e88bee0db23d5b34659d3ff52d013edad9d5ec
+size 85
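Both entries above are Git LFS pointer files rather than the real binaries: each records the pointer spec version, the SHA-256 of the actual object, and its size in bytes. A minimal sketch of reading such a pointer (the helper name is illustrative, not part of the commit):

# Parse a Git LFS pointer of the form shown above:
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<hex digest>
#   size <bytes>
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "oid_algo": algo, "oid": digest, "size": int(fields["size"])}

# Example with the pointer stored for env-llmeval/bin/python
pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:45692c3da2492563eabf0a8f5dc18d20dc9c34ffe3a18202563e00bae684be91\n"
    "size 5904904\n"
)
assert pointer["size"] == 5904904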
env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-310.pyc ADDED
Binary file (1.22 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/env.cpython-310.pyc ADDED
Binary file (2.86 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/estimate.cpython-310.pyc ADDED
Binary file (10.2 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/launch.cpython-310.pyc ADDED
Binary file (28.1 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/test.cpython-310.pyc ADDED
Binary file (1.65 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc ADDED
Binary file (3.85 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.71 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.09 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc ADDED
Binary file (14.9 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.44 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc ADDED
Binary file (7.14 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc ADDED
Binary file (2.75 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc ADDED
Binary file (3.92 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc ADDED
Binary file (6.87 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc ADDED
Binary file (1.86 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py ADDED
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+from ...utils.dataclasses import (
+    ComputeEnvironment,
+    DistributedType,
+    DynamoBackend,
+    PrecisionType,
+    SageMakerDistributedType,
+)
+from ..menu import BulletMenu
+
+
+DYNAMO_BACKENDS = [
+    "EAGER",
+    "AOT_EAGER",
+    "INDUCTOR",
+    "AOT_TS_NVFUSER",
+    "NVPRIMS_NVFUSER",
+    "CUDAGRAPHS",
+    "OFI",
+    "FX2TRT",
+    "ONNXRT",
+    "TENSORRT",
+    "IPEX",
+    "TVM",
+]
+
+
+def _ask_field(input_text, convert_value=None, default=None, error_message=None):
+    ask_again = True
+    while ask_again:
+        result = input(input_text)
+        try:
+            if default is not None and len(result) == 0:
+                return default
+            return convert_value(result) if convert_value is not None else result
+        except Exception:
+            if error_message is not None:
+                print(error_message)
+
+
+def _ask_options(input_text, options=[], convert_value=None, default=0):
+    menu = BulletMenu(input_text, options)
+    result = menu.run(default_choice=default)
+    return convert_value(result) if convert_value is not None else result
+
+
+def _convert_compute_environment(value):
+    value = int(value)
+    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
+
+
+def _convert_distributed_mode(value):
+    value = int(value)
+    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "MULTI_MLU", "XLA"][value])
+
+
+def _convert_dynamo_backend(value):
+    value = int(value)
+    return DynamoBackend(DYNAMO_BACKENDS[value]).value
+
+
+def _convert_mixed_precision(value):
+    value = int(value)
+    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
+
+
+def _convert_sagemaker_distributed_mode(value):
+    value = int(value)
+    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
+
+
+def _convert_yes_no_to_bool(value):
+    return {"yes": True, "no": False}[value.lower()]
+
+
+class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
+    """
+    A custom formatter that will remove the usage line from the help message for subcommands.
+    """
+
+    def _format_usage(self, usage, actions, groups, prefix):
+        usage = super()._format_usage(usage, actions, groups, prefix)
+        usage = usage.replace("<command> [<args>] ", "")
+        return usage
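The helpers above drive the interactive prompts of `accelerate config`: `_ask_field` keeps re-asking until `convert_value` succeeds (falling back to the default on empty input), and the `_convert_*` functions map a numeric menu choice onto the matching enum. A minimal sketch of the same retry pattern, written as a standalone re-implementation for illustration rather than an import from the package:

def ask_yes_no(prompt: str, default: bool = False) -> bool:
    # Same loop shape as _ask_field + _convert_yes_no_to_bool above: retry until
    # the answer converts cleanly, fall back to the default on empty input.
    while True:
        answer = input(prompt)
        if answer == "":
            return default
        try:
            return {"yes": True, "no": False}[answer.lower()]
        except KeyError:
            print("Please enter yes or no.")

# e.g. use_dynamo = ask_yes_no("Do you wish to optimize your script with torch dynamo? [yes/NO]: ")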
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py ADDED
@@ -0,0 +1,267 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import os
+
+from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
+from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
+from ...utils.imports import is_boto3_available
+from .config_args import SageMakerConfig
+from .config_utils import (
+    DYNAMO_BACKENDS,
+    _ask_field,
+    _ask_options,
+    _convert_dynamo_backend,
+    _convert_mixed_precision,
+    _convert_sagemaker_distributed_mode,
+    _convert_yes_no_to_bool,
+)
+
+
+if is_boto3_available():
+    import boto3  # noqa: F401
+
+
+def _create_iam_role_for_sagemaker(role_name):
+    iam_client = boto3.client("iam")
+
+    sagemaker_trust_policy = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
+        ],
+    }
+    try:
+        # create the role, associated with the chosen trust policy
+        iam_client.create_role(
+            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
+        )
+        policy_document = {
+            "Version": "2012-10-17",
+            "Statement": [
+                {
+                    "Effect": "Allow",
+                    "Action": [
+                        "sagemaker:*",
+                        "ecr:GetDownloadUrlForLayer",
+                        "ecr:BatchGetImage",
+                        "ecr:BatchCheckLayerAvailability",
+                        "ecr:GetAuthorizationToken",
+                        "cloudwatch:PutMetricData",
+                        "cloudwatch:GetMetricData",
+                        "cloudwatch:GetMetricStatistics",
+                        "cloudwatch:ListMetrics",
+                        "logs:CreateLogGroup",
+                        "logs:CreateLogStream",
+                        "logs:DescribeLogStreams",
+                        "logs:PutLogEvents",
+                        "logs:GetLogEvents",
+                        "s3:CreateBucket",
+                        "s3:ListBucket",
+                        "s3:GetBucketLocation",
+                        "s3:GetObject",
+                        "s3:PutObject",
+                    ],
+                    "Resource": "*",
+                }
+            ],
+        }
+        # attach policy to role
+        iam_client.put_role_policy(
+            RoleName=role_name,
+            PolicyName=f"{role_name}_policy_permission",
+            PolicyDocument=json.dumps(policy_document, indent=2),
+        )
+    except iam_client.exceptions.EntityAlreadyExistsException:
+        print(f"role {role_name} already exists. Using existing one")
+
+
+def _get_iam_role_arn(role_name):
+    iam_client = boto3.client("iam")
+    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
+
+
+def get_sagemaker_input():
+    credentials_configuration = _ask_options(
+        "How do you want to authorize?",
+        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
+        int,
+    )
+    aws_profile = None
+    if credentials_configuration == 0:
+        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
+        os.environ["AWS_PROFILE"] = aws_profile
+    else:
+        print(
+            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
+            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
+        )
+        aws_access_key_id = _ask_field("AWS Access Key ID: ")
+        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
+
+        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
+        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
+
+    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
+    os.environ["AWS_DEFAULT_REGION"] = aws_region
+
+    role_management = _ask_options(
+        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
+        ["Provide IAM Role name", "Create new IAM role using credentials"],
+        int,
+    )
+    if role_management == 0:
+        iam_role_name = _ask_field("Enter your IAM role name: ")
+    else:
+        iam_role_name = "accelerate_sagemaker_execution_role"
+        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
+        _create_iam_role_for_sagemaker(iam_role_name)
+
+    is_custom_docker_image = _ask_field(
+        "Do you want to use custom Docker image? [yes/NO]: ",
+        _convert_yes_no_to_bool,
+        default=False,
+        error_message="Please enter yes or no.",
+    )
+    docker_image = None
+    if is_custom_docker_image:
+        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
+
+    is_sagemaker_inputs_enabled = _ask_field(
+        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
+        _convert_yes_no_to_bool,
+        default=False,
+        error_message="Please enter yes or no.",
+    )
+    sagemaker_inputs_file = None
+    if is_sagemaker_inputs_enabled:
+        sagemaker_inputs_file = _ask_field(
+            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
+            lambda x: str(x).lower(),
+        )
+
+    is_sagemaker_metrics_enabled = _ask_field(
+        "Do you want to enable SageMaker metrics? [yes/NO]: ",
+        _convert_yes_no_to_bool,
+        default=False,
+        error_message="Please enter yes or no.",
+    )
+    sagemaker_metrics_file = None
+    if is_sagemaker_metrics_enabled:
+        sagemaker_metrics_file = _ask_field(
+            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
+            lambda x: str(x).lower(),
+        )
+
+    distributed_type = _ask_options(
+        "What is the distributed mode?",
+        ["No distributed training", "Data parallelism"],
+        _convert_sagemaker_distributed_mode,
+    )
+    dynamo_config = {}
+    use_dynamo = _ask_field(
+        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
+        _convert_yes_no_to_bool,
+        default=False,
+        error_message="Please enter yes or no.",
+    )
+    if use_dynamo:
+        prefix = "dynamo_"
+        dynamo_config[prefix + "backend"] = _ask_options(
+            "Which dynamo backend would you like to use?",
+            [x.lower() for x in DYNAMO_BACKENDS],
+            _convert_dynamo_backend,
+            default=2,
+        )
+        use_custom_options = _ask_field(
+            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
+            _convert_yes_no_to_bool,
+            default=False,
+            error_message="Please enter yes or no.",
+        )
+
+        if use_custom_options:
+            dynamo_config[prefix + "mode"] = _ask_options(
+                "Which mode do you want to use?",
+                TORCH_DYNAMO_MODES,
+                lambda x: TORCH_DYNAMO_MODES[int(x)],
+                default="default",
+            )
+            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
+                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
+                _convert_yes_no_to_bool,
+                default=False,
+                error_message="Please enter yes or no.",
+            )
+            dynamo_config[prefix + "use_dynamic"] = _ask_field(
+                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
+                _convert_yes_no_to_bool,
+                default=False,
+                error_message="Please enter yes or no.",
+            )
+    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
+    if distributed_type != SageMakerDistributedType.NO:
+        ec2_instance_type = _ask_options(
+            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
+        )
+    else:
+        ec2_instance_query += "? [ml.p3.2xlarge]:"
+        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
+
+    debug = False
+    if distributed_type != SageMakerDistributedType.NO:
+        debug = _ask_field(
+            "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
+            _convert_yes_no_to_bool,
+            default=False,
+            error_message="Please enter yes or no.",
+        )
+
+    num_machines = 1
+    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
+        num_machines = _ask_field(
+            "How many machines do you want use? [1]: ",
+            int,
+            default=1,
+        )
+
+    mixed_precision = _ask_options(
+        "Do you wish to use FP16 or BF16 (mixed precision)?",
+        ["no", "fp16", "bf16", "fp8"],
+        _convert_mixed_precision,
+    )
+
+    if use_dynamo and mixed_precision == "no":
+        print(
+            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
+        )
+
+    return SageMakerConfig(
+        image_uri=docker_image,
+        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
+        distributed_type=distributed_type,
+        use_cpu=False,
+        dynamo_config=dynamo_config,
+        ec2_instance_type=ec2_instance_type,
+        profile=aws_profile,
+        region=aws_region,
+        iam_role_name=iam_role_name,
+        mixed_precision=mixed_precision,
+        num_machines=num_machines,
+        sagemaker_inputs_file=sagemaker_inputs_file,
+        sagemaker_metrics_file=sagemaker_metrics_file,
+        debug=debug,
+    )
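`get_sagemaker_input` walks through these prompts and packs the answers into the `SageMakerConfig` returned at the end. A rough sketch of the non-interactive equivalent, using only the field names visible in that return statement; all the values below are placeholders, and the interactive flow would pass the converted enum/precision values produced by the `_convert_*` helpers rather than raw strings:

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils.dataclasses import ComputeEnvironment, SageMakerDistributedType

# Placeholder values; in the real flow every field comes from the prompts above.
config = SageMakerConfig(
    image_uri=None,
    compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
    distributed_type=SageMakerDistributedType.DATA_PARALLEL,
    use_cpu=False,
    dynamo_config={},
    ec2_instance_type="ml.p3.16xlarge",
    profile="default",
    region="us-east-1",
    iam_role_name="accelerate_sagemaker_execution_role",
    mixed_precision="fp16",
    num_machines=1,
    sagemaker_inputs_file=None,
    sagemaker_metrics_file=None,
    debug=False,
)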
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/update.py ADDED
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+
+from .config_args import default_config_file, load_config_from_file
+from .config_utils import SubcommandHelpFormatter
+
+
+description = "Update an existing config file with the latest defaults while maintaining the old configuration."
+
+
+def update_config(args):
+    """
+    Update an existing config file with the latest defaults while maintaining the old configuration.
+    """
+    config_file = args.config_file
+    if config_file is None and Path(default_config_file).exists():
+        config_file = default_config_file
+    elif not Path(config_file).exists():
+        raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
+    config = load_config_from_file(config_file)
+
+    if config_file.endswith(".json"):
+        config.to_json_file(config_file)
+    else:
+        config.to_yaml_file(config_file)
+    return config_file
+
+
+def update_command_parser(parser, parents):
+    parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
+    parser.add_argument(
+        "--config_file",
+        default=None,
+        help=(
+            "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
+            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+            "with 'huggingface'."
+        ),
+    )
+
+    parser.set_defaults(func=update_config_command)
+    return parser
+
+
+def update_config_command(args):
+    config_file = update_config(args)
+    print(f"Sucessfully updated the configuration file at {config_file}.")
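`update_config` resolves the config path (falling back to the default cache location), reloads it through `load_config_from_file`, and rewrites it as JSON or YAML depending on the file suffix. A minimal sketch of driving it programmatically, assuming a default config has already been written by `accelerate config`; the only attribute the function reads from `args` is `config_file`:

import argparse
from accelerate.commands.config.update import update_config

# Equivalent of `accelerate config update --config_file <path>`; with
# config_file=None the default cache location resolved above is used.
args = argparse.Namespace(config_file=None)
print(update_config(args))  # prints the path of the rewritten config file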
env-llmeval/lib/python3.10/site-packages/accelerate/utils/__init__.py ADDED
@@ -0,0 +1,225 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .constants import (
+    MODEL_NAME,
+    OPTIMIZER_NAME,
+    RNG_STATE_NAME,
+    SAFE_MODEL_NAME,
+    SAFE_WEIGHTS_INDEX_NAME,
+    SAFE_WEIGHTS_NAME,
+    SAMPLER_NAME,
+    SCALER_NAME,
+    SCHEDULER_NAME,
+    TORCH_DISTRIBUTED_OPERATION_TYPES,
+    TORCH_LAUNCH_PARAMS,
+    WEIGHTS_INDEX_NAME,
+    WEIGHTS_NAME,
+)
+from .dataclasses import (
+    AutocastKwargs,
+    BnbQuantizationConfig,
+    ComputeEnvironment,
+    CustomDtype,
+    DataLoaderConfiguration,
+    DeepSpeedPlugin,
+    DistributedDataParallelKwargs,
+    DistributedType,
+    DynamoBackend,
+    FP8RecipeKwargs,
+    FullyShardedDataParallelPlugin,
+    GradientAccumulationPlugin,
+    GradScalerKwargs,
+    InitProcessGroupKwargs,
+    KwargsHandler,
+    LoggerType,
+    MegatronLMPlugin,
+    PrecisionType,
+    ProjectConfiguration,
+    RNGType,
+    SageMakerDistributedType,
+    TensorInformation,
+    TorchDynamoPlugin,
+)
+from .environment import (
+    are_libraries_initialized,
+    check_cuda_p2p_ib_support,
+    check_fp8_capability,
+    convert_dict_to_env_variables,
+    get_cpu_distributed_information,
+    get_gpu_info,
+    get_int_from_env,
+    parse_choice_from_env,
+    parse_flag_from_env,
+    set_numa_affinity,
+    str_to_bool,
+)
+from .imports import (
+    get_ccl_version,
+    is_4bit_bnb_available,
+    is_8bit_bnb_available,
+    is_aim_available,
+    is_bf16_available,
+    is_bnb_available,
+    is_boto3_available,
+    is_ccl_available,
+    is_clearml_available,
+    is_comet_ml_available,
+    is_cuda_available,
+    is_datasets_available,
+    is_deepspeed_available,
+    is_dvclive_available,
+    is_fp8_available,
+    is_ipex_available,
+    is_megatron_lm_available,
+    is_mlflow_available,
+    is_mlu_available,
+    is_mps_available,
+    is_msamp_available,
+    is_npu_available,
+    is_pandas_available,
+    is_peft_available,
+    is_pippy_available,
+    is_pynvml_available,
+    is_rich_available,
+    is_sagemaker_available,
+    is_tensorboard_available,
+    is_timm_available,
+    is_torch_xla_available,
+    is_transformer_engine_available,
+    is_transformers_available,
+    is_wandb_available,
+    is_xpu_available,
+)
+from .modeling import (
+    calculate_maximum_sizes,
+    check_device_map,
+    check_tied_parameters_in_config,
+    check_tied_parameters_on_same_device,
+    compute_module_sizes,
+    convert_file_size_to_int,
+    dtype_byte_size,
+    find_tied_parameters,
+    get_balanced_memory,
+    get_max_layer_size,
+    get_max_memory,
+    get_mixed_precision_context_manager,
+    id_tensor_storage,
+    infer_auto_device_map,
+    is_peft_model,
+    load_checkpoint_in_model,
+    load_offloaded_weights,
+    load_state_dict,
+    named_module_tensors,
+    retie_parameters,
+    set_module_tensor_to_device,
+    shard_checkpoint,
+)
+from .offload import (
+    OffloadedWeightsLoader,
+    PrefixedDataset,
+    extract_submodules_state_dict,
+    load_offloaded_weight,
+    offload_state_dict,
+    offload_weight,
+    save_offload_index,
+)
+from .operations import (
+    CannotPadNestedTensorWarning,
+    broadcast,
+    broadcast_object_list,
+    concatenate,
+    convert_outputs_to_fp32,
+    convert_to_fp32,
+    copy_tensor_to_devices,
+    find_batch_size,
+    find_device,
+    gather,
+    gather_object,
+    get_data_structure,
+    honor_type,
+    ignorant_find_batch_size,
+    initialize_tensors,
+    is_namedtuple,
+    is_tensor_information,
+    is_torch_tensor,
+    listify,
+    pad_across_processes,
+    pad_input_tensors,
+    recursively_apply,
+    reduce,
+    send_to_device,
+    slice_tensors,
+)
+from .versions import compare_versions, is_torch_version
+
+
+if is_deepspeed_available():
+    from .deepspeed import (
+        DeepSpeedEngineWrapper,
+        DeepSpeedOptimizerWrapper,
+        DeepSpeedSchedulerWrapper,
+        DummyOptim,
+        DummyScheduler,
+        HfDeepSpeedConfig,
+    )
+
+from .bnb import has_4bit_bnb_layers, load_and_quantize_model
+from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
+from .launch import (
+    PrepareForLaunch,
+    _filter_args,
+    prepare_deepspeed_cmd_env,
+    prepare_multi_gpu_env,
+    prepare_sagemager_args_inputs,
+    prepare_simple_launcher_cmd_env,
+    prepare_tpu,
+)
+from .megatron_lm import (
+    AbstractTrainStep,
+    BertTrainStep,
+    GPTTrainStep,
+    MegatronEngine,
+    MegatronLMDummyDataLoader,
+    MegatronLMDummyScheduler,
+    MegatronLMOptimizerWrapper,
+    MegatronLMSchedulerWrapper,
+    T5TrainStep,
+    avg_losses_across_data_parallel_group,
+    gather_across_data_parallel_groups,
+)
+from .megatron_lm import initialize as megatron_lm_initialize
+from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
+from .megatron_lm import prepare_model as megatron_lm_prepare_model
+from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
+from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
+from .memory import find_executable_batch_size, release_memory
+from .other import (
+    check_os_kernel,
+    clean_state_dict_for_safetensors,
+    clear_environment,
+    convert_bytes,
+    extract_model_from_parallel,
+    get_pretty_name,
+    is_port_in_use,
+    merge_dicts,
+    patch_environment,
+    recursive_getattr,
+    save,
+    wait_for_everyone,
+    write_basic_config,
+)
+from .random import set_seed, synchronize_rng_state, synchronize_rng_states
+from .torch_xla import install_xla
+from .tqdm import tqdm
+from .transformer_engine import convert_model, has_transformer_engine_layers
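The package `__init__` above re-exports the submodule contents under the flat `accelerate.utils` namespace, so callers import helpers directly rather than from the individual submodules. For example, all of the following names appear in the re-export lists above:

from accelerate.utils import (
    find_executable_batch_size,  # re-exported from .memory
    send_to_device,              # re-exported from .operations
    set_seed,                    # re-exported from .random
    is_bf16_available,           # re-exported from .imports
)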
env-llmeval/lib/python3.10/site-packages/accelerate/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.54 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc ADDED
Binary file (57.3 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc ADDED
Binary file (5.76 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/utils/__pycache__/memory.cpython-310.pyc ADDED
Binary file (4.4 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/utils/__pycache__/offload.cpython-310.pyc ADDED
Binary file (6.96 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/utils/bnb.py ADDED
@@ -0,0 +1,467 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging
+import os
+from copy import deepcopy
+from typing import Dict, List, Optional, Union
+
+import torch
+import torch.nn as nn
+
+from accelerate.utils.imports import (
+    is_4bit_bnb_available,
+    is_8bit_bnb_available,
+)
+
+from ..big_modeling import dispatch_model, init_empty_weights
+from .dataclasses import BnbQuantizationConfig
+from .modeling import (
+    find_tied_parameters,
+    get_balanced_memory,
+    infer_auto_device_map,
+    load_checkpoint_in_model,
+    offload_weight,
+    set_module_tensor_to_device,
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+def load_and_quantize_model(
+    model: torch.nn.Module,
+    bnb_quantization_config: BnbQuantizationConfig,
+    weights_location: Union[str, os.PathLike] = None,
+    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
+    no_split_module_classes: Optional[List[str]] = None,
+    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
+    offload_folder: Optional[Union[str, os.PathLike]] = None,
+    offload_state_dict: bool = False,
+):
+    """
+    This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the
+    model is in the meta device, we will load and dispatch the weights according to the `device_map` passed. If the
+    model is already loaded, we will quantize the model and put the model on the GPU,
+
+    Args:
+        model (`torch.nn.Module`):
+            Input model. The model can be already loaded or on the meta device
+        bnb_quantization_config (`BnbQuantizationConfig`):
+            The bitsandbytes quantization parameters
+        weights_location (`str` or `os.PathLike`):
+            The folder weights_location to load. It can be:
+            - a path to a file containing a whole model state dict
+            - a path to a `.json` file containing the index to a sharded checkpoint
+            - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
+            - a path to a folder containing a unique pytorch_model.bin file.
+        device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
+            A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
+            name, once a given module name is inside, every submodule of it will be sent to the same device.
+        no_split_module_classes (`List[str]`, *optional*):
+            A list of layer class names that should never be split across device (for instance any layer that has a
+            residual connection).
+        max_memory (`Dict`, *optional*):
+            A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.
+        offload_folder (`str` or `os.PathLike`, *optional*):
+            If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+        offload_state_dict (`bool`, *optional*, defaults to `False`):
+            If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
+            the weight of the CPU state dict + the biggest shard does not fit.
+
+    Returns:
+        `torch.nn.Module`: The quantized model
+    """
+
+    load_in_4bit = bnb_quantization_config.load_in_4bit
+    load_in_8bit = bnb_quantization_config.load_in_8bit
+
+    if load_in_8bit and not is_8bit_bnb_available():
+        raise ImportError(
+            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
+            " make sure you have the latest version of `bitsandbytes` installed."
+        )
+    if load_in_4bit and not is_4bit_bnb_available():
+        raise ValueError(
+            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
+            "make sure you have the latest version of `bitsandbytes` installed."
+        )
+
+    modules_on_cpu = []
+    # custom device map
+    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
+        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
+
+    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
+    if bnb_quantization_config.skip_modules is None:
+        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
+
+    # add cpu modules to skip modules only for 4-bit modules
+    if load_in_4bit:
+        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
+    modules_to_not_convert = bnb_quantization_config.skip_modules
+
+    # We add the modules we want to keep in full precision
+    if bnb_quantization_config.keep_in_fp32_modules is None:
+        bnb_quantization_config.keep_in_fp32_modules = []
+    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
+    modules_to_not_convert.extend(keep_in_fp32_modules)
+
+    # compatibility with peft
+    model.is_loaded_in_4bit = load_in_4bit
+    model.is_loaded_in_8bit = load_in_8bit
+
+    model_device = get_parameter_device(model)
+    if model_device.type != "meta":
+        # quantization of an already loaded model
+        logger.warning(
+            "It is not recommended to quantize a loaded model. "
+            "The model should be instantiated under the `init_empty_weights` context manager."
+        )
+        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
+        # convert param to the right dtype
+        dtype = bnb_quantization_config.torch_dtype
+        for name, param in model.state_dict().items():
+            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
+                param.to(torch.float32)
+                if param.dtype != torch.float32:
+                    name = name.replace(".weight", "").replace(".bias", "")
+                    param = getattr(model, name, None)
+                    if param is not None:
+                        param.to(torch.float32)
+            elif torch.is_floating_point(param):
+                param.to(dtype)
+        if model_device.type == "cuda":
+            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
+            model.cuda(torch.cuda.current_device())
+            torch.cuda.empty_cache()
+        elif torch.cuda.is_available():
+            model.to(torch.cuda.current_device())
+        else:
+            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
+        logger.info(
+            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
+            "We move the model to cuda."
+        )
+        return model
+
+    elif weights_location is None:
+        raise RuntimeError(
+            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
+        )
+
+    else:
+        with init_empty_weights():
+            model = replace_with_bnb_layers(
+                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
+            )
+
+        device_map = get_quantized_model_device_map(
+            model,
+            bnb_quantization_config,
+            device_map,
+            max_memory=max_memory,
+            no_split_module_classes=no_split_module_classes,
+        )
+        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
+            offload_state_dict = True
+
+        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
+
+        load_checkpoint_in_model(
+            model,
+            weights_location,
+            device_map,
+            dtype=bnb_quantization_config.torch_dtype,
+            offload_folder=offload_folder,
+            offload_state_dict=offload_state_dict,
+            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
+            offload_8bit_bnb=load_in_8bit and offload,
+        )
+        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
+
+
+def get_quantized_model_device_map(
+    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
+):
+    if device_map is None:
+        if torch.cuda.is_available():
+            device_map = {"": torch.cuda.current_device()}
+        else:
+            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
+        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
+
+    if isinstance(device_map, str):
+        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
+            raise ValueError(
+                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
+                "'sequential'."
+            )
+
+        special_dtypes = {}
+        special_dtypes.update(
+            {
+                name: bnb_quantization_config.torch_dtype
+                for name, _ in model.named_parameters()
+                if any(m in name for m in bnb_quantization_config.skip_modules)
+            }
+        )
+        special_dtypes.update(
+            {
+                name: torch.float32
+                for name, _ in model.named_parameters()
+                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
+            }
+        )
+
+        kwargs = {}
+        kwargs["special_dtypes"] = special_dtypes
+        kwargs["no_split_module_classes"] = no_split_module_classes
+        kwargs["dtype"] = bnb_quantization_config.target_dtype
+
+        # get max_memory for each device.
+        if device_map != "sequential":
+            max_memory = get_balanced_memory(
+                model,
+                low_zero=(device_map == "balanced_low_0"),
+                max_memory=max_memory,
+                **kwargs,
+            )
+
+        kwargs["max_memory"] = max_memory
+        device_map = infer_auto_device_map(model, **kwargs)
+
+    if isinstance(device_map, dict):
+        # check if don't have any quantized module on the cpu
+        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
+
+        device_map_without_some_modules = {
+            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
+        }
+        for device in ["cpu", "disk"]:
+            if device in device_map_without_some_modules.values():
+                if bnb_quantization_config.load_in_4bit:
+                    raise ValueError(
+                        """
+                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
+                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
+                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
+                        `load_and_quantize_model`. Check
+                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
+                        for more details.
+                        """
+                    )
+                else:
+                    logger.info(
+                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
+                    )
+        del device_map_without_some_modules
+    return device_map
+
+
+def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
+    """
+    A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules or by `bnb.nn.Linear4bit`
+    modules from the `bitsandbytes`library. The function will be run recursively and replace `torch.nn.Linear` modules.
+
+    Parameters:
+        model (`torch.nn.Module`):
+            Input model or `torch.nn.Module` as the function is run recursively.
+        modules_to_not_convert (`List[str]`):
+            Names of the modules to not quantize convert. In practice we keep the `lm_head` in full precision for
+            numerical stability reasons.
+        current_key_name (`List[str]`, *optional*):
+            An array to track the current key of the recursion. This is used to check whether the current key (part of
+            it) is not in the list of modules to not convert.
+    """
+
+    if modules_to_not_convert is None:
+        modules_to_not_convert = []
+
+    model, has_been_replaced = _replace_with_bnb_layers(
+        model, bnb_quantization_config, modules_to_not_convert, current_key_name
+    )
+    if not has_been_replaced:
+        logger.warning(
+            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
+            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
+            " Please double check your model architecture, or submit an issue on github if you think this is"
+            " a bug."
+        )
+    return model
+
+
+def _replace_with_bnb_layers(
+    model,
+    bnb_quantization_config,
+    modules_to_not_convert=None,
+    current_key_name=None,
+):
+    """
+    Private method that wraps the recursion for module replacement.
+
+    Returns the converted model and a boolean that indicates if the conversion has been successfull or not.
+    """
+    # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
+    import bitsandbytes as bnb
+
+    has_been_replaced = False
+    for name, module in model.named_children():
+        if current_key_name is None:
+            current_key_name = []
+        current_key_name.append(name)
+        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
+            # Check if the current key is not in the `modules_to_not_convert`
+            current_key_name_str = ".".join(current_key_name)
+            proceed = True
+            for key in modules_to_not_convert:
+                if (
+                    (key in current_key_name_str) and (key + "." in current_key_name_str)
+                ) or key == current_key_name_str:
+                    proceed = False
+                    break
+            if proceed:
+                # Load bnb module with empty weight and replace ``nn.Linear` module
+                if bnb_quantization_config.load_in_8bit:
+                    bnb_module = bnb.nn.Linear8bitLt(
+                        module.in_features,
+                        module.out_features,
+                        module.bias is not None,
+                        has_fp16_weights=False,
+                        threshold=bnb_quantization_config.llm_int8_threshold,
+                    )
+                elif bnb_quantization_config.load_in_4bit:
+                    bnb_module = bnb.nn.Linear4bit(
+                        module.in_features,
+                        module.out_features,
+                        module.bias is not None,
+                        bnb_quantization_config.bnb_4bit_compute_dtype,
+                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
+                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
+                    )
+                else:
+                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
+                bnb_module.weight.data = module.weight.data
+                if module.bias is not None:
+                    bnb_module.bias.data = module.bias.data
+                bnb_module.requires_grad_(False)
+                setattr(model, name, bnb_module)
+                has_been_replaced = True
+        if len(list(module.children())) > 0:
+            _, _has_been_replaced = _replace_with_bnb_layers(
+                module, bnb_quantization_config, modules_to_not_convert, current_key_name
+            )
+            has_been_replaced = has_been_replaced | _has_been_replaced
+        # Remove the last key for recursion
+        current_key_name.pop(-1)
+    return model, has_been_replaced
+
+
+def get_keys_to_not_convert(model):
+    r"""
+    An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules
+    we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want
+    to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in
+    int8.
+
+    Parameters:
+        model (`torch.nn.Module`):
+            Input model
+    """
+    # Create a copy of the model
+    with init_empty_weights():
+        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
+
+    tied_params = find_tied_parameters(tied_model)
+    # For compatibility with Accelerate < 0.18
+    if isinstance(tied_params, dict):
+        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
+    else:
+        tied_keys = sum(tied_params, [])
+    has_tied_params = len(tied_keys) > 0
+
+    # Check if it is a base model
+    is_base_model = False
+    if hasattr(model, "base_model_prefix"):
+        is_base_model = not hasattr(model, model.base_model_prefix)
+
+    # Ignore this for base models (BertModel, GPT2Model, etc.)
+    if (not has_tied_params) and is_base_model:
+        return []
+
+    # otherwise they have an attached head
+    list_modules = list(model.named_children())
+    list_last_module = [list_modules[-1][0]]
+
+    # add last module together with tied weights
+    intersection = set(list_last_module) - set(tied_keys)
+    list_untouched = list(set(tied_keys)) + list(intersection)
+
+    # remove ".weight" from the keys
+    names_to_remove = [".weight", ".bias"]
+    filtered_module_names = []
+    for name in list_untouched:
+        for name_to_remove in names_to_remove:
+            if name_to_remove in name:
+                name = name.replace(name_to_remove, "")
+        filtered_module_names.append(name)
+
+    return filtered_module_names
+
+
+def has_4bit_bnb_layers(model):
+    """Check if we have `bnb.nn.Linear4bit` or `bnb.nn.Linear8bitLt` layers inside our model"""
+    # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
+    import bitsandbytes as bnb
+
+    for m in model.modules():
+        if isinstance(m, bnb.nn.Linear4bit):
+            return True
+    return False
+
+
+def get_parameter_device(parameter: nn.Module):
+    return next(parameter.parameters()).device
+
+
+def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
+    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
+    if fp16_statistics is None:
+        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
+        tensor_name = param_name
+        module = model
+        if "." in tensor_name:
+            splits = tensor_name.split(".")
+            for split in splits[:-1]:
+                new_module = getattr(module, split)
+                if new_module is None:
+                    raise ValueError(f"{module} has no attribute {split}.")
+                module = new_module
+            tensor_name = splits[-1]
+        # offload weights
+        module._parameters[tensor_name].requires_grad = False
+        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
+        if hasattr(module._parameters[tensor_name], "SCB"):
+            offload_weight(
+                module._parameters[tensor_name].SCB,
+                param_name.replace("weight", "SCB"),
+                offload_folder,
+                index=offload_index,
+            )
+    else:
+        offload_weight(param, param_name, offload_folder, index=offload_index)
+        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
+
+    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
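The docstring of `load_and_quantize_model` above spells out the intended entry point: build the model skeleton under `init_empty_weights`, then let the function replace the linear layers with bitsandbytes modules and dispatch the checkpoint. A hedged usage sketch; the model class, checkpoint path, and the specific 8-bit settings are illustrative assumptions, not part of this commit:

import torch
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

# Hypothetical model class and checkpoint folder, used only for illustration.
with init_empty_weights():
    empty_model = MyModel(config)  # constructed on the meta device, no real weights

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint_folder",  # placeholder path
    device_map="auto",
)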
env-llmeval/lib/python3.10/site-packages/accelerate/utils/constants.py ADDED
@@ -0,0 +1,72 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import operator as op
+
+
+SCALER_NAME = "scaler.pt"
+MODEL_NAME = "pytorch_model"
+SAFE_MODEL_NAME = "model"
+RNG_STATE_NAME = "random_states"
+OPTIMIZER_NAME = "optimizer"
+SCHEDULER_NAME = "scheduler"
+SAMPLER_NAME = "sampler"
+WEIGHTS_NAME = f"{MODEL_NAME}.bin"
+WEIGHTS_INDEX_NAME = f"{WEIGHTS_NAME}.index.json"
+SAFE_WEIGHTS_NAME = f"{SAFE_MODEL_NAME}.safetensors"
+SAFE_WEIGHTS_INDEX_NAME = f"{SAFE_WEIGHTS_NAME}.index.json"
+SAGEMAKER_PYTORCH_VERSION = "1.10.2"
+SAGEMAKER_PYTHON_VERSION = "py38"
+SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
+SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
+FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
+FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
+FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
+FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
+FSDP_PYTORCH_VERSION = "2.1.0"
+FSDP_MODEL_NAME = "pytorch_model_fsdp"
+DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich"]
+TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
+
+STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
+
+# These are the args for `torch.distributed.launch` for pytorch < 1.9
+TORCH_LAUNCH_PARAMS = [
+    "nnodes",
+    "nproc_per_node",
+    "rdzv_backend",
+    "rdzv_endpoint",
+    "rdzv_id",
+    "rdzv_conf",
+    "standalone",
+    "max_restarts",
+    "monitor_interval",
+    "start_method",
+    "role",
+    "module",
+    "m",
+    "no_python",
+    "run_path",
+    "log_dir",
+    "r",
+    "redirects",
+    "t",
+    "tee",
+    "node_rank",
+    "master_addr",
+    "master_port",
+]
+
+CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
+TORCH_DISTRIBUTED_OPERATION_TYPES = CUDA_DISTRIBUTED_TYPES + ["MULTI_NPU", "MULTI_MLU", "MULTI_XPU", "MULTI_CPU"]
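`STR_OPERATION_TO_FUNC` maps comparison strings to `operator` functions so that version checks can be driven by a plain string such as ">=". A minimal standalone sketch of that pattern, mirroring the dict above rather than calling the package's own version helpers (whose exact signatures are not shown in this diff):

import operator as op
from packaging.version import parse

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

def version_matches(current: str, operation: str, required: str) -> bool:
    # e.g. version_matches("2.1.0", ">=", "2.1.0") -> True
    return STR_OPERATION_TO_FUNC[operation](parse(current), parse(required))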
env-llmeval/lib/python3.10/site-packages/accelerate/utils/dataclasses.py ADDED
@@ -0,0 +1,1717 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ General namespace and dataclass related classes
17
+ """
18
+
19
+ import argparse
20
+ import copy
21
+ import enum
22
+ import functools
23
+ import os
24
+ import typing
25
+ import warnings
26
+ from contextlib import contextmanager
27
+ from dataclasses import dataclass, field
28
+ from datetime import timedelta
29
+ from typing import Any, Callable, Dict, Iterable, List, Literal, Optional, Tuple, get_args
30
+
31
+ import torch
32
+
33
+ from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE
34
+ from .environment import str_to_bool
35
+ from .imports import is_cuda_available, is_npu_available, is_xpu_available
36
+ from .versions import compare_versions
37
+
38
+
39
+ class KwargsHandler:
40
+ """
41
+ Internal mixin that implements a `to_kwargs()` method for a dataclass.
42
+ """
43
+
44
+ def to_dict(self):
45
+ return copy.deepcopy(self.__dict__)
46
+
47
+ def to_kwargs(self):
48
+ """
49
+ Returns a dictionary containing the attributes with values different from the default of this class.
50
+ """
51
+ # import clear_environment here to avoid circular import problem
52
+ from .other import clear_environment
53
+
54
+ with clear_environment():
55
+ default_dict = self.__class__().to_dict()
56
+ this_dict = self.to_dict()
57
+ return {k: v for k, v in this_dict.items() if default_dict[k] != v}
58
+
59
+
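A minimal sketch of what the mixin above provides, assuming `accelerate` is installed: `to_kwargs()` keeps only the fields whose values differ from the dataclass defaults.

```python
from accelerate.utils import DistributedDataParallelKwargs  # defined later in this file

handler = DistributedDataParallelKwargs(find_unused_parameters=True)
# Only the non-default field survives the comparison done in to_kwargs().
print(handler.to_kwargs())  # {'find_unused_parameters': True}
```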
60
+ @dataclass
61
+ class AutocastKwargs(KwargsHandler):
62
+ """
63
+ Use this object in your [`Accelerator`] to customize how `torch.autocast` behaves. Please refer to the
64
+ documentation of this [context manager](https://pytorch.org/docs/stable/amp.html#torch.autocast) for more
65
+ information on each argument.
66
+
67
+ Example:
68
+
69
+ ```python
70
+ from accelerate import Accelerator
71
+ from accelerate.utils import AutocastKwargs
72
+
73
+ kwargs = AutocastKwargs(cache_enabled=True)
74
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
75
+ ```
76
+ """
77
+
78
+ enabled: bool = True
79
+ cache_enabled: bool = None
80
+
81
+
82
+ @dataclass
83
+ class DistributedDataParallelKwargs(KwargsHandler):
84
+ """
85
+ Use this object in your [`Accelerator`] to customize how your model is wrapped in a
86
+ `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this
87
+ [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more
88
+ information on each argument.
89
+
90
+ <Tip warning={true}>
91
+
92
+ `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.
93
+
94
+ `static_graph` is only available in PyTorch 1.11.0 and later versions.
95
+
96
+ </Tip>
97
+
98
+ Example:
99
+
100
+ ```python
101
+ from accelerate import Accelerator
102
+ from accelerate.utils import DistributedDataParallelKwargs
103
+
104
+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
105
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
106
+ ```
107
+ """
108
+
109
+ dim: int = 0
110
+ broadcast_buffers: bool = True
111
+ bucket_cap_mb: int = 25
112
+ find_unused_parameters: bool = False
113
+ check_reduction: bool = False
114
+ gradient_as_bucket_view: bool = False
115
+ static_graph: bool = False
116
+
117
+
118
+ @dataclass
119
+ class GradScalerKwargs(KwargsHandler):
120
+ """
121
+ Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the
122
+ `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this
123
+ [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.
124
+
125
+ <Tip warning={true}>
126
+
127
+ `GradScaler` is only available in PyTorch 1.5.0 and later versions.
128
+
129
+ </Tip>
130
+
131
+ Example:
132
+
133
+ ```python
134
+ from accelerate import Accelerator
135
+ from accelerate.utils import GradScalerKwargs
136
+
137
+ kwargs = GradScalerKwargs(backoff_factor=0.25)
138
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
139
+ ```
140
+ """
141
+
142
+ init_scale: float = 65536.0
143
+ growth_factor: float = 2.0
144
+ backoff_factor: float = 0.5
145
+ growth_interval: int = 2000
146
+ enabled: bool = True
147
+
148
+
149
+ @dataclass
150
+ class InitProcessGroupKwargs(KwargsHandler):
151
+ """
152
+ Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. Please refer
153
+ to the documentation of this
154
+ [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more
155
+ information on each argument.
156
+
157
+ ```python
158
+ from datetime import timedelta
159
+ from accelerate import Accelerator
160
+ from accelerate.utils import InitProcessGroupKwargs
161
+
162
+ kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))
163
+ accelerator = Accelerator(kwargs_handlers=[kwargs])
164
+ ```
165
+ """
166
+
167
+ backend: Optional[str] = "nccl"
168
+ init_method: Optional[str] = None
169
+ timeout: timedelta = timedelta(seconds=1800)
170
+
171
+
172
+ # Literals
173
+ Backend = Literal["MSAMP", "TE"]
174
+ OptLevel = Literal["O1", "O2"]
175
+ FP8Format = Literal["E4M3", "HYBRID"]
176
+ AmaxComputeAlgorithm = Literal["max", "most_recent"]
177
+
178
+
179
+ @dataclass
180
+ class FP8RecipeKwargs(KwargsHandler):
181
+ """
182
+ Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision
183
+ training with `transformer-engine` or `ms-amp`.
184
+
185
+ <Tip>
186
+
187
+ For more information on `transformer-engine` args, please refer to the API
188
+ [documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html).
189
+
190
+ For more information on the `ms-amp` args, please refer to the Optimization Level
191
+ [documentation](https://azure.github.io/MS-AMP/docs/user-tutorial/optimization-level).
192
+
193
+ </Tip>
194
+
195
+ ```python
196
+ from accelerate import Accelerator
197
+ from accelerate.utils import FP8RecipeKwargs
198
+
199
+ kwargs = FP8RecipeKwargs(backend="te", fp8_format="HYBRID")
200
+ accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs])
201
+ ```
202
+
203
+ To use MS-AMP as an engine, pass `backend="msamp"` and the optimization level via `opt_level`:
204
+
205
+ ```python
206
+ kwargs = FP8RecipeKwargs(backend="msamp", opt_level="O2")
207
+ ```
208
+
209
+ Args:
210
+ backend (`str`, *optional*, defaults to "msamp"):
211
+ Which FP8 engine to use. Must be one of `"msamp"` (MS-AMP) or `"te"` (TransformerEngine).
212
+ margin (`int`, *optional*, default to 0):
213
+ The margin to use for the gradient scaling.
214
+ interval (`int`, *optional*, default to 1):
215
+ The interval to use for how often the scaling factor is recomputed.
216
+ fp8_format (`str`, *optional*, default to "E4M3"):
217
+ The format to use for the FP8 recipe. Must be one of `E4M3` or `HYBRID`.
218
+ amax_history_len (`int`, *optional*, default to 1024):
219
+ The length of the history to use for the scaling factor computation
220
+ amax_compute_algo (`str`, *optional*, default to "most_recent"):
221
+ The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`.
222
+ override_linear_precision (`tuple` of three `bool`, *optional*, default to `(False, False, False)`):
223
+ Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
224
+ optimization_level (`str`), one of `O1`, `O2`. (default is `O2`):
225
+ What level of 8-bit collective communication should be used with MS-AMP. In general:
226
+ * O1: Weight gradients and `all_reduce` communications are done in fp8, reducing GPU
227
+ memory usage and communication bandwidth
228
+ * O2: First-order optimizer states are in 8-bit, and second order states are in FP16.
229
+ Only available when using Adam or AdamW. This maintains accuracy and can potentially save the
230
+ highest memory.
231
+ * O3: Specifically for DeepSpeed, implements capabilities so weights and master weights of models
232
+ are stored in FP8. If `fp8` is selected and deepspeed is enabled, will be used by default. (Not
233
+ available currently).
234
+ """
235
+
236
+ backend: Backend = "MSAMP"
237
+ opt_level: OptLevel = "O2"
238
+ margin: int = 0
239
+ interval: int = 1
240
+ fp8_format: FP8Format = "E4M3"
241
+ amax_history_len: int = 1
242
+ amax_compute_algo: AmaxComputeAlgorithm = "most_recent"
243
+ override_linear_precision: Tuple[bool, bool, bool] = (False, False, False)
244
+
245
+ def __post_init__(self):
246
+ if self.backend.upper() not in get_args(Backend):
247
+ raise ValueError("`backend` must be 'MSAMP' or 'TE' (TransformerEngine).")
248
+
249
+ self.backend = self.backend.upper()
250
+ # Check TE args
251
+ if self.backend == "TE":
252
+ self.fp8_format = self.fp8_format.upper()
253
+ if self.fp8_format not in get_args(FP8Format):
254
+ raise ValueError(f"`fp8_format` must be one of {' or '.join(get_args(FP8Format))}.")
255
+ if self.amax_compute_algo not in get_args(AmaxComputeAlgorithm):
256
+ raise ValueError(f"`amax_compute_algo` must be one of {' or '.join(get_args(AmaxComputeAlgorithm))}")
257
+ elif self.backend == "MSAMP":
258
+ if self.opt_level not in get_args(OptLevel):
259
+ raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}")
260
+
261
+
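A short sketch of the validation above, assuming `accelerate` is installed: backend and format strings are upper-cased before being checked against the `Literal` definitions, and an out-of-range MS-AMP level raises.

```python
from accelerate.utils import FP8RecipeKwargs

recipe = FP8RecipeKwargs(backend="te", fp8_format="hybrid")
print(recipe.backend, recipe.fp8_format)  # TE HYBRID

try:
    FP8RecipeKwargs(backend="msamp", opt_level="O3")  # only "O1" or "O2" pass the check
except ValueError as err:
    print(err)
```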
262
+ class EnumWithContains(enum.EnumMeta):
263
+ "A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
264
+
265
+ def __contains__(cls, item):
266
+ try:
267
+ cls(item)
268
+ except ValueError:
269
+ return False
270
+ return True
271
+
272
+
273
+ class BaseEnum(enum.Enum, metaclass=EnumWithContains):
274
+ "An enum class that can get the value of an item with `str(Enum.key)`"
275
+
276
+ def __str__(self):
277
+ return self.value
278
+
279
+ @classmethod
280
+ def list(cls):
281
+ "Method to list all the possible items in `cls`"
282
+ return list(map(str, cls))
283
+
284
+
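A quick sketch of what the metaclass and base class above enable, shown with the `PrecisionType` enum defined further down in this file (assuming `accelerate` is installed):

```python
from accelerate.utils.dataclasses import PrecisionType

print("fp16" in PrecisionType)  # True, via EnumWithContains.__contains__
print("fp64" in PrecisionType)  # False: PrecisionType("fp64") raises ValueError internally
print(PrecisionType.list())     # ['no', 'fp8', 'fp16', 'bf16']
```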
285
+ class DeprecatedFieldDescriptor:
286
+ """
287
+ Descriptor for deprecated fields in an enum class.
288
+
289
+ Args:
290
+ field_name (`str`):
291
+ The name of the deprecated field.
292
+ replaced_with (`str`):
293
+ The name of the field that replaces the deprecated one.
294
+ """
295
+
296
+ def __init__(self, field_name, replaced_with):
297
+ self.field_name = field_name
298
+ self.replaced_with = replaced_with
299
+
300
+ def __get__(self, instance, owner):
301
+ warnings.warn(
302
+ f"The `{self.field_name}` of `{owner}` is deprecated and will be removed in v1.0.0. "
303
+ f"Please use the `{self.replaced_with}` instead.",
304
+ FutureWarning,
305
+ )
306
+ return getattr(owner, self.replaced_with)
307
+
308
+
309
+ class DistributedType(str, enum.Enum):
310
+ """
311
+ Represents a type of distributed environment.
312
+
313
+ Values:
314
+
315
+ - **NO** -- Not a distributed environment, just a single process.
316
+ - **MULTI_CPU** -- Distributed on multiple CPU nodes.
317
+ - **MULTI_GPU** -- Distributed on multiple GPUs.
318
+ - **MULTI_MLU** -- Distributed on multiple MLUs.
319
+ - **MULTI_NPU** -- Distributed on multiple NPUs.
320
+ - **MULTI_XPU** -- Distributed on multiple XPUs.
321
+ - **DEEPSPEED** -- Using DeepSpeed.
322
+ - **XLA** -- Using TorchXLA.
323
+ - **TPU** -- This field will be deprecated in v0.27.0. Use XLA instead.
324
+ """
325
+
326
+ # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box.
327
+ NO = "NO"
328
+ MULTI_CPU = "MULTI_CPU"
329
+ MULTI_GPU = "MULTI_GPU"
330
+ MULTI_NPU = "MULTI_NPU"
331
+ MULTI_MLU = "MULTI_MLU"
332
+ MULTI_XPU = "MULTI_XPU"
333
+ DEEPSPEED = "DEEPSPEED"
334
+ FSDP = "FSDP"
335
+ XLA = "XLA"
336
+ MEGATRON_LM = "MEGATRON_LM"
337
+ TPU = DeprecatedFieldDescriptor("TPU", "XLA")
338
+
339
+
340
+ class SageMakerDistributedType(str, enum.Enum):
341
+ """
342
+ Represents a type of distributed environment.
343
+
344
+ Values:
345
+
346
+ - **NO** -- Not a distributed environment, just a single process.
347
+ - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.
348
+ - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.
349
+ """
350
+
351
+ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
352
+ NO = "NO"
353
+ DATA_PARALLEL = "DATA_PARALLEL"
354
+ MODEL_PARALLEL = "MODEL_PARALLEL"
355
+
356
+
357
+ class ComputeEnvironment(str, enum.Enum):
358
+ """
359
+ Represents a type of the compute environment.
360
+
361
+ Values:
362
+
363
+ - **LOCAL_MACHINE** -- private/custom cluster hardware.
364
+ - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.
365
+ """
366
+
367
+ # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.
368
+ LOCAL_MACHINE = "LOCAL_MACHINE"
369
+ AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
370
+
371
+
372
+ class DynamoBackend(str, BaseEnum):
373
+ """
374
+ Represents a dynamo backend (see https://pytorch.org/docs/stable/torch.compiler.html).
375
+
376
+ Values:
377
+
378
+ - **NO** -- Do not use torch dynamo.
379
+ - **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo
380
+ issues.
381
+ - **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's
382
+ extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups.
383
+ - **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton
384
+ kernels. [Read
385
+ more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)
386
+ - **AOT_TS_NVFUSER** -- nvFuser with AotAutograd/TorchScript. [Read
387
+ more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
388
+ - **NVPRIMS_NVFUSER** -- nvFuser with PrimTorch. [Read
389
+ more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
390
+ - **CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read more](https://github.com/pytorch/torchdynamo/pull/757)
391
+ - **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read
392
+ more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)
393
+ - **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read
394
+ more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)
395
+ - **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)
396
+ - **TENSORRT** -- Uses ONNXRT to run TensorRT for inference optimizations. [Read
397
+ more](https://github.com/onnx/onnx-tensorrt)
398
+ - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
399
+ more](https://github.com/intel/intel-extension-for-pytorch).
400
+ - **TVM** -- Uses Apache TVM for inference optimizations. [Read more](https://tvm.apache.org/)
401
+
402
+ """
403
+
404
+ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
405
+ NO = "NO"
406
+ EAGER = "EAGER"
407
+ AOT_EAGER = "AOT_EAGER"
408
+ INDUCTOR = "INDUCTOR"
409
+ AOT_TS_NVFUSER = "AOT_TS_NVFUSER"
410
+ NVPRIMS_NVFUSER = "NVPRIMS_NVFUSER"
411
+ CUDAGRAPHS = "CUDAGRAPHS"
412
+ OFI = "OFI"
413
+ FX2TRT = "FX2TRT"
414
+ ONNXRT = "ONNXRT"
415
+ TENSORRT = "TENSORRT"
416
+ IPEX = "IPEX"
417
+ TVM = "TVM"
418
+
419
+
420
+ class LoggerType(BaseEnum):
421
+ """Represents a type of supported experiment tracker
422
+
423
+ Values:
424
+
425
+ - **ALL** -- all available trackers in the environment that are supported
426
+ - **TENSORBOARD** -- TensorBoard as an experiment tracker
427
+ - **WANDB** -- wandb as an experiment tracker
428
+ - **COMETML** -- comet_ml as an experiment tracker
429
+ - **DVCLIVE** -- dvclive as an experiment tracker
430
+ """
431
+
432
+ ALL = "all"
433
+ AIM = "aim"
434
+ TENSORBOARD = "tensorboard"
435
+ WANDB = "wandb"
436
+ COMETML = "comet_ml"
437
+ MLFLOW = "mlflow"
438
+ CLEARML = "clearml"
439
+ DVCLIVE = "dvclive"
440
+
441
+
442
+ class PrecisionType(BaseEnum):
443
+ """Represents a type of precision used on floating point values
444
+
445
+ Values:
446
+
447
+ - **NO** -- using full precision (FP32)
448
+ - **FP16** -- using half precision
449
+ - **BF16** -- using brain floating point precision
450
+ """
451
+
452
+ NO = "no"
453
+ FP8 = "fp8"
454
+ FP16 = "fp16"
455
+ BF16 = "bf16"
456
+
457
+
458
+ class RNGType(BaseEnum):
459
+ TORCH = "torch"
460
+ CUDA = "cuda"
461
+ MLU = "mlu"
462
+ NPU = "npu"
463
+ XLA = "xla"
464
+ XPU = "xpu"
465
+ GENERATOR = "generator"
466
+
467
+
468
+ class CustomDtype(enum.Enum):
469
+ r"""
470
+ An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`.
471
+ """
472
+
473
+ FP8 = "fp8"
474
+ INT4 = "int4"
475
+ INT2 = "int2"
476
+
477
+
478
+ # data classes
479
+
480
+
481
+ @dataclass
482
+ class TensorInformation:
483
+ shape: torch.Size
484
+ dtype: torch.dtype
485
+
486
+
487
+ @dataclass
488
+ class DataLoaderConfiguration:
489
+ """
490
+ Configuration for dataloader-related items when calling `accelerator.prepare`.
491
+ """
492
+
493
+ split_batches: bool = field(
494
+ default=False,
495
+ metadata={
496
+ "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If"
497
+ " `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a"
498
+ " round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set"
499
+ " in your script multiplied by the number of processes."
500
+ },
501
+ )
502
+ dispatch_batches: bool = field(
503
+ default=None,
504
+ metadata={
505
+ "help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process"
506
+ " and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose"
507
+ " underlying dataset is an `IterableDataslet`, `False` otherwise."
508
+ },
509
+ )
510
+ even_batches: bool = field(
511
+ default=True,
512
+ metadata={
513
+ "help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the"
514
+ " dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among"
515
+ " all workers."
516
+ },
517
+ )
518
+ use_seedable_sampler: bool = field(
519
+ default=False,
520
+ metadata={
521
+ "help": "Whether or not use a fully seedable random sampler ([`data_loader.SeedableRandomSampler`])."
522
+ "Ensures training results are fully reproducable using a different sampling technique. "
523
+ "While seed-to-seed results may differ, on average the differences are neglible when using"
524
+ "multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results."
525
+ },
526
+ )
527
+
528
+
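A hedged sketch of how this configuration is typically handed to the `Accelerator`; the `dataloader_config` keyword is assumed to exist in the accelerate version this file belongs to.

```python
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

dataloader_config = DataLoaderConfiguration(split_batches=True, use_seedable_sampler=True)
accelerator = Accelerator(dataloader_config=dataloader_config)
```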
529
+ @dataclass
530
+ class ProjectConfiguration:
531
+ """
532
+ Configuration for the Accelerator object based on inner-project needs.
533
+ """
534
+
535
+ project_dir: str = field(default=None, metadata={"help": "A path to a directory for storing data."})
536
+ logging_dir: str = field(
537
+ default=None,
538
+ metadata={
539
+ "help": "A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`."
540
+ },
541
+ )
542
+ automatic_checkpoint_naming: bool = field(
543
+ default=False,
544
+ metadata={"help": "Whether saved states should be automatically iteratively named."},
545
+ )
546
+
547
+ total_limit: int = field(
548
+ default=None,
549
+ metadata={"help": "The maximum number of total saved states to keep."},
550
+ )
551
+
552
+ iteration: int = field(
553
+ default=0,
554
+ metadata={"help": "The current save iteration."},
555
+ )
556
+
557
+ save_on_each_node: bool = field(
558
+ default=False,
559
+ metadata={
560
+ "help": (
561
+ "When doing multi-node distributed training, whether to save models and checkpoints on each node, or"
562
+ " only on the main one"
563
+ )
564
+ },
565
+ )
566
+
567
+ def set_directories(self, project_dir: str = None):
568
+ "Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
569
+ self.project_dir = project_dir
570
+ if self.logging_dir is None:
571
+ self.logging_dir = project_dir
572
+
573
+ def __post_init__(self):
574
+ self.set_directories(self.project_dir)
575
+
576
+
577
+ @dataclass
578
+ class GradientAccumulationPlugin(KwargsHandler):
579
+ """
580
+ A plugin to configure gradient accumulation behavior. You can only pass one of `gradient_accumulation_plugin` or
581
+ `gradient_accumulation_steps` to [`Accelerator`]. Passing both raises an error.
582
+
583
+ Parameters:
584
+ num_steps (`int`):
585
+ The number of steps to accumulate gradients for.
586
+ adjust_scheduler (`bool`, *optional*, defaults to `True`):
587
+ Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be
588
+ `True` if the used scheduler was not adjusted for gradient accumulation.
589
+ sync_with_dataloader (`bool`, *optional*, defaults to `True`):
590
+ Whether to synchronize setting the gradients when reaching the end of the dataloader.
591
+ sync_each_batch (`bool`, *optional*):
592
+ Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory
593
+ requirements when using gradient accumulation with distributed training, at expense of speed.
594
+
595
+ Example:
596
+
597
+ ```python
598
+ from accelerate.utils import GradientAccumulationPlugin
599
+
600
+ gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
601
+ accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
602
+ ```
603
+ """
604
+
605
+ num_steps: int = field(default=None, metadata={"help": "The number of steps to accumulate gradients for."})
606
+ adjust_scheduler: bool = field(
607
+ default=True,
608
+ metadata={
609
+ "help": "Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation."
610
+ },
611
+ )
612
+ sync_with_dataloader: bool = field(
613
+ default=True,
614
+ metadata={
615
+ "help": "Whether to synchronize setting the gradients when at the end of the dataloader. Should only be set to `False` if you know what you're doing."
616
+ },
617
+ )
618
+ sync_each_batch: bool = field(
619
+ default=False,
620
+ metadata={
621
+ "help": "Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory requirements when using gradient accumulation with distributed training, at expense of speed."
622
+ },
623
+ )
624
+
625
+
626
+ @dataclass
627
+ class TorchDynamoPlugin(KwargsHandler):
628
+ """
629
+ This plugin is used to compile a model with PyTorch 2.0
630
+ """
631
+
632
+ backend: DynamoBackend = field(
633
+ default=None,
634
+ metadata={"help": f"Possible options are {[b.value.lower() for b in DynamoBackend]}"},
635
+ )
636
+ mode: str = field(
637
+ default=None, metadata={"help": "Possible options are 'default', 'reduce-overhead' or 'max-autotune'"}
638
+ )
639
+ fullgraph: bool = field(default=None, metadata={"help": "Whether it is ok to break model into several subgraphs"})
640
+ dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"})
641
+ options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."})
642
+ disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"})
643
+
644
+ def __post_init__(self):
645
+ prefix = "ACCELERATE_DYNAMO_"
646
+ if self.backend is None:
647
+ self.backend = os.environ.get(prefix + "BACKEND", "no")
648
+ self.backend = DynamoBackend(self.backend.upper())
649
+ if self.mode is None:
650
+ self.mode = os.environ.get(prefix + "MODE", "default")
651
+ if self.fullgraph is None:
652
+ self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
653
+ if self.dynamic is None:
654
+ self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
655
+
656
+ def to_dict(self):
657
+ dynamo_config = copy.deepcopy(self.__dict__)
658
+ dynamo_config["backend"] = dynamo_config["backend"].value.lower()
659
+ return dynamo_config
660
+
661
+
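A small usage sketch, assuming `accelerate` is installed and no `ACCELERATE_DYNAMO_*` environment variables are set, so the fallbacks in `__post_init__` above apply:

```python
from accelerate.utils import TorchDynamoPlugin

plugin = TorchDynamoPlugin(backend="inductor", mode="max-autotune")
# to_dict() lower-cases the backend enum value again for serialization.
print(plugin.to_dict())
# {'backend': 'inductor', 'mode': 'max-autotune', 'fullgraph': False, 'dynamic': False,
#  'options': None, 'disable': False}
```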
662
+ @dataclass
663
+ class DeepSpeedPlugin:
664
+ """
665
+ This plugin is used to integrate DeepSpeed.
666
+ """
667
+
668
+ hf_ds_config: Any = field(
669
+ default=None,
670
+ metadata={
671
+ "help": "path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`."
672
+ },
673
+ )
674
+ gradient_accumulation_steps: int = field(
675
+ default=None,
676
+ metadata={
677
+ "help": "Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly."
678
+ },
679
+ )
680
+ gradient_clipping: float = field(default=None, metadata={"help": "Enable gradient clipping with value"})
681
+ zero_stage: int = field(
682
+ default=None,
683
+ metadata={"help": "Possible options are 0,1,2,3; Default will be taken from environment variable"},
684
+ )
685
+ is_train_batch_min: str = field(
686
+ default=True,
687
+ metadata={"help": "If both train & eval dataloaders are specified, this will decide the train_batch_size"},
688
+ )
689
+ offload_optimizer_device: bool = field(
690
+ default=None,
691
+ metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3."},
692
+ )
693
+ offload_param_device: bool = field(
694
+ default=None,
695
+ metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3."},
696
+ )
697
+ offload_optimizer_nvme_path: str = field(
698
+ default=None,
699
+ metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
700
+ )
701
+ offload_param_nvme_path: str = field(
702
+ default=None,
703
+ metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
704
+ )
705
+ zero3_init_flag: bool = field(
706
+ default=None,
707
+ metadata={
708
+ "help": "Flag to indicate whether to enable `deepspeed.zero.Init` for constructing massive models."
709
+ "Only applicable with ZeRO Stage-3."
710
+ },
711
+ )
712
+ zero3_save_16bit_model: bool = field(
713
+ default=None,
714
+ metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."},
715
+ )
716
+
717
+ def __post_init__(self):
718
+ from .deepspeed import HfDeepSpeedConfig
719
+
720
+ if self.gradient_accumulation_steps is None:
721
+ gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
722
+ self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas
723
+
724
+ if self.gradient_clipping is None:
725
+ gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "none")
726
+ if gradient_clipping != "none":
727
+ self.gradient_clipping = float(gradient_clipping)
728
+
729
+ if self.zero_stage is None:
730
+ self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2))
731
+
732
+ if self.offload_optimizer_device is None:
733
+ self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")
734
+
735
+ if self.offload_param_device is None:
736
+ self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")
737
+
738
+ if self.offload_optimizer_nvme_path is None:
739
+ self.offload_optimizer_nvme_path = os.environ.get(
740
+ "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none"
741
+ )
742
+
743
+ if self.offload_param_nvme_path is None:
744
+ self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none")
745
+
746
+ if self.zero3_save_16bit_model is None:
747
+ self.zero3_save_16bit_model = (
748
+ os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true"
749
+ )
750
+
751
+ if self.hf_ds_config is None:
752
+ self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none")
753
+ if (
754
+ isinstance(self.hf_ds_config, dict)
755
+ or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none")
756
+ or isinstance(self.hf_ds_config, HfDeepSpeedConfig)
757
+ ):
758
+ if not isinstance(self.hf_ds_config, HfDeepSpeedConfig):
759
+ self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)
760
+ if "gradient_accumulation_steps" not in self.hf_ds_config.config:
761
+ self.hf_ds_config.config["gradient_accumulation_steps"] = 1
762
+ if "zero_optimization" not in self.hf_ds_config.config:
763
+ raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.")
764
+
765
+ self._deepspeed_config_checks()
766
+ plugin_to_config_mapping = {
767
+ "gradient_accumulation_steps": "gradient_accumulation_steps",
768
+ "gradient_clipping": "gradient_clipping",
769
+ "zero_stage": "zero_optimization.stage",
770
+ "offload_optimizer_device": "zero_optimization.offload_optimizer.device",
771
+ "offload_param_device": "zero_optimization.offload_param.device",
772
+ "offload_param_nvme_path": "zero_optimization.offload_param.nvme_path",
773
+ "offload_optimizer_nvme_path": "zero_optimization.offload_optimizer.nvme_path",
774
+ "zero3_save_16bit_model": "zero_optimization.stage3_gather_16bit_weights_on_model_save",
775
+ }
776
+ kwargs = {v: getattr(self, k) for k, v in plugin_to_config_mapping.items() if getattr(self, k) is not None}
777
+ for key in kwargs.keys():
778
+ self.fill_match(key, **kwargs, must_match=False)
779
+ self.hf_ds_config.set_stage_and_offload()
780
+
781
+ # filling the missing values in the class attributes from the DeepSpeed config
782
+ # when using the DeepSpeed config file.
783
+ for key, value in plugin_to_config_mapping.items():
784
+ config_value = self.hf_ds_config.get_value(value)
785
+ if config_value is not None and config_value != "auto":
786
+ setattr(self, key, config_value)
787
+ else:
788
+ config = {
789
+ "train_batch_size": "auto",
790
+ "train_micro_batch_size_per_gpu": "auto",
791
+ "gradient_accumulation_steps": self.gradient_accumulation_steps,
792
+ "zero_optimization": {
793
+ "stage": self.zero_stage,
794
+ "offload_optimizer": {
795
+ "device": self.offload_optimizer_device,
796
+ "nvme_path": self.offload_optimizer_nvme_path
797
+ if self.offload_optimizer_device == "nvme"
798
+ else None,
799
+ },
800
+ "offload_param": {
801
+ "device": self.offload_param_device,
802
+ "nvme_path": self.offload_param_nvme_path if self.offload_param_device == "nvme" else None,
803
+ },
804
+ "stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model,
805
+ },
806
+ }
807
+ if self.gradient_clipping:
808
+ config["gradient_clipping"] = self.gradient_clipping
809
+ self.hf_ds_config = HfDeepSpeedConfig(config)
810
+
811
+ self.deepspeed_config = self.hf_ds_config.config
812
+ self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout
813
+ if self.zero3_init_flag is None:
814
+ self.zero3_init_flag = (
815
+ str_to_bool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
816
+ )
817
+ if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
818
+ warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
819
+ self.zero3_init_flag = False
820
+
821
+ def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):
822
+ mismatches = [] if mismatches is None else mismatches
823
+ config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)
824
+ if config is None:
825
+ return
826
+
827
+ if config.get(ds_key) == "auto":
828
+ if ds_key_long in kwargs:
829
+ config[ds_key] = kwargs[ds_key_long]
830
+ return
831
+ else:
832
+ raise ValueError(
833
+ f"`{ds_key_long}` not found in kwargs. "
834
+ f"Please specify `{ds_key_long}` without `auto` (set to correct value) in the DeepSpeed config file or "
835
+ "pass it in kwargs."
836
+ )
837
+
838
+ if not must_match:
839
+ return
840
+
841
+ ds_val = config.get(ds_key)
842
+ if ds_val is not None and ds_key_long in kwargs:
843
+ if ds_val != kwargs[ds_key_long]:
844
+ mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}")
845
+
846
+ def is_auto(self, ds_key_long):
847
+ val = self.hf_ds_config.get_value(ds_key_long)
848
+ if val is None:
849
+ return False
850
+ else:
851
+ return val == "auto"
852
+
853
+ def get_value(self, ds_key_long, default=None):
854
+ return self.hf_ds_config.get_value(ds_key_long, default)
855
+
856
+ def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs):
857
+ """Process the DeepSpeed config with the values from the kwargs."""
858
+ mismatches = [] if mismatches is None else mismatches
859
+ if config is None:
860
+ config = self.deepspeed_config
861
+ for key, value in config.items():
862
+ if isinstance(value, dict):
863
+ self.deepspeed_config_process(
864
+ prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs
865
+ )
866
+ else:
867
+ self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs)
868
+ if len(mismatches) > 0 and prefix == "":
869
+ mismatches_msg = "\n".join(mismatches)
870
+ raise ValueError(
871
+ "Please correct the following DeepSpeed config values that mismatch kwargs "
872
+ f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
873
+ )
874
+
875
+ def set_mixed_precision(self, mixed_precision):
876
+ ds_config = self.deepspeed_config
877
+ kwargs = {
878
+ "fp16.enabled": mixed_precision == "fp16",
879
+ "bf16.enabled": mixed_precision == "bf16",
880
+ }
881
+ if mixed_precision == "fp16":
882
+ if "fp16" not in ds_config:
883
+ ds_config["fp16"] = {"enabled": True, "auto_cast": True}
884
+ elif mixed_precision == "bf16":
885
+ if "bf16" not in ds_config:
886
+ ds_config["bf16"] = {"enabled": True}
887
+
888
+ if mixed_precision != "no":
889
+ diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16"
890
+ if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true":
891
+ raise ValueError(
892
+ f"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file."
893
+ )
894
+ for dtype in ["fp16", "bf16"]:
895
+ if dtype not in ds_config:
896
+ ds_config[dtype] = {"enabled": False}
897
+ self.fill_match("fp16.enabled", must_match=False, **kwargs)
898
+ self.fill_match("bf16.enabled", must_match=False, **kwargs)
899
+
900
+ def set_deepspeed_weakref(self):
901
+ from .imports import is_transformers_available
902
+
903
+ if self.zero3_init_flag:
904
+ if not is_transformers_available():
905
+ raise Exception(
906
+ "When `zero3_init_flag` is set, it requires Transformers to be installed. "
907
+ "Please run `pip install transformers`."
908
+ )
909
+ ds_config = copy.deepcopy(self.deepspeed_config)
910
+ if "gradient_accumulation_steps" not in ds_config or ds_config["gradient_accumulation_steps"] == "auto":
911
+ ds_config["gradient_accumulation_steps"] = 1
912
+ if (
913
+ "train_micro_batch_size_per_gpu" not in ds_config
914
+ or ds_config["train_micro_batch_size_per_gpu"] == "auto"
915
+ ):
916
+ ds_config["train_micro_batch_size_per_gpu"] = 1
917
+ if ds_config.get("train_batch_size", None) == "auto":
918
+ del ds_config["train_batch_size"]
919
+
920
+ if compare_versions("transformers", "<", "4.33"):
921
+ from transformers.deepspeed import HfDeepSpeedConfig
922
+ else:
923
+ from transformers.integrations import HfDeepSpeedConfig
924
+
925
+ self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa
926
+
927
+ def is_zero3_init_enabled(self):
928
+ return self.zero3_init_flag
929
+
930
+ @contextmanager
931
+ def zero3_init_context_manager(self, enable=False):
932
+ old = self.zero3_init_flag
933
+ if old == enable:
934
+ yield
935
+ else:
936
+ self.zero3_init_flag = enable
937
+ self.dschf = None
938
+ self.set_deepspeed_weakref()
939
+ yield
940
+ self.zero3_init_flag = old
941
+ self.dschf = None
942
+ self.set_deepspeed_weakref()
943
+
944
+ def _deepspeed_config_checks(self):
945
+ env_variable_names_to_ignore = [
946
+ "ACCELERATE_GRADIENT_ACCUMULATION_STEPS",
947
+ "ACCELERATE_GRADIENT_CLIPPING",
948
+ "ACCELERATE_DEEPSPEED_ZERO_STAGE",
949
+ "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE",
950
+ "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE",
951
+ "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH",
952
+ "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH",
953
+ "ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL",
954
+ "ACCELERATE_MIXED_PRECISION",
955
+ ]
956
+ env_variable_names_to_ignore = [
957
+ name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore
958
+ ]
959
+
960
+ deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",")
961
+
962
+ if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):
963
+ raise ValueError(
964
+ f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n"
965
+ "Please specify them appropriately in the DeepSpeed config file.\n"
966
+ "If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n"
967
+ "The easiest method is to create a new config following the questionnaire via `accelerate config`.\n"
968
+ "It will only ask for the necessary config variables when using `deepspeed_config_file`."
969
+ )
970
+
971
+
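A minimal sketch, assuming DeepSpeed is installed and the script is started with `accelerate launch`: the plugin can be built from explicit values instead of a DeepSpeed config file, and `__post_init__` above fills the remaining fields from environment variables.

```python
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)
```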
972
+ @dataclass
973
+ class FullyShardedDataParallelPlugin:
974
+ """
975
+ This plugin is used to enable fully sharded data parallelism.
976
+ """
977
+
978
+ sharding_strategy: "typing.Any" = field(
979
+ default=None,
980
+ metadata={
981
+ "help": "FSDP Sharding Strategy of type `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`"
982
+ },
983
+ )
984
+ backward_prefetch: "typing.Any" = field(
985
+ default=None,
986
+ metadata={
987
+ "help": "FSDP Backward Prefetch of type `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`"
988
+ },
989
+ )
990
+ mixed_precision_policy: "typing.Any" = field(
991
+ default=None,
992
+ metadata={
993
+ "help": "A config to enable mixed precision training with FullyShardedDataParallel. "
994
+ "The 3 flags that are set are `param_dtype`, `reduce_dtype`, `buffer_dtype`. "
995
+ "Each flag expects `torch.dtype` as the value. "
996
+ "It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.MixedPrecision`."
997
+ },
998
+ )
999
+ auto_wrap_policy: Optional[Callable] = field(
1000
+ default=None,
1001
+ metadata={"help": "A callable specifying a policy to recursively wrap layers with FSDP"},
1002
+ )
1003
+ cpu_offload: "typing.Any" = field(
1004
+ default=None,
1005
+ metadata={
1006
+ "help": "Decides Whether to offload parameters and gradients to CPU. "
1007
+ "It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload`."
1008
+ },
1009
+ )
1010
+ ignored_modules: Optional[Iterable[torch.nn.Module]] = field(
1011
+ default=None,
1012
+ metadata={"help": "A list of modules to ignore for FSDP."},
1013
+ )
1014
+ state_dict_type: "typing.Any" = field(
1015
+ default=None,
1016
+ metadata={
1017
+ "help": "FSDP State Dict Type of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictType`"
1018
+ },
1019
+ )
1020
+ state_dict_config: "typing.Any" = field(
1021
+ default=None,
1022
+ metadata={
1023
+ "help": "FSDP State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictConfig`"
1024
+ },
1025
+ )
1026
+ optim_state_dict_config: "typing.Any" = field(
1027
+ default=None,
1028
+ metadata={
1029
+ "help": "FSDP Optimizer State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.OptimStateDictConfig`"
1030
+ },
1031
+ )
1032
+ limit_all_gathers: bool = field(
1033
+ default=True,
1034
+ metadata={
1035
+ "help": "If False, then FSDP allows the CPU thread to schedule all-gathers "
1036
+ "without any extra synchronization. If True, then FSDP explicitly synchronizes the CPU thread to prevent "
1037
+ "too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. "
1038
+ "Enabling this can help lower the number of CUDA malloc retries."
1039
+ },
1040
+ )
1041
+ use_orig_params: bool = field(
1042
+ default=True,
1043
+ metadata={
1044
+ "help": "If `True`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. "
1045
+ "Useful in cases such as parameter-efficient fine-tuning. "
1046
+ "Please refer this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). "
1047
+ "This also enables multiple optimizer param groups. This should be `True` when creating an optimizer object before preparing/wrapping the model with FSDP."
1048
+ },
1049
+ )
1050
+ param_init_fn: Optional[Callable[[torch.nn.Module], None]] = field(
1051
+ default=None,
1052
+ metadata={
1053
+ "help": "A Callable[torch.nn.Module] -> None that specifies how modules "
1054
+ "that are currently on the meta device should be initialized onto an actual device."
1055
+ },
1056
+ )
1057
+ sync_module_states: bool = field(
1058
+ default=True,
1059
+ metadata={
1060
+ "help": "If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0 "
1061
+ "to ensure they are the same across all ranks after initialization"
1062
+ },
1063
+ )
1064
+ forward_prefetch: bool = field(
1065
+ default=False,
1066
+ metadata={
1067
+ "help": "If True, then FSDP explicitly prefetches the next upcoming "
1068
+ "all-gather while executing in the forward pass. only use with Static graphs."
1069
+ },
1070
+ )
1071
+ activation_checkpointing: bool = field(
1072
+ default=False,
1073
+ metadata={
1074
+ "help": "If True, activation checkpointing is a technique to reduce memory usage by clearing activations of "
1075
+ "certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time "
1076
+ "for reduced memory usage."
1077
+ },
1078
+ )
1079
+
1080
+ def __post_init__(self):
1081
+ from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy
1082
+
1083
+ prefix = "FSDP_"
1084
+ if self.sharding_strategy is None:
1085
+ sharding_strategy = os.environ.get(prefix + "SHARDING_STRATEGY", "FULL_SHARD")
1086
+ sharding_strategy = (
1087
+ FSDP_SHARDING_STRATEGY.index(sharding_strategy) + 1
1088
+ if not sharding_strategy.isdigit()
1089
+ else int(sharding_strategy)
1090
+ )
1091
+ self.sharding_strategy = ShardingStrategy(sharding_strategy)
1092
+
1093
+ if self.cpu_offload is None:
1094
+ if str_to_bool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
1095
+ self.cpu_offload = CPUOffload(offload_params=True)
1096
+ else:
1097
+ self.cpu_offload = CPUOffload(offload_params=False)
1098
+
1099
+ if self.backward_prefetch is None:
1100
+ prefetch_policy = os.environ.get(prefix + "BACKWARD_PREFETCH", "NO_PREFETCH")
1101
+ if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:
1102
+ self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)
1103
+
1104
+ if self.state_dict_type is None:
1105
+ state_dict_type_policy = os.environ.get(prefix + "STATE_DICT_TYPE", "FULL_STATE_DICT")
1106
+ self.set_state_dict_type(state_dict_type_policy)
1107
+ self.use_orig_params = str_to_bool(os.environ.get(prefix + "USE_ORIG_PARAMS", "False")) == 1
1108
+ self.sync_module_states = str_to_bool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
1109
+ self.forward_prefetch = str_to_bool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
1110
+ self.activation_checkpointing = str_to_bool(os.environ.get(prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1
1111
+
1112
+ if self.sync_module_states:
1113
+ if is_npu_available():
1114
+ device = torch.npu.current_device()
1115
+ elif is_cuda_available():
1116
+ device = torch.cuda.current_device()
1117
+ elif is_xpu_available():
1118
+ device = torch.xpu.current_device()
1119
+ else:
1120
+ raise RuntimeError(
1121
+ "There are currently no available devices found, must be one of 'XPU', 'CUDA', or 'NPU'."
1122
+ )
1123
+ self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False)
1124
+
1125
+ @staticmethod
1126
+ def get_module_class_from_name(module, name):
1127
+ """
1128
+ Gets a class from a module by its name.
1129
+
1130
+ Args:
1131
+ module (`torch.nn.Module`): The module to get the class from.
1132
+ name (`str`): The name of the class.
1133
+ """
1134
+ modules_children = list(module.children())
1135
+ if module.__class__.__name__ == name:
1136
+ return module.__class__
1137
+ elif len(modules_children) == 0:
1138
+ return
1139
+ else:
1140
+ for child_module in modules_children:
1141
+ module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name)
1142
+ if module_class is not None:
1143
+ return module_class
1144
+
1145
+ def set_auto_wrap_policy(self, model):
1146
+ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
1147
+
1148
+ default_transformer_cls_names_to_wrap = (
1149
+ ",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else ""
1150
+ )
1151
+ if self.auto_wrap_policy is None:
1152
+ auto_wrap_policy = os.environ.get("FSDP_AUTO_WRAP_POLICY", "NO_WRAP")
1153
+ if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]:
1154
+ transformer_cls_names_to_wrap = os.environ.get(
1155
+ "FSDP_TRANSFORMER_CLS_TO_WRAP", default_transformer_cls_names_to_wrap
1156
+ ).split(",")
1157
+ transformer_cls_to_wrap = set()
1158
+ for layer_class in transformer_cls_names_to_wrap:
1159
+ transformer_cls = FullyShardedDataParallelPlugin.get_module_class_from_name(model, layer_class)
1160
+ if transformer_cls is None:
1161
+ raise Exception("Could not find the transformer layer class to wrap in the model.")
1162
+ else:
1163
+ transformer_cls_to_wrap.add(transformer_cls)
1164
+
1165
+ self.auto_wrap_policy = functools.partial(
1166
+ transformer_auto_wrap_policy,
1167
+ # Transformer layer class to wrap
1168
+ transformer_layer_cls=transformer_cls_to_wrap,
1169
+ )
1170
+ elif auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[1]:
1171
+ min_num_params = int(os.environ.get("FSDP_MIN_NUM_PARAMS", 0))
1172
+ if min_num_params > 0:
1173
+ self.auto_wrap_policy = functools.partial(
1174
+ size_based_auto_wrap_policy, min_num_params=min_num_params
1175
+ )
1176
+
1177
+ def set_mixed_precision(self, mixed_precision, buffer_autocast=False, override=False):
1178
+ if isinstance(mixed_precision, str):
1179
+ if mixed_precision == "fp16":
1180
+ dtype = torch.float16
1181
+ elif mixed_precision == "bf16":
1182
+ dtype = torch.bfloat16
1183
+ elif mixed_precision == "fp32":
1184
+ dtype = torch.float32
1185
+ else:
1186
+ raise ValueError(f"Unknown mixed precision value: {mixed_precision}")
1187
+ else:
1188
+ dtype = mixed_precision
1189
+
1190
+ buffer_dtype = torch.float32 if buffer_autocast else dtype
1191
+ from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
1192
+
1193
+ if self.mixed_precision_policy is None or override:
1194
+ self.mixed_precision_policy = MixedPrecision(
1195
+ param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=buffer_dtype
1196
+ )
1197
+
1198
+ def set_state_dict_type(self, state_dict_type_policy):
1199
+ from torch.distributed.fsdp.fully_sharded_data_parallel import (
1200
+ FullOptimStateDictConfig,
1201
+ FullStateDictConfig,
1202
+ StateDictType,
1203
+ )
1204
+
1205
+ self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)
1206
+
1207
+ if self.state_dict_type == StateDictType.FULL_STATE_DICT:
1208
+ if self.state_dict_config is None:
1209
+ self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
1210
+ if self.optim_state_dict_config is None:
1211
+ self.optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True)
1212
+
1213
+
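A sketch of using the plugin above under `accelerate launch` on a machine with a CUDA, NPU, or XPU device (required because `sync_module_states` defaults to `True` and `__post_init__` needs a device for `param_init_fn`). Fields left unset are resolved from the `FSDP_*` environment variables read in `__post_init__`.

```python
from accelerate import Accelerator
from accelerate.utils import FullyShardedDataParallelPlugin

# With no arguments, every field falls back to FSDP_* environment variables or their defaults.
fsdp_plugin = FullyShardedDataParallelPlugin()
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```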
1214
+ @dataclass
1215
+ class MegatronLMPlugin:
1216
+ """
1217
+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective
1218
+ activation recomputation and optimized fused kernels.
1219
+ """
1220
+
1221
+ tp_degree: int = field(default=None, metadata={"help": "tensor parallelism degree."})
1222
+ pp_degree: int = field(default=None, metadata={"help": "pipeline parallelism degree."})
1223
+ num_micro_batches: int = field(default=None, metadata={"help": "number of micro-batches."})
1224
+ gradient_clipping: float = field(
1225
+ default=None, metadata={"help": "gradient clipping value based on global L2 Norm (0 to disable)"}
1226
+ )
1227
+ sequence_parallelism: bool = field(
1228
+ default=None,
1229
+ metadata={"help": "enable sequence parallelism"},
1230
+ )
1231
+ recompute_activations: bool = field(
1232
+ default=None,
1233
+ metadata={"help": "enable selective activation recomputation"},
1234
+ )
1235
+ use_distributed_optimizer: bool = field(
1236
+ default=None,
1237
+ metadata={"help": "enable distributed optimizer"},
1238
+ )
1239
+ pipeline_model_parallel_split_rank: int = field(
1240
+ default=None, metadata={"help": "Rank where encoder and decoder should be split."}
1241
+ )
1242
+ num_layers_per_virtual_pipeline_stage: int = field(
1243
+ default=None, metadata={"help": "Number of layers per virtual pipeline stage."}
1244
+ )
1245
+ is_train_batch_min: str = field(
1246
+ default=True,
1247
+ metadata={"help": "If both train & eval dataloaders are specified, this will decide the micro_batch_size"},
1248
+ )
1249
+ train_iters: int = field(
1250
+ default=None,
1251
+ metadata={
1252
+ "help": "Total number of iterations to train over all training runs. "
1253
+ "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`"
1254
+ },
1255
+ )
1256
+ train_samples: int = field(
1257
+ default=None,
1258
+ metadata={
1259
+ "help": "Total number of samples to train over all training runs. "
1260
+ "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`"
1261
+ },
1262
+ )
1263
+ weight_decay_incr_style: str = field(
1264
+ default="constant",
1265
+ metadata={"help": 'Weight decay increment function. choices=["constant", "linear", "cosine"]. '},
1266
+ )
1267
+ start_weight_decay: float = field(
1268
+ default=None,
1269
+ metadata={"help": "Initial weight decay coefficient for L2 regularization."},
1270
+ )
1271
+ end_weight_decay: float = field(
1272
+ default=None,
1273
+ metadata={"help": "End of run weight decay coefficient for L2 regularization."},
1274
+ )
1275
+ lr_decay_style: str = field(
1276
+ default="linear",
1277
+ metadata={"help": "Learning rate decay function. choices=['constant', 'linear', 'cosine']."},
1278
+ )
1279
+ lr_decay_iters: int = field(
1280
+ default=None,
1281
+ metadata={"help": "Number of iterations for learning rate decay. If None defaults to `train_iters`."},
1282
+ )
1283
+ lr_decay_samples: int = field(
1284
+ default=None,
1285
+ metadata={"help": "Number of samples for learning rate decay. If None defaults to `train_samples`."},
1286
+ )
1287
+ lr_warmup_iters: int = field(
1288
+ default=None,
1289
+ metadata={"help": "number of iterations to linearly warmup learning rate over."},
1290
+ )
1291
+ lr_warmup_samples: int = field(
1292
+ default=None,
1293
+ metadata={"help": "number of samples to linearly warmup learning rate over."},
1294
+ )
1295
+ lr_warmup_fraction: float = field(
1296
+ default=None,
1297
+ metadata={"help": "fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over."},
1298
+ )
1299
+ min_lr: float = field(
1300
+ default=0,
1301
+ metadata={"help": "Minumum value for learning rate. The scheduler clip values below this threshold."},
1302
+ )
1303
+ consumed_samples: List[int] = field(
1304
+ default=None,
1305
+ metadata={
1306
+ "help": "Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call."
1307
+ },
1308
+ )
1309
+ no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to disable weight decay."})
1310
+ scale_lr_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to scale learning rate."})
1311
+ lr_mult: float = field(default=1.0, metadata={"help": "Learning rate multiplier."})
1312
+ megatron_dataset_flag: bool = field(
1313
+ default=False,
1314
+ metadata={"help": "Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format."},
1315
+ )
1316
+ seq_length: int = field(
1317
+ default=None,
1318
+ metadata={"help": "Maximum sequence length to process."},
1319
+ )
1320
+ encoder_seq_length: int = field(
1321
+ default=None,
1322
+ metadata={"help": "Maximum sequence length to process for the encoder."},
1323
+ )
1324
+ decoder_seq_length: int = field(
1325
+ default=None,
1326
+ metadata={"help": "Maximum sequence length to process for the decoder."},
1327
+ )
1328
+ tensorboard_dir: str = field(
1329
+ default=None,
1330
+ metadata={"help": "Path to save tensorboard logs."},
1331
+ )
1332
+ set_all_logging_options: bool = field(
1333
+ default=False,
1334
+ metadata={"help": "Whether to set all logging options."},
1335
+ )
1336
+ eval_iters: int = field(
1337
+ default=100, metadata={"help": "Number of iterations to run for evaluation validation/test for."}
1338
+ )
1339
+ eval_interval: int = field(
1340
+ default=1000, metadata={"help": "Interval between running evaluation on validation set."}
1341
+ )
1342
+ return_logits: bool = field(
1343
+ default=False,
1344
+ metadata={"help": "Whether to return logits from the model."},
1345
+ )
1346
+
1347
+ # custom train step args
1348
+ custom_train_step_class: Optional[Any] = field(
1349
+ default=None,
1350
+ metadata={"help": "Custom train step class."},
1351
+ )
1352
+ custom_train_step_kwargs: Optional[Dict[str, Any]] = field(
1353
+ default=None,
1354
+ metadata={"help": "Custom train step kwargs."},
1355
+ )
1356
+
1357
+ # custom model args
1358
+ custom_model_provider_function: Optional[Callable] = field(
1359
+ default=None,
1360
+ metadata={"help": "Custom model provider function."},
1361
+ )
1362
+ custom_prepare_model_function: Optional[Callable] = field(
1363
+ default=None,
1364
+ metadata={"help": "Custom prepare model function."},
1365
+ )
1366
+
1367
+ # remaining args such as enabling Alibi/ROPE positional embeddings,
1368
+ # wandb logging, Multi-Query Attention, etc.
1369
+ other_megatron_args: Optional[Dict[str, Any]] = field(
1370
+ default=None,
1371
+ metadata={"help": "Other Megatron-LM arguments. Please refer Megatron-LM"},
1372
+ )
1373
+
1374
+ def __post_init__(self):
1375
+ prefix = "MEGATRON_LM_"
1376
+ if self.tp_degree is None:
1377
+ self.tp_degree = int(os.environ.get(prefix + "TP_DEGREE", 1))
1378
+ if self.pp_degree is None:
1379
+ self.pp_degree = int(os.environ.get(prefix + "PP_DEGREE", 1))
1380
+ if self.num_micro_batches is None:
1381
+ self.num_micro_batches = int(os.environ.get(prefix + "NUM_MICRO_BATCHES", 1))
1382
+ if self.gradient_clipping is None:
1383
+ self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0))
1384
+ if self.recompute_activations is None:
1385
+ self.recompute_activations = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATIONS", "False")) == 1
1386
+ if self.use_distributed_optimizer is None:
1387
+ self.use_distributed_optimizer = (
1388
+ str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1
1389
+ )
1390
+ if self.sequence_parallelism is None:
1391
+ self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1
1392
+
1393
+ if self.pp_degree > 1 or self.use_distributed_optimizer:
1394
+ self.DDP_impl = "local"
1395
+ else:
1396
+ self.DDP_impl = "torch"
1397
+
1398
+ if self.consumed_samples is not None:
1399
+ if len(self.consumed_samples) == 1:
1400
+ self.consumed_samples.extend([0, 0])
1401
+ elif len(self.consumed_samples) == 2:
1402
+ self.consumed_samples.append(0)
1403
+
1404
+ self.megatron_lm_default_args = {
1405
+ "tensor_model_parallel_size": self.tp_degree,
1406
+ "pipeline_model_parallel_size": self.pp_degree,
1407
+ "pipeline_model_parallel_split_rank": self.pipeline_model_parallel_split_rank,
1408
+ "num_layers_per_virtual_pipeline_stage": self.num_layers_per_virtual_pipeline_stage,
1409
+ "DDP_impl": self.DDP_impl,
1410
+ "use_distributed_optimizer": self.use_distributed_optimizer,
1411
+ "sequence_parallel": self.sequence_parallelism,
1412
+ "clip_grad": self.gradient_clipping,
1413
+ "num_micro_batches": self.num_micro_batches,
1414
+ "consumed_samples": self.consumed_samples,
1415
+ "no_wd_decay_cond": self.no_wd_decay_cond,
1416
+ "scale_lr_cond": self.scale_lr_cond,
1417
+ "lr_mult": self.lr_mult,
1418
+ "megatron_dataset_flag": self.megatron_dataset_flag,
1419
+ "eval_iters": self.eval_iters,
1420
+ "eval_interval": self.eval_interval,
1421
+ }
1422
+ if self.recompute_activations:
1423
+ self.megatron_lm_default_args["recompute_granularity"] = "selective"
1424
+ if self.tensorboard_dir is not None:
1425
+ self.megatron_lm_default_args["tensorboard_dir"] = self.tensorboard_dir
1426
+ if self.set_all_logging_options:
1427
+ self.set_tensorboard_logging_options()
1428
+ if self.other_megatron_args is not None:
1429
+ self.megatron_lm_default_args.update(self.other_megatron_args)
1430
+
1431
+ def set_network_size_args(self, model, batch_data=None):
1432
+ # Check if the model is either BERT, GPT or T5 else raise error
1433
+ # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'
1434
+ if "megatron-bert" in model.config.model_type.lower():
1435
+ model_type_name = "bert"
1436
+ num_layers = model.config.num_hidden_layers
1437
+ hidden_size = model.config.hidden_size
1438
+ num_attention_heads = model.config.num_attention_heads
1439
+ max_position_embeddings = model.config.max_position_embeddings
1440
+ num_labels = model.config.num_labels
1441
+ orig_vocab_size = model.config.vocab_size
1442
+ if "maskedlm" in model.__class__.__name__.lower():
1443
+ pretraining_flag = True
1444
+ if self.seq_length is not None:
1445
+ if self.encoder_seq_length is not None:
1446
+ warnings.warn("Both `seq_length` and `encoder_seq_length` are set. Using `encoder_seq_length`.")
1447
+ self.seq_length = self.encoder_seq_length
1448
+ elif self.encoder_seq_length is not None:
1449
+ self.seq_length = self.encoder_seq_length
1450
+ elif batch_data is not None:
1451
+ self.seq_length = batch_data["input_ids"].shape[1]
1452
+ else:
1453
+ self.seq_length = max_position_embeddings
1454
+ self.megatron_lm_default_args["seq_length"] = self.seq_length
1455
+ elif "gpt2" in model.config.model_type.lower():
1456
+ model_type_name = "gpt"
1457
+ num_layers = model.config.n_layer
1458
+ hidden_size = model.config.n_embd
1459
+ num_attention_heads = model.config.n_head
1460
+ max_position_embeddings = model.config.n_positions
1461
+ orig_vocab_size = model.config.vocab_size
1462
+ pretraining_flag = True
1463
+ if self.seq_length is not None:
1464
+ if self.decoder_seq_length is not None:
1465
+ warnings.warn("Both `seq_length` and `decoder_seq_length` are set. Using `decoder_seq_length`.")
1466
+ self.seq_length = self.decoder_seq_length
1467
+ elif self.decoder_seq_length is not None:
1468
+ self.seq_length = self.decoder_seq_length
1469
+ elif batch_data is not None:
1470
+ self.seq_length = batch_data["input_ids"].shape[1]
1471
+ else:
1472
+ self.seq_length = max_position_embeddings
1473
+ self.megatron_lm_default_args["seq_length"] = self.seq_length
1474
+ self.megatron_lm_default_args["return_logits"] = self.return_logits
1475
+ self.megatron_lm_default_args["tokenizer_type"] = "GPT2BPETokenizer"
1476
+ elif "t5" in model.config.model_type.lower():
1477
+ model_type_name = "t5"
1478
+ num_layers = model.config.num_layers
1479
+ hidden_size = model.config.d_model
1480
+ num_attention_heads = model.config.num_heads
1481
+ max_position_embeddings = model.config.n_positions if hasattr(model.config, "n_positions") else 1024
1482
+ orig_vocab_size = model.config.vocab_size
1483
+ pretraining_flag = True
1484
+ if self.encoder_seq_length is None:
1485
+ if batch_data is not None:
1486
+ self.encoder_seq_length = batch_data["input_ids"].shape[1]
1487
+ else:
1488
+ self.encoder_seq_length = max_position_embeddings
1489
+ if self.decoder_seq_length is None:
1490
+ if batch_data is not None:
1491
+ self.decoder_seq_length = batch_data["labels"].shape[1]
1492
+ else:
1493
+ self.decoder_seq_length = max_position_embeddings
1494
+
1495
+ self.megatron_lm_default_args["encoder_seq_length"] = self.encoder_seq_length
1496
+ self.megatron_lm_default_args["decoder_seq_length"] = self.decoder_seq_length
1497
+ else:
1498
+ raise ValueError(
1499
+ "🤗 Accelerate Megatron-LM integration supports only BERT, GPT and T5 model. "
1500
+ "Please check the model you are using is one of those."
1501
+ )
1502
+
1503
+ self.megatron_lm_default_args["model_type_name"] = model_type_name
1504
+ self.megatron_lm_default_args["num_layers"] = num_layers
1505
+ self.megatron_lm_default_args["hidden_size"] = hidden_size
1506
+ self.megatron_lm_default_args["num_attention_heads"] = num_attention_heads
1507
+ self.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings
1508
+ self.megatron_lm_default_args["pretraining_flag"] = pretraining_flag
1509
+ self.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size
1510
+ self.megatron_lm_default_args["model_return_dict"] = model.config.return_dict
1511
+ if model_type_name == "bert":
1512
+ self.megatron_lm_default_args["num_labels"] = num_labels
1513
+
1514
+ def set_mixed_precision(self, mixed_precision):
1515
+ if mixed_precision == "fp16":
1516
+ self.megatron_lm_default_args["fp16"] = True
1517
+ elif mixed_precision == "bf16":
1518
+ self.megatron_lm_default_args["bf16"] = True
1519
+ self.DDP_impl = "local"
1520
+ self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl
1521
+
1522
+ def set_training_args(self, micro_batch_size, dp_degree):
1523
+ self.data_parallel_size = dp_degree
1524
+ self.micro_batch_size = micro_batch_size
1525
+ self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches
1526
+ self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size
1527
+ self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size
1528
+ self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size
1529
+
1530
+ def set_optimizer_type(self, optimizer):
1531
+ optimizer_name = optimizer.__class__.__name__.lower()
1532
+ if "adam" in optimizer_name:
1533
+ self.megatron_lm_default_args["optimizer"] = "adam"
1534
+ self.megatron_lm_default_args["adam_beta1"] = optimizer.defaults["betas"][0]
1535
+ self.megatron_lm_default_args["adam_beta2"] = optimizer.defaults["betas"][1]
1536
+ self.megatron_lm_default_args["adam_eps"] = optimizer.defaults["eps"]
1537
+ elif "sgd" in optimizer_name:
1538
+ self.megatron_lm_default_args["optimizer"] = "sgd"
1539
+ self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"]
1540
+ else:
1541
+ raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM")
1542
+
1543
+ self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"]
1544
+ self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"]
1545
+
1546
+ def set_scheduler_args(self, scheduler):
1547
+ if self.train_iters is None:
1548
+ self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"]
1549
+ if self.train_samples is not None:
1550
+ self.train_samples = None
1551
+ warnings.warn(
1552
+ "Ignoring `train_samples` as `train_iters` based on scheduler is being used for training."
1553
+ )
1554
+ if self.lr_warmup_iters is None:
1555
+ self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args["data_parallel_size"]
1556
+ if self.lr_warmup_samples is not None:
1557
+ warnings.warn(
1558
+ "Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training."
1559
+ )
1560
+ self.lr_warmup_samples = 0
1561
+
1562
+ self.megatron_lm_default_args["train_iters"] = self.train_iters
1563
+ self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters
1564
+ self.megatron_lm_default_args["train_samples"] = self.train_samples
1565
+ self.megatron_lm_default_args["lr_warmup_samples"] = self.lr_warmup_samples
1566
+ self.megatron_lm_default_args["lr_decay_iters"] = self.lr_decay_iters
1567
+ self.megatron_lm_default_args["lr_decay_samples"] = self.lr_decay_samples
1568
+ self.megatron_lm_default_args["lr_warmup_fraction"] = self.lr_warmup_fraction
1569
+ self.megatron_lm_default_args["lr_decay_style"] = self.lr_decay_style
1570
+ self.megatron_lm_default_args["weight_decay_incr_style"] = self.weight_decay_incr_style
1571
+ self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay
1572
+ self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay
1573
+ self.megatron_lm_default_args["min_lr"] = self.min_lr
1574
+
1575
+ def set_tensorboard_logging_options(self):
1576
+ from megatron.arguments import _add_logging_args
1577
+
1578
+ parser = argparse.ArgumentParser()
1579
+ parser = _add_logging_args(parser)
1580
+ logging_args = parser.parse_known_args()
1581
+ self.dataset_args = vars(logging_args[0])
1582
+ for key, value in self.dataset_args.items():
1583
+ if key.startswith("log_"):
1584
+ self.megatron_lm_default_args[key] = True
1585
+ elif key.startswith("no_log_"):
1586
+ self.megatron_lm_default_args[key.replace("no_", "")] = True
1587
+
1588
+
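For reference, a minimal sketch (not part of the diff) of how `MegatronLMPlugin.__post_init__` above falls back to `MEGATRON_LM_*` environment variables; it assumes the `accelerate` build from this commit is importable and that `MegatronLMPlugin` is exported from `accelerate.utils`:

import os
from accelerate.utils import MegatronLMPlugin

# tp/pp degrees are read from the environment when not passed explicitly
os.environ["MEGATRON_LM_TP_DEGREE"] = "2"
os.environ["MEGATRON_LM_PP_DEGREE"] = "2"

plugin = MegatronLMPlugin(num_micro_batches=4)
print(plugin.tp_degree, plugin.pp_degree)                    # 2 2
print(plugin.DDP_impl)                                       # "local", since pp_degree > 1
print(plugin.megatron_lm_default_args["num_micro_batches"])  # 4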
1589
+ @dataclass
1590
+ class BnbQuantizationConfig:
1591
+ """
1592
+ A plugin to enable BitsAndBytes 4bit and 8bit quantization
1593
+ """
1594
+
1595
+ load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."})
1596
+
1597
+ llm_int8_threshold: float = field(
1598
+ default=6.0, metadata={"help": "value of the outliner threshold. only relevant when load_in_8bit=True"}
1599
+ )
1600
+
1601
+ load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."})
1602
+
1603
+ bnb_4bit_quant_type: str = field(
1604
+ default="fp4",
1605
+ metadata={
1606
+ "help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','np4'}."
1607
+ },
1608
+ )
1609
+
1610
+ bnb_4bit_use_double_quant: bool = field(
1611
+ default=False,
1612
+ metadata={
1613
+ "help": "enable nested quantization where the quantization constants from the first quantization are quantized again."
1614
+ },
1615
+ )
1616
+
1617
+ bnb_4bit_compute_dtype: str = field(
1618
+ default="fp16",
1619
+ metadata={
1620
+ "help": "This sets the computational type which might be different than the input time. For example, inputs might be "
1621
+ "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}."
1622
+ },
1623
+ )
1624
+
1625
+ torch_dtype: torch.dtype = field(
1626
+ default=None,
1627
+ metadata={
1628
+ "help": "this sets the dtype of the remaining non quantized layers. `bitsandbytes` library suggests to set the value"
1629
+ "to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model "
1630
+ },
1631
+ )
1632
+
1633
+ skip_modules: List[str] = field(
1634
+ default=None,
1635
+ metadata={
1636
+ "help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`."
1637
+ },
1638
+ )
1639
+
1640
+ keep_in_fp32_modules: List[str] = field(
1641
+ default=None,
1642
+ metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."},
1643
+ )
1644
+
1645
+ def __post_init__(self):
1646
+ """
1647
+ Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
1648
+ """
1649
+ if not isinstance(self.load_in_8bit, bool):
1650
+ raise ValueError("load_in_8bit must be a boolean")
1651
+
1652
+ if not isinstance(self.load_in_4bit, bool):
1653
+ raise ValueError("load_in_4bit must be a boolean")
1654
+
1655
+ if self.load_in_4bit and self.load_in_8bit:
1656
+ raise ValueError("load_in_4bit and load_in_8 can't be both True")
1657
+
1658
+ if not self.load_in_4bit and not self.load_in_8bit:
1659
+ raise ValueError("load_in_4bit and load_in_8 can't be both False")
1660
+
1661
+ if not isinstance(self.llm_int8_threshold, (int, float)):
1662
+ raise ValueError("llm_int8_threshold must be a float or an int")
1663
+
1664
+ if not isinstance(self.bnb_4bit_quant_type, str):
1665
+ raise ValueError("bnb_4bit_quant_type must be a string")
1666
+ elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]:
1667
+ raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}")
1668
+
1669
+ if not isinstance(self.bnb_4bit_use_double_quant, bool):
1670
+ raise ValueError("bnb_4bit_use_double_quant must be a boolean")
1671
+
1672
+ if isinstance(self.bnb_4bit_compute_dtype, str):
1673
+ if self.bnb_4bit_compute_dtype == "fp32":
1674
+ self.bnb_4bit_compute_dtype = torch.float32
1675
+ elif self.bnb_4bit_compute_dtype == "fp16":
1676
+ self.bnb_4bit_compute_dtype = torch.float16
1677
+ elif self.bnb_4bit_compute_dtype == "bf16":
1678
+ self.bnb_4bit_compute_dtype = torch.bfloat16
1679
+ else:
1680
+ raise ValueError(
1681
+ f"bnb_4bit_compute_dtype must be in ['fp32','fp16','bf16'] but found {self.bnb_4bit_compute_dtype}"
1682
+ )
1683
+ elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
1684
+ raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
1685
+
1686
+ if self.skip_modules is not None and not isinstance(self.skip_modules, list):
1687
+ raise ValueError("skip_modules must be a list of strings")
1688
+
1689
+ if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list):
1690
+ raise ValueError("keep_in_fp_32_modules must be a list of strings")
1691
+
1692
+ if self.load_in_4bit:
1693
+ self.target_dtype = CustomDtype.INT4
1694
+
1695
+ if self.load_in_8bit:
1696
+ self.target_dtype = torch.int8
1697
+
1698
+ if self.load_in_4bit and self.llm_int8_threshold != 6.0:
1699
+ warnings.warn("llm_int8_threshold can only be used for model loaded in 8bit")
1700
+
1701
+ if isinstance(self.torch_dtype, str):
1702
+ if self.torch_dtype == "fp32":
1703
+ self.torch_dtype = torch.float32
1704
+ elif self.torch_dtype == "fp16":
1705
+ self.torch_dtype = torch.float16
1706
+ elif self.torch_dtype == "bf16":
1707
+ self.torch_dtype = torch.bfloat16
1708
+ else:
1709
+ raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}")
1710
+ if self.load_in_8bit and self.torch_dtype is None:
1711
+ self.torch_dtype = torch.float16
1712
+
1713
+ if self.load_in_4bit and self.torch_dtype is None:
1714
+ self.torch_dtype = self.bnb_4bit_compute_dtype
1715
+
1716
+ if not isinstance(self.torch_dtype, torch.dtype):
1717
+ raise ValueError("torch_dtype must be a torch.dtype")
env-llmeval/lib/python3.10/site-packages/accelerate/utils/deepspeed.py ADDED
@@ -0,0 +1,271 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import base64
16
+ import json
17
+ import os
18
+ from copy import deepcopy
19
+
20
+ from ..optimizer import AcceleratedOptimizer
21
+ from ..scheduler import AcceleratedScheduler
22
+
23
+
24
+ class HfDeepSpeedConfig:
25
+ """
26
+ This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.
27
+
28
+ A `weakref` of this object is stored in the module's globals to be able to access the config from areas where
29
+ things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore
30
+ it's important that this object remains alive while the program is still running.
31
+
32
+ [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration
33
+ with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic
34
+ the DeepSpeed configuration is not modified in any way.
35
+
36
+ Args:
37
+ config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
38
+
39
+ """
40
+
41
+ def __init__(self, config_file_or_dict):
42
+ if isinstance(config_file_or_dict, dict):
43
+ # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
44
+ # modified it, it will not be accepted here again, since `auto` values would have been overridden
45
+ config = deepcopy(config_file_or_dict)
46
+ elif os.path.exists(config_file_or_dict):
47
+ with open(config_file_or_dict, encoding="utf-8") as f:
48
+ config = json.load(f)
49
+ else:
50
+ try:
51
+ config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
52
+ config = json.loads(config_decoded)
53
+ except (UnicodeDecodeError, AttributeError, ValueError):
54
+ raise ValueError(
55
+ f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
56
+ )
57
+
58
+ self.config = config
59
+
60
+ self.set_stage_and_offload()
61
+
62
+ def set_stage_and_offload(self):
63
+ # zero stage - this is done as early as possible, before model is created, to allow
64
+ # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
65
+ # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
66
+ self._stage = self.get_value("zero_optimization.stage", -1)
67
+
68
+ # offload
69
+ self._offload = False
70
+ if self.is_zero2() or self.is_zero3():
71
+ offload_devices_valid = set(["cpu", "nvme"])
72
+ offload_devices = set(
73
+ [
74
+ self.get_value("zero_optimization.offload_optimizer.device"),
75
+ self.get_value("zero_optimization.offload_param.device"),
76
+ ]
77
+ )
78
+ if len(offload_devices & offload_devices_valid) > 0:
79
+ self._offload = True
80
+
81
+ def find_config_node(self, ds_key_long):
82
+ config = self.config
83
+
84
+ # find the config node of interest if it exists
85
+ nodes = ds_key_long.split(".")
86
+ ds_key = nodes.pop()
87
+ for node in nodes:
88
+ config = config.get(node)
89
+ if config is None:
90
+ return None, ds_key
91
+
92
+ return config, ds_key
93
+
94
+ def get_value(self, ds_key_long, default=None):
95
+ """
96
+ Returns the set value or `default` if no value is set
97
+ """
98
+ config, ds_key = self.find_config_node(ds_key_long)
99
+ if config is None:
100
+ return default
101
+ return config.get(ds_key, default)
102
+
103
+ def del_config_sub_tree(self, ds_key_long, must_exist=False):
104
+ """
105
+ Deletes a sub-section of the config file if it's found.
106
+
107
+ Unless `must_exist` is `True` the section doesn't have to exist.
108
+ """
109
+ config = self.config
110
+
111
+ # find the config node of interest if it exists
112
+ nodes = ds_key_long.split(".")
113
+ for node in nodes:
114
+ parent_config = config
115
+ config = config.get(node)
116
+ if config is None:
117
+ if must_exist:
118
+ raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
119
+ else:
120
+ return
121
+
122
+ # if found remove it
123
+ if parent_config is not None:
124
+ parent_config.pop(node)
125
+
126
+ def is_true(self, ds_key_long):
127
+ """
128
+ Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very
129
+ specific question of whether the value is set to `True` (and it's not set to `False`` or isn't set).
130
+
131
+ """
132
+ value = self.get_value(ds_key_long)
133
+ return False if value is None else bool(value)
134
+
135
+ def is_false(self, ds_key_long):
136
+ """
137
+ Returns `True`/`False` only if the value is set, always `False` otherwise. So use this method to ask the very
138
+ specific question of whether the value is set to `False` (and it's not set to `True` or isn't set).
139
+ """
140
+ value = self.get_value(ds_key_long)
141
+ return False if value is None else not bool(value)
142
+
143
+ def is_zero2(self):
144
+ return self._stage == 2
145
+
146
+ def is_zero3(self):
147
+ return self._stage == 3
148
+
149
+ def is_offload(self):
150
+ return self._offload
151
+
152
+
153
+ class DeepSpeedEngineWrapper:
154
+ """
155
+ Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow the conventional training loop.
156
+
157
+ Args:
158
+ engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap
159
+ """
160
+
161
+ def __init__(self, engine):
162
+ self.engine = engine
163
+
164
+ def backward(self, loss, **kwargs):
165
+ # runs backpropagation and handles mixed precision
166
+ self.engine.backward(loss, **kwargs)
167
+
168
+ # Deepspeed's `engine.step` performs the following operations:
169
+ # - gradient accumulation check
170
+ # - gradient clipping
171
+ # - optimizer step
172
+ # - zero grad
173
+ # - checking overflow
174
+ # - lr_scheduler step (only if engine.lr_scheduler is not None)
175
+ self.engine.step()
176
+ # and this plugin overrides the above calls with no-ops when Accelerate runs under
177
+ # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
178
+ # training loop that works transparently under many training regimes.
179
+
180
+
181
+ class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
182
+ """
183
+ Internal wrapper around a deepspeed optimizer.
184
+
185
+ Args:
186
+ optimizer (`torch.optim.optimizer.Optimizer`):
187
+ The optimizer to wrap.
188
+ """
189
+
190
+ def __init__(self, optimizer):
191
+ super().__init__(optimizer, device_placement=False, scaler=None)
192
+ self.__has_overflow__ = hasattr(self.optimizer, "overflow")
193
+
194
+ def zero_grad(self, set_to_none=None):
195
+ pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
196
+
197
+ def step(self):
198
+ pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
199
+
200
+ @property
201
+ def step_was_skipped(self):
202
+ """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
203
+ if self.__has_overflow__:
204
+ return self.optimizer.overflow
205
+ return False
206
+
207
+
208
+ class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
209
+ """
210
+ Internal wrapper around a deepspeed scheduler.
211
+
212
+ Args:
213
+ scheduler (`torch.optim.lr_scheduler.LambdaLR`):
214
+ The scheduler to wrap.
215
+ optimizers (one or a list of `torch.optim.Optimizer`):
216
+ """
217
+
218
+ def __init__(self, scheduler, optimizers):
219
+ super().__init__(scheduler, optimizers)
220
+
221
+ def step(self):
222
+ pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
223
+
224
+
225
+ class DummyOptim:
226
+ """
227
+ Dummy optimizer that holds model parameters or param groups; it is primarily used to follow the conventional
228
+ training loop when the optimizer config is specified in the DeepSpeed config file.
229
+
230
+ Args:
231
+ lr (float):
232
+ Learning rate.
233
+ params (iterable): iterable of parameters to optimize or dicts defining
234
+ parameter groups
235
+ weight_decay (float):
236
+ Weight decay.
237
+ **kwargs (additional keyword arguments, *optional*):
238
+ Other arguments.
239
+ """
240
+
241
+ def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
242
+ self.params = params
243
+ self.lr = lr
244
+ self.weight_decay = weight_decay
245
+ self.kwargs = kwargs
246
+
247
+
248
+ class DummyScheduler:
249
+ """
250
+ Dummy scheduler that wraps the optimizer; it is primarily used to follow the conventional training
251
+ loop when the scheduler config is specified in the DeepSpeed config file.
252
+
253
+ Args:
254
+ optimizer (`torch.optim.optimizer.Optimizer`):
255
+ The optimizer to wrap.
256
+ total_num_steps (int, *optional*):
257
+ Total number of steps.
258
+ warmup_num_steps (int, *optional*):
259
+ Number of steps for warmup.
260
+ lr_scheduler_callable (callable, *optional*):
261
+ A callable function that creates an LR Scheduler. It accepts only one argument `optimizer`.
262
+ **kwargs (additional keyword arguments, *optional*):
263
+ Other arguments.
264
+ """
265
+
266
+ def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs):
267
+ self.optimizer = optimizer
268
+ self.total_num_steps = total_num_steps
269
+ self.warmup_num_steps = warmup_num_steps
270
+ self.lr_scheduler_callable = lr_scheduler_callable
271
+ self.kwargs = kwargs
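A minimal sketch (not part of the diff) of querying `HfDeepSpeedConfig` with a plain dict; it assumes only that `accelerate.utils.deepspeed` from this commit is importable (DeepSpeed itself is not needed just to parse the config):

from accelerate.utils.deepspeed import HfDeepSpeedConfig

ds_config = {
    "zero_optimization": {
        "stage": 3,
        "offload_param": {"device": "cpu"},
    }
}
hf_ds_config = HfDeepSpeedConfig(ds_config)
print(hf_ds_config.get_value("zero_optimization.stage"))  # 3
print(hf_ds_config.is_zero3())                            # True
print(hf_ds_config.is_offload())                          # True (params offloaded to cpu)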
env-llmeval/lib/python3.10/site-packages/accelerate/utils/environment.py ADDED
@@ -0,0 +1,274 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ import math
17
+ import os
18
+ import platform
19
+ import subprocess
20
+ import sys
21
+ from dataclasses import dataclass, field
22
+ from functools import lru_cache
23
+ from shutil import which
24
+ from typing import List, Optional
25
+
26
+ import torch
27
+ from packaging.version import parse
28
+
29
+
30
+ logger = logging.getLogger(__name__)
31
+
32
+
33
+ def convert_dict_to_env_variables(current_env: dict):
34
+ """
35
+ Verifies that all keys and values in `current_env` do not contain illegal keys or values, and returns a list of
36
+ strings as the result.
37
+
38
+ Example:
39
+ ```python
40
+ >>> from accelerate.utils.environment import verify_env
41
+
42
+ >>> env = {"ACCELERATE_DEBUG_MODE": "1", "BAD_ENV_NAME": "<mything", "OTHER_ENV": "2"}
43
+ >>> valid_env_items = verify_env(env)
44
+ >>> print(valid_env_items)
45
+ ["ACCELERATE_DEBUG_MODE=1\n", "OTHER_ENV=2\n"]
46
+ ```
47
+ """
48
+ forbidden_chars = [";", "\n", "<", ">", " "]
49
+ valid_env_items = []
50
+ for key, value in current_env.items():
51
+ if all(char not in (key + value) for char in forbidden_chars) and len(key) >= 1 and len(value) >= 1:
52
+ valid_env_items.append(f"{key}={value}\n")
53
+ else:
54
+ logger.warning(f"WARNING: Skipping {key}={value} as it contains forbidden characters or missing values.")
55
+ return valid_env_items
56
+
57
+
58
+ def str_to_bool(value) -> int:
59
+ """
60
+ Converts a string representation of truth to `True` (1) or `False` (0).
61
+
62
+ True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False value are `n`, `no`, `f`, `false`, `off`, and `0`;
63
+ """
64
+ value = value.lower()
65
+ if value in ("y", "yes", "t", "true", "on", "1"):
66
+ return 1
67
+ elif value in ("n", "no", "f", "false", "off", "0"):
68
+ return 0
69
+ else:
70
+ raise ValueError(f"invalid truth value {value}")
71
+
72
+
73
+ def get_int_from_env(env_keys, default):
74
+ """Returns the first positive env value found in the `env_keys` list or the default."""
75
+ for e in env_keys:
76
+ val = int(os.environ.get(e, -1))
77
+ if val >= 0:
78
+ return val
79
+ return default
80
+
81
+
82
+ def parse_flag_from_env(key, default=False):
83
+ """Returns truthy value for `key` from the env if available else the default."""
84
+ value = os.environ.get(key, str(default))
85
+ return str_to_bool(value) == 1 # As its name indicates `str_to_bool` actually returns an int...
86
+
87
+
88
+ def parse_choice_from_env(key, default="no"):
89
+ value = os.environ.get(key, str(default))
90
+ return value
91
+
92
+
93
+ def are_libraries_initialized(*library_names: str) -> List[str]:
94
+ """
95
+ Checks if any of `library_names` are imported in the environment. Will return any names that are.
96
+ """
97
+ return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]
98
+
99
+
100
+ def _nvidia_smi():
101
+ """
102
+ Returns the right nvidia-smi command based on the system.
103
+ """
104
+ if platform.system() == "Windows":
105
+ # If platform is Windows and nvidia-smi can't be found in path
106
+ # try from systemd drive with default installation path
107
+ command = which("nvidia-smi")
108
+ if command is None:
109
+ command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"]
110
+ else:
111
+ command = "nvidia-smi"
112
+ return command
113
+
114
+
115
+ def get_gpu_info():
116
+ """
117
+ Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA.
118
+
119
+ Largely based on the `gputil` library.
120
+ """
121
+ # Returns as list of `n` GPUs and their names
122
+ output = subprocess.check_output(
123
+ [_nvidia_smi(), "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True
124
+ )
125
+ output = output.strip()
126
+ gpus = output.split(os.linesep)
127
+ # Get names from output
128
+ gpu_count = len(gpus)
129
+ gpu_names = [gpu.split(",")[1].strip() for gpu in gpus]
130
+ return gpu_names, gpu_count
131
+
132
+
133
+ def get_driver_version():
134
+ """
135
+ Returns the driver version
136
+
137
+ In the case of multiple GPUs, will return the first.
138
+ """
139
+ output = subprocess.check_output(
140
+ [_nvidia_smi(), "--query-gpu=driver_version", "--format=csv,noheader"], universal_newlines=True
141
+ )
142
+ output = output.strip()
143
+ return output.split(os.linesep)[0]
144
+
145
+
146
+ def check_cuda_p2p_ib_support():
147
+ """
148
+ Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after
149
+ the 3090.
150
+
151
+ Noteably uses `nvidia-smi` instead of torch to not initialize CUDA.
152
+ """
153
+ try:
154
+ device_names, device_count = get_gpu_info()
155
+ # As new consumer GPUs get released, add them to `unsupported_devices``
156
+ unsupported_devices = {"RTX 40"}
157
+ if device_count > 1:
158
+ if any(
159
+ unsupported_device in device_name
160
+ for device_name in device_names
161
+ for unsupported_device in unsupported_devices
162
+ ):
163
+ # Check if they have the right driver version
164
+ acceptable_driver_version = "550.40.07"
165
+ current_driver_version = get_driver_version()
166
+ if parse(current_driver_version) < parse(acceptable_driver_version):
167
+ return False
168
+ return True
169
+ except Exception:
170
+ pass
171
+ return True
172
+
173
+
174
+ def check_fp8_capability():
175
+ """
176
+ Checks if all the current GPUs available support FP8.
177
+
178
+ Notably must initialize `torch.cuda` to check.
179
+ """
180
+ cuda_device_capacity = torch.cuda.get_device_capability()
181
+ return cuda_device_capacity >= (8, 9)
182
+
183
+
184
+ @dataclass
185
+ class CPUInformation:
186
+ """
187
+ Stores information about the CPU in a distributed environment. It contains the following attributes:
188
+ - rank: The rank of the current process.
189
+ - world_size: The total number of processes in the world.
190
+ - local_rank: The rank of the current process on the local node.
191
+ - local_world_size: The total number of processes on the local node.
192
+ """
193
+
194
+ rank: int = field(default=0, metadata={"help": "The rank of the current process."})
195
+ world_size: int = field(default=1, metadata={"help": "The total number of processes in the world."})
196
+ local_rank: int = field(default=0, metadata={"help": "The rank of the current process on the local node."})
197
+ local_world_size: int = field(default=1, metadata={"help": "The total number of processes on the local node."})
198
+
199
+
200
+ def get_cpu_distributed_information() -> CPUInformation:
201
+ """
202
+ Returns various information about the environment in relation to CPU distributed training as a `CPUInformation`
203
+ dataclass.
204
+ """
205
+ information = {}
206
+ information["rank"] = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0)
207
+ information["world_size"] = get_int_from_env(
208
+ ["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1
209
+ )
210
+ information["local_rank"] = get_int_from_env(
211
+ ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0
212
+ )
213
+ information["local_world_size"] = get_int_from_env(
214
+ ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"],
215
+ 1,
216
+ )
217
+ return CPUInformation(**information)
218
+
219
+
220
+ def override_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None:
221
+ """
222
+ Overrides whatever NUMA affinity is set for the current process. This is very taxing and requires recalculating the
223
+ affinity to set, ideally you should use `utils.environment.set_numa_affinity` instead.
224
+
225
+ Args:
226
+ local_process_index (int):
227
+ The index of the current process on the current server.
228
+ verbose (bool, *optional*):
229
+ Whether to log out the assignment of each CPU. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True.
230
+ """
231
+ if verbose is None:
232
+ verbose = parse_flag_from_env("ACCELERATE_DEBUG_MODE", False)
233
+ if torch.cuda.is_available():
234
+ from accelerate.utils import is_pynvml_available
235
+
236
+ if not is_pynvml_available():
237
+ raise ImportError(
238
+ "To set CPU affinity on CUDA GPUs the `pynvml` package must be available. (`pip install pynvml`)"
239
+ )
240
+ import pynvml as nvml
241
+
242
+ # The below code is based on https://github.com/NVIDIA/DeepLearningExamples/blob/master/TensorFlow2/LanguageModeling/BERT/gpu_affinity.py
243
+ nvml.nvmlInit()
244
+ num_elements = math.ceil(os.cpu_count() / 64)
245
+ handle = nvml.nvmlDeviceGetHandleByIndex(local_process_index)
246
+ affinity_string = ""
247
+ for j in nvml.nvmlDeviceGetCpuAffinity(handle, num_elements):
248
+ # assume nvml returns list of 64 bit ints
249
+ affinity_string = f"{j:064b}{affinity_string}"
250
+ affinity_list = [int(x) for x in affinity_string]
251
+ affinity_list.reverse() # so core 0 is the 0th element
252
+ affinity_to_set = [i for i, e in enumerate(affinity_list) if e != 0]
253
+ os.sched_setaffinity(0, affinity_to_set)
254
+ if verbose:
255
+ cpu_cores = os.sched_getaffinity(0)
256
+ logger.info(f"Assigning {len(cpu_cores)} cpu cores to process {local_process_index}: {cpu_cores}")
257
+
258
+
259
+ @lru_cache
260
+ def set_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None:
261
+ """
262
+ Assigns the current process to a specific NUMA node. Ideally most efficient when having at least 2 cpus per node.
263
+
264
+ This result is cached between calls. If you want to override it, please use
265
+ `accelerate.utils.environment.override_numa_afifnity`.
266
+
267
+ Args:
268
+ local_process_index (int):
269
+ The index of the current process on the current server.
270
+ verbose (bool, *optional*):
271
+ Whether to print the new cpu cores assignment for each process. If `ACCELERATE_DEBUG_MODE` is enabled, will
272
+ default to True.
273
+ """
274
+ override_numa_affinity(local_process_index=local_process_index, verbose=verbose)
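A short usage sketch (not part of the diff) for the environment helpers defined above, assuming `accelerate.utils.environment` from this commit is importable:

import os
from accelerate.utils.environment import get_int_from_env, parse_flag_from_env, str_to_bool

print(str_to_bool("off"))                            # 0
os.environ["ACCELERATE_DEBUG_MODE"] = "yes"
print(parse_flag_from_env("ACCELERATE_DEBUG_MODE"))  # True
print(get_int_from_env(["RANK", "PMI_RANK"], 0))     # 0, unless RANK/PMI_RANK is set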
env-llmeval/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py ADDED
@@ -0,0 +1,209 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+
16
+ import torch
17
+
18
+ from ..logging import get_logger
19
+ from .constants import FSDP_MODEL_NAME, FSDP_PYTORCH_VERSION, OPTIMIZER_NAME
20
+ from .imports import is_torch_distributed_available
21
+ from .modeling import is_peft_model
22
+ from .versions import is_torch_version
23
+
24
+
25
+ if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():
26
+ import torch.distributed.checkpoint as dist_cp
27
+ from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
28
+ from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
29
+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
30
+ from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
31
+
32
+
33
+ logger = get_logger(__name__)
34
+
35
+
36
+ def _get_model_state_dict(model, adapter_only=False):
37
+ if adapter_only and is_peft_model(model):
38
+ from peft import get_peft_model_state_dict
39
+
40
+ return get_peft_model_state_dict(model, adapter_name=model.active_adapter)
41
+ else:
42
+ return model.state_dict()
43
+
44
+
45
+ def _set_model_state_dict(model, state_dict, adapter_only=False):
46
+ if adapter_only and is_peft_model(model):
47
+ from peft import set_peft_model_state_dict
48
+
49
+ return set_peft_model_state_dict(model, state_dict, adapter_name=model.active_adapter)
50
+ else:
51
+ return model.load_state_dict(state_dict)
52
+
53
+
54
+ def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0, adapter_only=False):
55
+ os.makedirs(output_dir, exist_ok=True)
56
+
57
+ if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
58
+ # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT
59
+ # so, only enable it when num_processes>1
60
+ is_multi_process = accelerator.num_processes > 1
61
+ fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process
62
+ fsdp_plugin.state_dict_config.rank0_only = is_multi_process
63
+
64
+ with FSDP.state_dict_type(
65
+ model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
66
+ ):
67
+ state_dict = _get_model_state_dict(model, adapter_only=adapter_only)
68
+ if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
69
+ weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin"
70
+ output_model_file = os.path.join(output_dir, weights_name)
71
+ if accelerator.process_index == 0:
72
+ logger.info(f"Saving model to {output_model_file}")
73
+ torch.save(state_dict, output_model_file)
74
+ logger.info(f"Model saved to {output_model_file}")
75
+ elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
76
+ weights_name = (
77
+ f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin"
78
+ if model_index == 0
79
+ else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
80
+ )
81
+ output_model_file = os.path.join(output_dir, weights_name)
82
+ logger.info(f"Saving model to {output_model_file}")
83
+ torch.save(state_dict, output_model_file)
84
+ logger.info(f"Model saved to {output_model_file}")
85
+ elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
86
+ ckpt_dir = os.path.join(output_dir, f"{FSDP_MODEL_NAME}_{model_index}")
87
+ os.makedirs(ckpt_dir, exist_ok=True)
88
+ logger.info(f"Saving model to {ckpt_dir}")
89
+ state_dict = {"model": state_dict}
90
+
91
+ dist_cp.save_state_dict(
92
+ state_dict=state_dict,
93
+ storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
94
+ planner=DefaultSavePlanner(),
95
+ )
96
+ logger.info(f"Model saved to {ckpt_dir}")
97
+
98
+
99
+ def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0, adapter_only=False):
100
+ accelerator.wait_for_everyone()
101
+ if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
102
+ # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT
103
+ # so, only enable it when num_processes>1
104
+ is_multi_process = accelerator.num_processes > 1
105
+ fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process
106
+ fsdp_plugin.state_dict_config.rank0_only = is_multi_process
107
+ with FSDP.state_dict_type(
108
+ model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
109
+ ):
110
+ if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
111
+ if type(model) != FSDP and accelerator.process_index != 0:
112
+ if not fsdp_plugin.sync_module_states:
113
+ raise ValueError(
114
+ "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
115
+ "initializing FSDP object"
116
+ )
117
+ return
118
+ weights_name = f"{FSDP_MODEL_NAME}.bin" if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}.bin"
119
+ input_model_file = os.path.join(input_dir, weights_name)
120
+ logger.info(f"Loading model from {input_model_file}")
121
+ state_dict = torch.load(input_model_file)
122
+ logger.info(f"Model loaded from {input_model_file}")
123
+ elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
124
+ weights_name = (
125
+ f"{FSDP_MODEL_NAME}_rank{accelerator.process_index}.bin"
126
+ if model_index == 0
127
+ else f"{FSDP_MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
128
+ )
129
+ input_model_file = os.path.join(input_dir, weights_name)
130
+ logger.info(f"Loading model from {input_model_file}")
131
+ state_dict = torch.load(input_model_file)
132
+ logger.info(f"Model loaded from {input_model_file}")
133
+ elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
134
+ ckpt_dir = (
135
+ os.path.join(input_dir, f"{FSDP_MODEL_NAME}_{model_index}")
136
+ if f"{FSDP_MODEL_NAME}" not in input_dir
137
+ else input_dir
138
+ )
139
+ logger.info(f"Loading model from {ckpt_dir}")
140
+ state_dict = {"model": _get_model_state_dict(model, adapter_only=adapter_only)}
141
+ dist_cp.load_state_dict(
142
+ state_dict=state_dict,
143
+ storage_reader=dist_cp.FileSystemReader(ckpt_dir),
144
+ planner=DefaultLoadPlanner(),
145
+ )
146
+ state_dict = state_dict["model"]
147
+ logger.info(f"Model loaded from {ckpt_dir}")
148
+ load_result = _set_model_state_dict(model, state_dict, adapter_only=adapter_only)
149
+ return load_result
150
+
151
+
152
+ def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
153
+ os.makedirs(output_dir, exist_ok=True)
154
+ with FSDP.state_dict_type(
155
+ model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
156
+ ):
157
+ optim_state = FSDP.optim_state_dict(model, optimizer)
158
+ if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
159
+ if accelerator.process_index == 0:
160
+ optim_state_name = (
161
+ f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
162
+ )
163
+ output_optimizer_file = os.path.join(output_dir, optim_state_name)
164
+ logger.info(f"Saving Optimizer state to {output_optimizer_file}")
165
+ torch.save(optim_state, output_optimizer_file)
166
+ logger.info(f"Optimizer state saved in {output_optimizer_file}")
167
+ else:
168
+ ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
169
+ os.makedirs(ckpt_dir, exist_ok=True)
170
+ logger.info(f"Saving Optimizer state to {ckpt_dir}")
171
+ dist_cp.save_state_dict(
172
+ state_dict={"optimizer": optim_state},
173
+ storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
174
+ planner=DefaultSavePlanner(),
175
+ )
176
+ logger.info(f"Optimizer state saved in {ckpt_dir}")
177
+
178
+
179
+ def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0, adapter_only=False):
180
+ accelerator.wait_for_everyone()
181
+ with FSDP.state_dict_type(
182
+ model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
183
+ ):
184
+ if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
185
+ optim_state = None
186
+ if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
187
+ optimizer_name = (
188
+ f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
189
+ )
190
+ input_optimizer_file = os.path.join(input_dir, optimizer_name)
191
+ logger.info(f"Loading Optimizer state from {input_optimizer_file}")
192
+ optim_state = torch.load(input_optimizer_file)
193
+ logger.info(f"Optimizer state loaded from {input_optimizer_file}")
194
+ else:
195
+ ckpt_dir = (
196
+ os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
197
+ if f"{OPTIMIZER_NAME}" not in input_dir
198
+ else input_dir
199
+ )
200
+ logger.info(f"Loading Optimizer from {ckpt_dir}")
201
+ optim_state = load_sharded_optimizer_state_dict(
202
+ model_state_dict=_get_model_state_dict(model, adapter_only=adapter_only),
203
+ optimizer_key="optimizer",
204
+ storage_reader=dist_cp.FileSystemReader(ckpt_dir),
205
+ )
206
+ optim_state = optim_state["optimizer"]
207
+ logger.info(f"Optimizer loaded from {ckpt_dir}")
208
+ flattened_osd = FSDP.optim_state_dict_to_load(model=model, optim=optimizer, optim_state_dict=optim_state)
209
+ optimizer.load_state_dict(flattened_osd)
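To make the checkpoint layout easier to follow, here is a hypothetical standalone helper (not part of accelerate) that mirrors the weights-file naming used by `save_fsdp_model`/`load_fsdp_model` above for FULL_STATE_DICT and LOCAL_STATE_DICT; the value of `FSDP_MODEL_NAME` is assumed to match `constants.py` in this commit:

from typing import Optional

FSDP_MODEL_NAME = "pytorch_model_fsdp"  # assumed value from accelerate.utils.constants

def fsdp_weights_name(model_index: int = 0, rank: Optional[int] = None) -> str:
    # FULL_STATE_DICT: one file, no rank suffix; LOCAL_STATE_DICT: one file per rank
    base = FSDP_MODEL_NAME if model_index == 0 else f"{FSDP_MODEL_NAME}_{model_index}"
    return f"{base}.bin" if rank is None else f"{base}_rank{rank}.bin"

print(fsdp_weights_name())           # pytorch_model_fsdp.bin
print(fsdp_weights_name(1, rank=2))  # pytorch_model_fsdp_1_rank2.bin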
env-llmeval/lib/python3.10/site-packages/accelerate/utils/imports.py ADDED
@@ -0,0 +1,385 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib
16
+ import importlib.metadata
17
+ import os
18
+ import warnings
19
+ from functools import lru_cache
20
+
21
+ import torch
22
+ from packaging import version
23
+ from packaging.version import parse
24
+
25
+ from .environment import parse_flag_from_env, str_to_bool
26
+ from .versions import compare_versions, is_torch_version
27
+
28
+
29
+ # Try to run Torch native job in an environment with TorchXLA installed by setting this value to 0.
30
+ USE_TORCH_XLA = parse_flag_from_env("USE_TORCH_XLA", default=True)
31
+
32
+ _torch_xla_available = False
33
+ if USE_TORCH_XLA:
34
+ try:
35
+ import torch_xla.core.xla_model as xm # noqa: F401
36
+ import torch_xla.runtime
37
+
38
+ _torch_xla_available = True
39
+ except ImportError:
40
+ pass
41
+
42
+ # Keep it for is_tpu_available. It will be removed along with is_tpu_available.
43
+ _tpu_available = _torch_xla_available
44
+
45
+ # Cache this result has it's a C FFI call which can be pretty time-consuming
46
+ _torch_distributed_available = torch.distributed.is_available()
47
+
48
+
49
+ def _is_package_available(pkg_name, metadata_name=None):
50
+ # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version
51
+ package_exists = importlib.util.find_spec(pkg_name) is not None
52
+ if package_exists:
53
+ try:
54
+ # Some libraries have different names in the metadata
55
+ _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name)
56
+ return True
57
+ except importlib.metadata.PackageNotFoundError:
58
+ return False
59
+
60
+
61
+ def is_torch_distributed_available() -> bool:
62
+ return _torch_distributed_available
63
+
64
+
65
+ def is_ccl_available():
66
+ try:
67
+ pass
68
+ except ImportError:
69
+ print(
70
+ "Intel(R) oneCCL Bindings for PyTorch* is required to run DDP on Intel(R) GPUs, but it is not"
71
+ " detected. If you see \"ValueError: Invalid backend: 'ccl'\" error, please install Intel(R) oneCCL"
72
+ " Bindings for PyTorch*."
73
+ )
74
+ return (
75
+ importlib.util.find_spec("torch_ccl") is not None
76
+ or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
77
+ )
78
+
79
+
80
+ def get_ccl_version():
81
+ return importlib.metadata.version("oneccl_bind_pt")
82
+
83
+
84
+ def is_pynvml_available():
85
+ return _is_package_available("pynvml")
86
+
87
+
88
+ def is_msamp_available():
89
+ return _is_package_available("msamp", "ms-amp")
90
+
91
+
92
+ def is_transformer_engine_available():
93
+ return _is_package_available("transformer_engine")
94
+
95
+
96
+ def is_fp8_available():
97
+ return is_msamp_available() or is_transformer_engine_available()
98
+
99
+
100
+ def is_cuda_available():
101
+ """
102
+ Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda
103
+ uninitialized.
104
+ """
105
+ pytorch_nvml_based_cuda_check_previous_value = os.environ.get("PYTORCH_NVML_BASED_CUDA_CHECK")
106
+ try:
107
+ os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = str(1)
108
+ available = torch.cuda.is_available()
109
+ finally:
110
+ if pytorch_nvml_based_cuda_check_previous_value:
111
+ os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = pytorch_nvml_based_cuda_check_previous_value
112
+ else:
113
+ os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None)
114
+
115
+ return available
116
+
117
+
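A small sketch (not part of the diff) of the NVML-based check above; it assumes `accelerate.utils.imports` from this commit is importable and that nothing else has touched CUDA yet:

import torch
from accelerate.utils.imports import is_cuda_available

print(is_cuda_available())          # availability via the NVML-based check
print(torch.cuda.is_initialized())  # typically still False: no CUDA context was created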
118
+ @lru_cache
119
+ def is_tpu_available(check_device=True):
120
+ "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
121
+ warnings.warn(
122
+ "`is_tpu_available` is deprecated and will be removed in v0.27.0. "
123
+ "Please use the `is_torch_xla_available` instead.",
124
+ FutureWarning,
125
+ )
126
+ # Due to bugs on the amp series GPUs, we disable torch-xla on them
127
+ if is_cuda_available():
128
+ return False
129
+ if check_device:
130
+ if _tpu_available:
131
+ try:
132
+ # Will raise a RuntimeError if no XLA configuration is found
133
+ _ = xm.xla_device()
134
+ return True
135
+ except RuntimeError:
136
+ return False
137
+ return _tpu_available
138
+
139
+
140
+ @lru_cache
141
+ def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False):
142
+ """
143
+ Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set
144
+ the USE_TORCH_XLA environment variable to false.
145
+ """
146
+ assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true."
147
+
148
+ if not _torch_xla_available:
149
+ return False
150
+ elif check_is_gpu:
151
+ return torch_xla.runtime.device_type() in ["GPU", "CUDA"]
152
+ elif check_is_tpu:
153
+ return torch_xla.runtime.device_type() == "TPU"
154
+
155
+ return True
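# Illustrative usage of the check above (a sketch based on the branches it implements):
#   is_torch_xla_available()                  -> True whenever torch_xla imported successfully
#   is_torch_xla_available(check_is_tpu=True) -> True only when the XLA runtime reports a TPU
#   is_torch_xla_available(check_is_gpu=True) -> True only for the "GPU"/"CUDA" XLA device type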
156
+
157
+
158
+ def is_deepspeed_available():
159
+ if is_mlu_available():
160
+ return _is_package_available("deepspeed", metadata_name="deepspeed-mlu")
161
+ return _is_package_available("deepspeed")
162
+
163
+
164
+ def is_pippy_available():
165
+ package_exists = _is_package_available("pippy", "torchpippy")
166
+ if package_exists:
167
+ pippy_version = version.parse(importlib.metadata.version("torchpippy"))
168
+ return compare_versions(pippy_version, ">", "0.1.1")
169
+ return False
170
+
171
+
172
+ def is_bf16_available(ignore_tpu=False):
173
+ "Checks if bf16 is supported, optionally ignoring the TPU"
174
+ if is_torch_xla_available(check_is_tpu=True):
175
+ return not ignore_tpu
176
+ if is_cuda_available():
177
+ return torch.cuda.is_bf16_supported()
178
+ return True
179
+
180
+
181
+ def is_4bit_bnb_available():
182
+ package_exists = _is_package_available("bitsandbytes")
183
+ if package_exists:
184
+ bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
185
+ return compare_versions(bnb_version, ">=", "0.39.0")
186
+ return False
187
+
188
+
189
+ def is_8bit_bnb_available():
190
+ package_exists = _is_package_available("bitsandbytes")
191
+ if package_exists:
192
+ bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
193
+ return compare_versions(bnb_version, ">=", "0.37.2")
194
+ return False
195
+
196
+
197
+ def is_bnb_available():
198
+ return _is_package_available("bitsandbytes")
199
+
200
+
201
+ def is_megatron_lm_available():
202
+ if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
203
+ package_exists = importlib.util.find_spec("megatron") is not None
204
+ if package_exists:
205
+ try:
206
+ megatron_version = parse(importlib.metadata.version("megatron-lm"))
207
+ return compare_versions(megatron_version, ">=", "2.2.0")
208
+ except Exception as e:
209
+ warnings.warn(f"Parse Megatron version failed. Exception:{e}")
210
+ return False
211
+
212
+
213
+ def is_transformers_available():
214
+ return _is_package_available("transformers")
215
+
216
+
217
+ def is_datasets_available():
218
+ return _is_package_available("datasets")
219
+
220
+
221
+ def is_peft_available():
222
+ return _is_package_available("peft")
223
+
224
+
225
+ def is_timm_available():
226
+ return _is_package_available("timm")
227
+
228
+
229
+ def is_aim_available():
230
+ package_exists = _is_package_available("aim")
231
+ if package_exists:
232
+ aim_version = version.parse(importlib.metadata.version("aim"))
233
+ return compare_versions(aim_version, "<", "4.0.0")
234
+ return False
235
+
236
+
237
+ def is_tensorboard_available():
238
+ return _is_package_available("tensorboard") or _is_package_available("tensorboardX")
239
+
240
+
241
+ def is_wandb_available():
242
+ return _is_package_available("wandb")
243
+
244
+
245
+ def is_comet_ml_available():
246
+ return _is_package_available("comet_ml")
247
+
248
+
249
+ def is_boto3_available():
250
+ return _is_package_available("boto3")
251
+
252
+
253
+ def is_rich_available():
254
+ if _is_package_available("rich"):
255
+ if "ACCELERATE_DISABLE_RICH" in os.environ:
256
+ warnings.warn(
257
+ "`ACCELERATE_DISABLE_RICH` is deprecated and will be removed in v0.22.0 and deactivated by default. Please use `ACCELERATE_ENABLE_RICH` if you wish to use `rich`."
258
+ )
259
+ return not parse_flag_from_env("ACCELERATE_DISABLE_RICH", False)
260
+ return parse_flag_from_env("ACCELERATE_ENABLE_RICH", False)
261
+ return False
262
+
263
+
264
+ def is_sagemaker_available():
265
+ return _is_package_available("sagemaker")
266
+
267
+
268
+ def is_tqdm_available():
269
+ return _is_package_available("tqdm")
270
+
271
+
272
+ def is_clearml_available():
273
+ return _is_package_available("clearml")
274
+
275
+
276
+ def is_pandas_available():
277
+ return _is_package_available("pandas")
278
+
279
+
280
+ def is_mlflow_available():
281
+ if _is_package_available("mlflow"):
282
+ return True
283
+
284
+ if importlib.util.find_spec("mlflow") is not None:
285
+ try:
286
+ _ = importlib.metadata.metadata("mlflow-skinny")
287
+ return True
288
+ except importlib.metadata.PackageNotFoundError:
289
+ return False
290
+ return False
291
+
292
+
293
+ def is_mps_available():
294
+ return is_torch_version(">=", "1.12") and torch.backends.mps.is_available() and torch.backends.mps.is_built()
295
+
296
+
297
+ def is_ipex_available():
298
+ def get_major_and_minor_from_version(full_version):
299
+ return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)
300
+
301
+ _torch_version = importlib.metadata.version("torch")
302
+ if importlib.util.find_spec("intel_extension_for_pytorch") is None:
303
+ return False
304
+ _ipex_version = "N/A"
305
+ try:
306
+ _ipex_version = importlib.metadata.version("intel_extension_for_pytorch")
307
+ except importlib.metadata.PackageNotFoundError:
308
+ return False
309
+ torch_major_and_minor = get_major_and_minor_from_version(_torch_version)
310
+ ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version)
311
+ if torch_major_and_minor != ipex_major_and_minor:
312
+ warnings.warn(
313
+ f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*,"
314
+ f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again."
315
+ )
316
+ return False
317
+ return True
318
+
319
+
320
+ @lru_cache
321
+ def is_mlu_available(check_device=False):
322
+ "Checks if `torch_mlu` is installed and potentially if a MLU is in the environment"
323
+ if importlib.util.find_spec("torch_mlu") is None:
324
+ return False
325
+
326
+ import torch
327
+ import torch_mlu # noqa: F401
328
+
329
+ if check_device:
330
+ try:
331
+ # Will raise a RuntimeError if no MLU is found
332
+ _ = torch.mlu.device_count()
333
+ return torch.mlu.is_available()
334
+ except RuntimeError:
335
+ return False
336
+ return hasattr(torch, "mlu") and torch.mlu.is_available()
337
+
338
+
339
+ @lru_cache
340
+ def is_npu_available(check_device=False):
341
+ "Checks if `torch_npu` is installed and potentially if a NPU is in the environment"
342
+ if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None:
343
+ return False
344
+
345
+ import torch
346
+ import torch_npu # noqa: F401
347
+
348
+ if check_device:
349
+ try:
350
+ # Will raise a RuntimeError if no NPU is found
351
+ _ = torch.npu.device_count()
352
+ return torch.npu.is_available()
353
+ except RuntimeError:
354
+ return False
355
+ return hasattr(torch, "npu") and torch.npu.is_available()
356
+
357
+
358
+ @lru_cache
359
+ def is_xpu_available(check_device=False):
360
+ "check if user disables it explicitly"
361
+ if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True):
362
+ return False
363
+ "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment"
364
+ if is_ipex_available():
365
+ import torch
366
+
367
+ if is_torch_version("<=", "1.12"):
368
+ return False
369
+ else:
370
+ return False
371
+
372
+ import intel_extension_for_pytorch # noqa: F401
373
+
374
+ if check_device:
375
+ try:
376
+ # Will raise a RuntimeError if no XPU is found
377
+ _ = torch.xpu.device_count()
378
+ return torch.xpu.is_available()
379
+ except RuntimeError:
380
+ return False
381
+ return hasattr(torch, "xpu") and torch.xpu.is_available()
382
+
383
+
384
+ def is_dvclive_available():
385
+ return _is_package_available("dvclive")
env-llmeval/lib/python3.10/site-packages/accelerate/utils/launch.py ADDED
@@ -0,0 +1,624 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import os
17
+ import subprocess
18
+ import sys
19
+ import warnings
20
+ from ast import literal_eval
21
+ from shutil import which
22
+ from typing import Any, Dict, List, Tuple
23
+
24
+ import torch
25
+
26
+ from ..commands.config.config_args import SageMakerConfig
27
+ from ..utils import (
28
+ DynamoBackend,
29
+ PrecisionType,
30
+ is_ipex_available,
31
+ is_mlu_available,
32
+ is_npu_available,
33
+ is_torch_xla_available,
34
+ is_xpu_available,
35
+ )
36
+ from ..utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS
37
+ from ..utils.other import is_port_in_use, merge_dicts
38
+ from .dataclasses import DistributedType, SageMakerDistributedType
39
+
40
+
41
+ def _filter_args(args, parser, default_args=[]):
42
+ """
43
+ Filters out all `accelerate` specific args
44
+ """
45
+ new_args, _ = parser.parse_known_args(default_args)
46
+ for key, value in vars(args).items():
47
+ if key in vars(new_args).keys():
48
+ setattr(new_args, key, value)
49
+ return new_args
50
+
51
+
52
+ def _get_mpirun_args():
53
+ """
54
+ Determines the executable and argument names for mpirun, based on the type of install. The supported MPI programs
55
+ are: OpenMPI, Intel MPI, or MVAPICH.
56
+
57
+ Returns: Program name and arg names for hostfile, num processes, and processes per node
58
+ """
59
+ # Find the MPI program name
60
+ mpi_apps = [x for x in ["mpirun", "mpiexec"] if which(x)]
61
+
62
+ if len(mpi_apps) == 0:
63
+ raise OSError("mpirun or mpiexec were not found. Ensure that Intel MPI, Open MPI, or MVAPICH are installed.")
64
+
65
+ # Call the app with the --version flag to determine which MPI app is installed
66
+ mpi_app = mpi_apps[0]
67
+ mpirun_version = subprocess.check_output([mpi_app, "--version"])
68
+
69
+ if b"Open MPI" in mpirun_version:
70
+ return mpi_app, "--hostfile", "-n", "--npernode"
71
+ else:
72
+ # Intel MPI and MVAPICH both use the same arg names
73
+ return mpi_app, "-f", "-n", "-ppn"
74
+
75
+
76
+ def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]:
77
+ """
78
+ Prepares and returns the command list and an environment with the correct simple launcher environment variables.
79
+ """
80
+ cmd = []
81
+ if args.no_python and args.module:
82
+ raise ValueError("--module and --no_python cannot be used together")
83
+
84
+ if args.mpirun_hostfile is not None:
85
+ mpi_app_name, hostfile_arg, num_proc_arg, proc_per_node_arg = _get_mpirun_args()
86
+ mpirun_ccl = getattr(args, "mpirun_ccl", None)
87
+ num_machines = args.num_machines
88
+ num_processes = getattr(args, "num_processes", None)
89
+ nproc_per_node = str(num_processes // num_machines) if num_processes and num_machines else "1"
90
+ cmd += [mpi_app_name, hostfile_arg, args.mpirun_hostfile, proc_per_node_arg, nproc_per_node]
91
+ if num_processes:
92
+ cmd += [num_proc_arg, str(num_processes)]
93
+ if not args.no_python:
94
+ cmd.append(sys.executable)
95
+ if args.module:
96
+ cmd.append("-m")
97
+ cmd.append(args.training_script)
98
+ cmd.extend(args.training_script_args)
99
+
100
+ current_env = os.environ.copy()
101
+ current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu)
102
+ if args.debug:
103
+ current_env["ACCELERATE_DEBUG_MODE"] = "true"
104
+ if args.gpu_ids != "all" and args.gpu_ids is not None:
105
+ if is_xpu_available():
106
+ current_env["ZE_AFFINITY_MASK"] = args.gpu_ids
107
+ elif is_mlu_available():
108
+ current_env["MLU_VISIBLE_DEVICES"] = args.gpu_ids
109
+ elif is_npu_available():
110
+ current_env["ASCEND_RT_VISIBLE_DEVICES"] = args.gpu_ids
111
+ else:
112
+ current_env["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
113
+ if args.num_machines > 1:
114
+ current_env["MASTER_ADDR"] = args.main_process_ip
115
+ current_env["MASTER_PORT"] = str(args.main_process_port)
116
+
117
+ if args.mpirun_hostfile is not None:
118
+ current_env["CCL_WORKER_COUNT"] = mpirun_ccl
119
+ elif args.num_processes > 1:
120
+ current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1"
121
+ current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500"
122
+
123
+ try:
124
+ mixed_precision = PrecisionType(args.mixed_precision.lower())
125
+ except ValueError:
126
+ raise ValueError(
127
+ f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
128
+ )
129
+
130
+ current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
131
+
132
+ try:
133
+ dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
134
+ except ValueError:
135
+ raise ValueError(
136
+ f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
137
+ )
138
+ current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value
139
+ current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
140
+ current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)
141
+ current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic)
142
+
143
+ current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process)
144
+ if is_ipex_available():
145
+ current_env["ACCELERATE_USE_IPEX"] = str(args.ipex).lower()
146
+ current_env["ACCELERATE_USE_XPU"] = str(args.use_xpu).lower()
147
+ if args.enable_cpu_affinity:
148
+ current_env["ACCELERATE_CPU_AFFINITY"] = "1"
149
+ return cmd, current_env
150
+
151
+
152
+ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
153
+ """
154
+ Prepares and returns an environment with the correct multi-GPU environment variables.
155
+ """
156
+ num_processes = args.num_processes
157
+ num_machines = args.num_machines
158
+ main_process_ip = args.main_process_ip
159
+ main_process_port = args.main_process_port
160
+ if num_machines > 1:
161
+ args.nproc_per_node = str(num_processes // num_machines)
162
+ args.nnodes = str(num_machines)
163
+ args.node_rank = int(args.machine_rank)
164
+ if getattr(args, "same_network", False):
165
+ args.master_addr = str(main_process_ip)
166
+ args.master_port = str(main_process_port)
167
+ else:
168
+ args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}"
169
+ else:
170
+ args.nproc_per_node = str(num_processes)
171
+ if main_process_port is not None:
172
+ args.master_port = str(main_process_port)
173
+
174
+ if main_process_port is None:
175
+ main_process_port = 29500
176
+
177
+ # only need to check port availability in main process, in case we have to start multiple launchers on the same machine
178
+ # for reasons such as splitting log files.
179
+ need_port_check = num_machines <= 1 or int(args.machine_rank) == 0
180
+ if need_port_check and is_port_in_use(main_process_port):
181
+ raise ConnectionError(
182
+ f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. "
183
+ "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)"
184
+ " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`."
185
+ )
186
+
187
+ if args.module and args.no_python:
188
+ raise ValueError("--module and --no_python cannot be used together")
189
+ elif args.module:
190
+ args.module = True
191
+ elif args.no_python:
192
+ args.no_python = True
193
+
194
+ current_env = os.environ.copy()
195
+ if args.debug:
196
+ current_env["ACCELERATE_DEBUG_MODE"] = "true"
197
+ gpu_ids = getattr(args, "gpu_ids", "all")
198
+ if gpu_ids != "all" and args.gpu_ids is not None:
199
+ if is_xpu_available():
200
+ current_env["ZE_AFFINITY_MASK"] = gpu_ids
201
+ elif is_mlu_available():
202
+ current_env["MLU_VISIBLE_DEVICES"] = gpu_ids
203
+ elif is_npu_available():
204
+ current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids
205
+ else:
206
+ current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids
207
+ mixed_precision = args.mixed_precision.lower()
208
+ try:
209
+ mixed_precision = PrecisionType(mixed_precision)
210
+ except ValueError:
211
+ raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.")
212
+
213
+ current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
214
+
215
+ try:
216
+ dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
217
+ except ValueError:
218
+ raise ValueError(
219
+ f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
220
+ )
221
+ current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value
222
+ current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
223
+ current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)
224
+ current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic)
225
+
226
+ if args.use_fsdp:
227
+ current_env["ACCELERATE_USE_FSDP"] = "true"
228
+ if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states:
229
+ raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`")
230
+
231
+ current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy)
232
+ current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower()
233
+ current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params)
234
+ if args.fsdp_auto_wrap_policy is not None:
235
+ current_env["FSDP_AUTO_WRAP_POLICY"] = str(args.fsdp_auto_wrap_policy)
236
+ if args.fsdp_transformer_layer_cls_to_wrap is not None:
237
+ current_env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = str(args.fsdp_transformer_layer_cls_to_wrap)
238
+ if args.fsdp_backward_prefetch_policy is not None:
239
+ warnings.warn(
240
+ "`fsdp_backward_prefetch_policy` is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use"
241
+ " `fsdp_backward_prefetch` instead",
242
+ FutureWarning,
243
+ )
244
+ args.fsdp_backward_prefetch = args.fsdp_backward_prefetch_policy
245
+ if args.fsdp_backward_prefetch is not None:
246
+ current_env["FSDP_BACKWARD_PREFETCH"] = str(args.fsdp_backward_prefetch)
247
+ if args.fsdp_state_dict_type is not None:
248
+ current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type)
249
+ current_env["FSDP_FORWARD_PREFETCH"] = str(args.fsdp_forward_prefetch).lower()
250
+ current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower()
251
+ current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower()
252
+ current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower()
253
+
254
+ if args.use_megatron_lm:
255
+ prefix = "MEGATRON_LM_"
256
+ current_env["ACCELERATE_USE_MEGATRON_LM"] = "true"
257
+ current_env[prefix + "TP_DEGREE"] = str(args.megatron_lm_tp_degree)
258
+ current_env[prefix + "PP_DEGREE"] = str(args.megatron_lm_pp_degree)
259
+ current_env[prefix + "GRADIENT_CLIPPING"] = str(args.megatron_lm_gradient_clipping)
260
+ if args.megatron_lm_num_micro_batches is not None:
261
+ current_env[prefix + "NUM_MICRO_BATCHES"] = str(args.megatron_lm_num_micro_batches)
262
+ if args.megatron_lm_sequence_parallelism is not None:
263
+ current_env[prefix + "SEQUENCE_PARALLELISM"] = str(args.megatron_lm_sequence_parallelism)
264
+ if args.megatron_lm_recompute_activations is not None:
265
+ current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations)
266
+ if args.megatron_lm_use_distributed_optimizer is not None:
267
+ current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer)
268
+
269
+ current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process)
270
+ if args.enable_cpu_affinity:
271
+ current_env["ACCELERATE_CPU_AFFINITY"] = "1"
272
+ return current_env
273
+
274
+
275
+ def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]:
276
+ """
277
+ Prepares and returns the command list and an environment with the correct DeepSpeed environment variables.
278
+ """
279
+ num_processes = args.num_processes
280
+ num_machines = args.num_machines
281
+ main_process_ip = args.main_process_ip
282
+ main_process_port = args.main_process_port
283
+ cmd = None
284
+
285
+ # make sure launcher is not None
286
+ if args.deepspeed_multinode_launcher is None:
287
+ # set to default pdsh
288
+ args.deepspeed_multinode_launcher = DEEPSPEED_MULTINODE_LAUNCHERS[0]
289
+
290
+ if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
291
+ cmd = ["deepspeed", "--no_local_rank"]
292
+ cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)])
293
+ if args.deepspeed_exclusion_filter is not None:
294
+ cmd.extend(
295
+ [
296
+ "--exclude",
297
+ str(args.deepspeed_exclusion_filter),
298
+ ]
299
+ )
300
+ elif args.deepspeed_inclusion_filter is not None:
301
+ cmd.extend(
302
+ [
303
+ "--include",
304
+ str(args.deepspeed_inclusion_filter),
305
+ ]
306
+ )
307
+ else:
308
+ cmd.extend(["--num_gpus", str(args.num_processes // args.num_machines)])
309
+ if main_process_ip:
310
+ cmd.extend(["--master_addr", str(main_process_ip)])
311
+ cmd.extend(["--master_port", str(main_process_port)])
312
+ if args.module and args.no_python:
313
+ raise ValueError("--module and --no_python cannot be used together")
314
+ elif args.module:
315
+ cmd.append("--module")
316
+ elif args.no_python:
317
+ cmd.append("--no_python")
318
+ cmd.append(args.training_script)
319
+ cmd.extend(args.training_script_args)
320
+ elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]:
321
+ args.nproc_per_node = str(num_processes // num_machines)
322
+ args.nnodes = str(num_machines)
323
+ args.node_rank = int(args.machine_rank)
324
+ if getattr(args, "same_network", False):
325
+ args.master_addr = str(main_process_ip)
326
+ args.master_port = str(main_process_port)
327
+ else:
328
+ args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}"
329
+ else:
330
+ args.nproc_per_node = str(num_processes)
331
+ if main_process_port is not None:
332
+ args.master_port = str(main_process_port)
333
+
334
+ if main_process_port is None:
335
+ main_process_port = 29500
336
+
337
+ # only need to check port availability in main process, in case we have to start multiple launchers on the same machine
338
+ # for reasons such as splitting log files.
339
+ need_port_check = num_machines <= 1 or int(args.machine_rank) == 0
340
+ if need_port_check and is_port_in_use(main_process_port):
341
+ raise ConnectionError(
342
+ f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. "
343
+ "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)"
344
+ " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`."
345
+ )
346
+
347
+ if args.module and args.no_python:
348
+ raise ValueError("--module and --no_python cannot be used together")
349
+ elif args.module:
350
+ args.module = True
351
+ elif args.no_python:
352
+ args.no_python = True
353
+
354
+ current_env = os.environ.copy()
355
+ if args.debug:
356
+ current_env["ACCELERATE_DEBUG_MODE"] = "true"
357
+ gpu_ids = getattr(args, "gpu_ids", "all")
358
+ if gpu_ids != "all" and args.gpu_ids is not None:
359
+ if is_xpu_available():
360
+ current_env["ZE_AFFINITY_MASK"] = gpu_ids
361
+ elif is_mlu_available():
362
+ current_env["MLU_VISIBLE_DEVICES"] = gpu_ids
363
+ elif is_npu_available():
364
+ current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids
365
+ else:
366
+ current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids
367
+ try:
368
+ mixed_precision = PrecisionType(args.mixed_precision.lower())
369
+ except ValueError:
370
+ raise ValueError(
371
+ f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
372
+ )
373
+
374
+ current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath("."))
375
+ current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
376
+ current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower()
377
+ current_env["ACCELERATE_USE_DEEPSPEED"] = "true"
378
+ if args.zero_stage is not None:
379
+ current_env["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage)
380
+ if args.gradient_accumulation_steps is not None:
381
+ current_env["ACCELERATE_GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps)
382
+ if args.gradient_clipping is not None:
383
+ current_env["ACCELERATE_GRADIENT_CLIPPING"] = str(args.gradient_clipping).lower()
384
+ if args.offload_optimizer_device is not None:
385
+ current_env["ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device).lower()
386
+ if args.offload_param_device is not None:
387
+ current_env["ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE"] = str(args.offload_param_device).lower()
388
+ if args.zero3_init_flag is not None:
389
+ current_env["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = str(args.zero3_init_flag).lower()
390
+ if args.zero3_save_16bit_model is not None:
391
+ current_env["ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL"] = str(args.zero3_save_16bit_model).lower()
392
+ if args.deepspeed_config_file is not None:
393
+ current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file)
394
+ if args.enable_cpu_affinity:
395
+ current_env["ACCELERATE_CPU_AFFINITY"] = "1"
396
+ return cmd, current_env
397
+
398
+
399
+ def prepare_tpu(
400
+ args: argparse.Namespace, current_env: Dict[str, str], pod: bool = False
401
+ ) -> Tuple[argparse.Namespace, Dict[str, str]]:
402
+ """
403
+ Prepares and returns an environment with the correct TPU environment variables.
404
+ """
405
+ if args.mixed_precision == "bf16" and is_torch_xla_available(check_is_tpu=True):
406
+ if args.downcast_bf16:
407
+ current_env["XLA_DOWNCAST_BF16"] = "1"
408
+ else:
409
+ current_env["XLA_USE_BF16"] = "1"
410
+ if args.debug:
411
+ current_env["ACCELERATE_DEBUG_MODE"] = "true"
412
+ if pod:
413
+ # Take explicit args and set them up for XLA
414
+ args.vm = args.tpu_vm
415
+ args.tpu = args.tpu_name
416
+ return args, current_env
417
+
418
+
419
+ def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:
420
+ if len(nargs) == 0:
421
+ return {}
422
+ # helper function to infer the value type for argparse
423
+
424
+ def _infer_type(s):
425
+ try:
426
+ s = float(s)
427
+
428
+ if s // 1 == s:
429
+ return int(s)
430
+ return s
431
+ except ValueError:
432
+ return s
433
+
434
+ parser = argparse.ArgumentParser()
435
+ _, unknown = parser.parse_known_args(nargs)
436
+ for index, argument in enumerate(unknown):
437
+ if argument.startswith(("-", "--")):
438
+ action = None
439
+ if index + 1 < len(unknown): # checks if next index would be in list
440
+ if unknown[index + 1].startswith(("-", "--")): # checks if the next element is a key
441
+ # raise an error if element is store_true or store_false
442
+ raise ValueError(
443
+ "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types"
444
+ )
445
+ else: # raise an error if last element is store_true or store_false
446
+ raise ValueError(
447
+ "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types"
448
+ )
449
+ # add the argument to the parser, inferring its value type (store-style actions were rejected above)
450
+ if action is None:
451
+ parser.add_argument(argument, type=_infer_type)
452
+ else:
453
+ parser.add_argument(argument, action=action)
454
+
455
+ return {
456
+ key: (literal_eval(value) if value in ("True", "False") else value)
457
+ for key, value in parser.parse_args(nargs).__dict__.items()
458
+ }
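# Rough illustration (hedged, not part of the original file): given the type inference above,
#     _convert_nargs_to_dict(["--learning_rate", "3e-5", "--num_epochs", "3", "--run_name", "demo"])
# would be expected to return roughly
#     {"learning_rate": 3e-05, "num_epochs": 3, "run_name": "demo"}
# since whole-number floats are cast back to int, other strings are kept as-is, and
# store_true/store_false style flags raise a ValueError instead.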
459
+
460
+
461
+ def prepare_sagemager_args_inputs(
462
+ sagemaker_config: SageMakerConfig, args: argparse.Namespace
463
+ ) -> Tuple[argparse.Namespace, Dict[str, Any]]:
464
+ # configure environment
465
+ print("Configuring Amazon SageMaker environment")
466
+ os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region
467
+
468
+ # configure credentials
469
+ if sagemaker_config.profile is not None:
470
+ os.environ["AWS_PROFILE"] = sagemaker_config.profile
471
+ elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None:
472
+ os.environ["AWS_ACCESS_KEY_ID"] = args.aws_access_key_id
473
+ os.environ["AWS_SECRET_ACCESS_KEY"] = args.aws_secret_access_key
474
+ else:
475
+ raise OSError("You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile")
476
+
477
+ # extract needed arguments
478
+ source_dir = os.path.dirname(args.training_script)
479
+ if not source_dir: # checks if string is empty
480
+ source_dir = "."
481
+ entry_point = os.path.basename(args.training_script)
482
+ if not entry_point.endswith(".py"):
483
+ raise ValueError(f'Your training script should be a python script and not "{entry_point}"')
484
+
485
+ print("Converting Arguments to Hyperparameters")
486
+ hyperparameters = _convert_nargs_to_dict(args.training_script_args)
487
+
488
+ try:
489
+ mixed_precision = PrecisionType(args.mixed_precision.lower())
490
+ except ValueError:
491
+ raise ValueError(
492
+ f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
493
+ )
494
+
495
+ try:
496
+ dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
497
+ except ValueError:
498
+ raise ValueError(
499
+ f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
500
+ )
501
+
502
+ # Environment variables to be set for use during training job
503
+ environment = {
504
+ "ACCELERATE_USE_SAGEMAKER": "true",
505
+ "ACCELERATE_MIXED_PRECISION": str(mixed_precision),
506
+ "ACCELERATE_DYNAMO_BACKEND": dynamo_backend.value,
507
+ "ACCELERATE_DYNAMO_MODE": args.dynamo_mode,
508
+ "ACCELERATE_DYNAMO_USE_FULLGRAPH": str(args.dynamo_use_fullgraph),
509
+ "ACCELERATE_DYNAMO_USE_DYNAMIC": str(args.dynamo_use_dynamic),
510
+ "ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE": sagemaker_config.distributed_type.value,
511
+ }
512
+ # configure distribution set up
513
+ distribution = None
514
+ if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL:
515
+ distribution = {"smdistributed": {"dataparallel": {"enabled": True}}}
516
+
517
+ # configure sagemaker inputs
518
+ sagemaker_inputs = None
519
+ if sagemaker_config.sagemaker_inputs_file is not None:
520
+ print(f"Loading SageMaker Inputs from {sagemaker_config.sagemaker_inputs_file} file")
521
+ sagemaker_inputs = {}
522
+ with open(sagemaker_config.sagemaker_inputs_file) as file:
523
+ for i, line in enumerate(file):
524
+ if i == 0:
525
+ continue
526
+ l = line.split("\t")
527
+ sagemaker_inputs[l[0]] = l[1].strip()
528
+ print(f"Loaded SageMaker Inputs: {sagemaker_inputs}")
529
+
530
+ # configure sagemaker metrics
531
+ sagemaker_metrics = None
532
+ if sagemaker_config.sagemaker_metrics_file is not None:
533
+ print(f"Loading SageMaker Metrics from {sagemaker_config.sagemaker_metrics_file} file")
534
+ sagemaker_metrics = []
535
+ with open(sagemaker_config.sagemaker_metrics_file) as file:
536
+ for i, line in enumerate(file):
537
+ if i == 0:
538
+ continue
539
+ l = line.split("\t")
540
+ metric_dict = {
541
+ "Name": l[0],
542
+ "Regex": l[1].strip(),
543
+ }
544
+ sagemaker_metrics.append(metric_dict)
545
+ print(f"Loaded SageMaker Metrics: {sagemaker_metrics}")
546
+
547
+ # configure session
548
+ print("Creating Estimator")
549
+ args = {
550
+ "image_uri": sagemaker_config.image_uri,
551
+ "entry_point": entry_point,
552
+ "source_dir": source_dir,
553
+ "role": sagemaker_config.iam_role_name,
554
+ "transformers_version": sagemaker_config.transformers_version,
555
+ "pytorch_version": sagemaker_config.pytorch_version,
556
+ "py_version": sagemaker_config.py_version,
557
+ "base_job_name": sagemaker_config.base_job_name,
558
+ "instance_count": sagemaker_config.num_machines,
559
+ "instance_type": sagemaker_config.ec2_instance_type,
560
+ "debugger_hook_config": False,
561
+ "distribution": distribution,
562
+ "hyperparameters": hyperparameters,
563
+ "environment": environment,
564
+ "metric_definitions": sagemaker_metrics,
565
+ }
566
+
567
+ if sagemaker_config.additional_args is not None:
568
+ args = merge_dicts(sagemaker_config.additional_args, args)
569
+ return args, sagemaker_inputs
570
+
571
+
572
+ def env_var_path_add(env_var_name, path_to_add):
573
+ """
574
+ Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the
575
+ caller to set it in os.environ.
576
+ """
577
+ paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0]
578
+ paths.append(str(path_to_add))
579
+ return ":".join(paths)
580
+
581
+
582
+ class PrepareForLaunch:
583
+ """
584
+ Prepare a function that will be launched in a distributed setup.
585
+
586
+ Args:
587
+ launcher (`Callable`):
588
+ The function to launch.
589
+ distributed_type ([`~state.DistributedType`]):
590
+ The distributed type to prepare for.
591
+ debug (`bool`, *optional*, defaults to `False`):
592
+ Whether or not this is a debug launch.
593
+ """
594
+
595
+ def __init__(self, launcher, distributed_type="NO", debug=False):
596
+ self.launcher = launcher
597
+ self.distributed_type = DistributedType(distributed_type)
598
+ self.debug = debug
599
+
600
+ def __call__(self, index, *args):
601
+ if self.debug:
602
+ world_size = int(os.environ.get("WORLD_SIZE"))
603
+ rdv_file = os.environ.get("ACCELERATE_DEBUG_RDV_FILE")
604
+ torch.distributed.init_process_group(
605
+ "gloo",
606
+ rank=index,
607
+ store=torch.distributed.FileStore(rdv_file, world_size),
608
+ world_size=world_size,
609
+ )
610
+ elif self.distributed_type in (
611
+ DistributedType.MULTI_GPU,
612
+ DistributedType.MULTI_MLU,
613
+ DistributedType.MULTI_NPU,
614
+ DistributedType.MULTI_XPU,
615
+ DistributedType.MULTI_CPU,
616
+ ):
617
+ # Prepare the environment for torch.distributed
618
+ os.environ["LOCAL_RANK"] = str(index)
619
+ nproc = int(os.environ.get("NPROC", 1))
620
+ node_rank = int(os.environ.get("NODE_RANK", 0))
621
+ os.environ["RANK"] = str(nproc * node_rank + index)
622
+
623
+ os.environ["FORK_LAUNCHED"] = str(1)
624
+ self.launcher(*args)
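PrepareForLaunch is essentially a picklable adapter around a training function: the process index it receives becomes LOCAL_RANK/RANK, and the wrapped function is then called with the remaining arguments. A rough sketch of how it could be driven with torch.multiprocessing (the train_fn function and the two-process count are placeholder choices, and a real job would still need to initialize its own process group):

import os

import torch.multiprocessing as mp

from accelerate.utils.launch import PrepareForLaunch


def train_fn(message):
    # Each spawned worker sees the LOCAL_RANK / RANK values set by PrepareForLaunch.__call__.
    print(f"rank {os.environ.get('RANK')} says: {message}")


if __name__ == "__main__":
    launcher = PrepareForLaunch(train_fn, distributed_type="MULTI_GPU")
    # torch.multiprocessing.spawn passes the process index as the first argument,
    # which matches PrepareForLaunch.__call__(index, *args).
    mp.spawn(launcher, args=("hello",), nprocs=2)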
env-llmeval/lib/python3.10/site-packages/accelerate/utils/megatron_lm.py ADDED
@@ -0,0 +1,1435 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import math
17
+ from abc import ABC
18
+ from functools import partial
19
+
20
+ import torch
21
+ import torch.nn.functional as F
22
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
23
+ from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
24
+
25
+ from ..optimizer import AcceleratedOptimizer
26
+ from ..scheduler import AcceleratedScheduler
27
+ from .imports import is_megatron_lm_available, is_transformers_available
28
+ from .operations import recursively_apply, send_to_device
29
+
30
+
31
+ if is_transformers_available():
32
+ from transformers.modeling_outputs import (
33
+ CausalLMOutputWithCrossAttentions,
34
+ Seq2SeqLMOutput,
35
+ SequenceClassifierOutput,
36
+ )
37
+
38
+
39
+ if is_megatron_lm_available():
40
+ from megatron import (
41
+ get_args,
42
+ get_num_microbatches,
43
+ get_tensorboard_writer,
44
+ get_timers,
45
+ get_tokenizer,
46
+ mpu,
47
+ print_rank_0,
48
+ print_rank_last,
49
+ )
50
+ from megatron.arguments import _add_data_args, _add_validation_args, parse_args, validate_args
51
+ from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint, save_checkpoint
52
+ from megatron.data.data_samplers import MegatronPretrainingRandomSampler, MegatronPretrainingSampler
53
+ from megatron.global_vars import set_global_variables
54
+ from megatron.initialize import (
55
+ _compile_dependencies,
56
+ _init_autoresume,
57
+ _set_random_seed,
58
+ set_jit_fusion_options,
59
+ write_args_to_tensorboard,
60
+ )
61
+ from megatron.model import BertModel, Float16Module, GPTModel, ModelType, T5Model
62
+ from megatron.model import DistributedDataParallel as LocalDDP
63
+ from megatron.model.classification import Classification
64
+ from megatron.optimizer import get_megatron_optimizer
65
+ from megatron.schedules import get_forward_backward_func
66
+ from megatron.text_generation.communication import broadcast_int_list, broadcast_tensor
67
+ from megatron.text_generation.generation import (
68
+ beam_search_and_return_on_first_stage,
69
+ generate_tokens_probs_and_return_on_first_stage,
70
+ )
71
+ from megatron.tokenizer.tokenizer import _vocab_size_with_padding
72
+ from megatron.training import get_model, get_optimizer_param_scheduler, training_log
73
+ from megatron.utils import (
74
+ average_losses_across_data_parallel_group,
75
+ calc_params_l2_norm,
76
+ get_ltor_masks_and_position_ids,
77
+ unwrap_model,
78
+ )
79
+
80
+
81
+ # model utilities
82
+ def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True):
83
+ """Build the model."""
84
+ args = get_args()
85
+ mode = "pre-training" if args.pretraining_flag else "fine-tuning"
86
+ if args.rank == 0:
87
+ print(f"Building {args.model_type_name} model in the {mode} mode.")
88
+ print(
89
+ "The Megatron LM model weights are initialized at random in `accelerator.prepare`. "
90
+ "Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup."
91
+ )
92
+ if args.model_type_name == "bert":
93
+ if args.pretraining_flag:
94
+ num_tokentypes = 2 if args.bert_binary_head else 0
95
+ model = BertModel(
96
+ num_tokentypes=num_tokentypes,
97
+ add_binary_head=args.bert_binary_head,
98
+ parallel_output=True,
99
+ pre_process=pre_process,
100
+ post_process=post_process,
101
+ )
102
+ else:
103
+ model = Classification(
104
+ num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process
105
+ )
106
+ elif args.model_type_name == "gpt":
107
+ model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process)
108
+ elif args.model_type_name == "t5":
109
+ model = T5Model(
110
+ num_tokentypes=0,
111
+ parallel_output=True,
112
+ pre_process=pre_process,
113
+ post_process=post_process,
114
+ add_encoder=add_encoder,
115
+ add_decoder=add_decoder,
116
+ )
117
+ else:
118
+ raise ValueError(f"Unsupported model type: {args.model_type_name}")
119
+ return model
120
+
121
+
122
+ def prepare_model(accelerator):
123
+ accelerator.print("Preparing model")
124
+ args = get_args()
125
+ if accelerator.state.megatron_lm_plugin.custom_prepare_model_function is not None:
126
+ if accelerator.state.megatron_lm_plugin.custom_model_provider_function is None:
127
+ raise ValueError(
128
+ "You must provide a `custom_model_provider_function` when using a `custom_prepare_model_function`."
129
+ )
130
+ custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function
131
+ model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func)
132
+ else:
133
+ if args.model_type_name in ("bert", "gpt"):
134
+ model_type = ModelType.encoder_or_decoder
135
+ elif args.model_type_name == "t5":
136
+ model_type = ModelType.encoder_and_decoder
137
+ if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1:
138
+ args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2
139
+ model = get_model(model_provider_func, model_type)
140
+ return model
141
+
142
+
143
+ # dataloader utilities
144
+ class MegatronLMDummyDataLoader:
145
+ """
146
+ Dummy dataloader that carries the Megatron-LM dataset arguments; it is primarily used to build the Megatron-LM train/valid/test data iterators in place of a conventional PyTorch dataloader.
147
+
148
+ Args:
149
+ **dataset_kwargs: Megatron data arguments.
150
+ """
151
+
152
+ def __init__(self, **dataset_kwargs):
153
+ parser = argparse.ArgumentParser()
154
+ parser = _add_data_args(parser)
155
+ parser = _add_validation_args(parser)
156
+ data_args = parser.parse_known_args()
157
+ self.dataset_args = vars(data_args[0])
158
+ self.dataset_args.update(dataset_kwargs)
159
+ self.dataset_args["megatron_dataset_flag"] = True
160
+
161
+ def set_megatron_data_args(self):
162
+ args = get_args()
163
+ for key, value in self.dataset_args.items():
164
+ setattr(args, key, value)
165
+
166
+ def get_train_valid_test_datasets_provider(self):
167
+ def train_valid_test_datasets_provider(train_val_test_num_samples):
168
+ """Build train, valid, and test datasets."""
169
+ args = get_args()
170
+ dataset_args = {
171
+ "data_prefix": args.data_path,
172
+ "data_impl": args.data_impl,
173
+ "splits_string": args.split,
174
+ "train_valid_test_num_samples": train_val_test_num_samples,
175
+ "skip_warmup": (not args.mmap_warmup),
176
+ "seed": args.seed,
177
+ }
178
+ if args.model_type_name == "bert":
179
+ dataset_args.update(
180
+ {
181
+ "max_seq_length": args.seq_length,
182
+ "masked_lm_prob": args.mask_prob,
183
+ "short_seq_prob": args.short_seq_prob,
184
+ "binary_head": args.bert_binary_head,
185
+ }
186
+ )
187
+ elif args.model_type_name == "gpt":
188
+ dataset_args.update(
189
+ {
190
+ "seq_length": args.seq_length,
191
+ }
192
+ )
193
+ elif args.model_type_name == "t5":
194
+ dataset_args.update(
195
+ {
196
+ "max_seq_length": args.encoder_seq_length,
197
+ "max_seq_length_dec": args.decoder_seq_length,
198
+ "masked_lm_prob": args.mask_prob,
199
+ "short_seq_prob": args.short_seq_prob,
200
+ "dataset_type": "t5",
201
+ }
202
+ )
203
+ else:
204
+ raise ValueError(f"Unsupported model type: {args.model_type_name}")
205
+ if args.model_type_name == "gpt":
206
+ from megatron.data.gpt_dataset import build_train_valid_test_datasets
207
+ else:
208
+ from megatron.data.dataset_utils import build_train_valid_test_datasets
209
+ train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args)
210
+ return train_ds, valid_ds, test_ds
211
+
212
+ return train_valid_test_datasets_provider
213
+
214
+ def build_pretraining_data_loader(self, dataset, consumed_samples):
215
+ if dataset is None:
216
+ return None
217
+ args = get_args()
218
+ micro_batch_size = args.micro_batch_size * args.num_micro_batches
219
+
220
+ # Megatron sampler
221
+ if args.dataloader_type == "single":
222
+ batch_sampler = MegatronPretrainingSampler(
223
+ total_samples=len(dataset),
224
+ consumed_samples=consumed_samples,
225
+ micro_batch_size=micro_batch_size,
226
+ data_parallel_rank=mpu.get_data_parallel_rank(),
227
+ data_parallel_size=mpu.get_data_parallel_world_size(),
228
+ )
229
+ elif args.dataloader_type == "cyclic":
230
+ batch_sampler = MegatronPretrainingRandomSampler(
231
+ dataset,
232
+ total_samples=len(dataset),
233
+ consumed_samples=consumed_samples,
234
+ micro_batch_size=micro_batch_size,
235
+ data_parallel_rank=mpu.get_data_parallel_rank(),
236
+ data_parallel_size=mpu.get_data_parallel_world_size(),
237
+ data_sharding=args.data_sharding,
238
+ )
239
+ else:
240
+ raise Exception(f"{args.dataloader_type} dataloader type is not supported.")
241
+
242
+ # Torch dataloader.
243
+ return torch.utils.data.DataLoader(
244
+ dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True
245
+ )
246
+
247
+ def build_train_valid_test_data_iterators(self):
248
+ def cyclic_iter(iter):
249
+ while True:
250
+ yield from iter
251
+
252
+ args = get_args()
253
+
254
+ (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)
255
+
256
+ print_rank_0("> building train, validation, and test datasets ...")
257
+
258
+ # Backward compatibility, assume fixed batch size.
259
+ if args.iteration > 0 and args.consumed_train_samples == 0:
260
+ assert args.train_samples is None, "only backward compatibility support for iteration-based training"
261
+ args.consumed_train_samples = args.iteration * args.global_batch_size
262
+ if args.iteration > 0 and args.consumed_valid_samples == 0:
263
+ if args.train_samples is None:
264
+ args.consumed_valid_samples = (
265
+ (args.iteration // args.eval_interval) * args.eval_iters * args.global_batch_size
266
+ )
267
+
268
+ # Data loader only on rank 0 of each model parallel group.
269
+ if mpu.get_tensor_model_parallel_rank() == 0:
270
+ # Number of train/valid/test samples.
271
+ if args.train_samples:
272
+ train_samples = args.train_samples
273
+ else:
274
+ train_samples = args.train_iters * args.global_batch_size
275
+ eval_iters = (args.train_iters // args.eval_interval + 1) * args.eval_iters
276
+ test_iters = args.eval_iters
277
+ train_val_test_num_samples = [
278
+ train_samples,
279
+ eval_iters * args.global_batch_size,
280
+ test_iters * args.global_batch_size,
281
+ ]
282
+ print_rank_0(" > datasets target sizes (minimum size):")
283
+ print_rank_0(f" train: {train_val_test_num_samples[0]}")
284
+ print_rank_0(f" validation: {train_val_test_num_samples[1]}")
285
+ print_rank_0(f" test: {train_val_test_num_samples[2]}")
286
+
287
+ # Build the datasets.
288
+ train_valid_test_datasets_provider = self.get_train_valid_test_datasets_provider()
289
+ train_ds, valid_ds, test_ds = train_valid_test_datasets_provider(train_val_test_num_samples)
290
+
291
+ # Build dataloaders.
292
+ train_dataloader = self.build_pretraining_data_loader(train_ds, args.consumed_train_samples)
293
+ valid_dataloader = self.build_pretraining_data_loader(valid_ds, args.consumed_valid_samples)
294
+ test_dataloader = self.build_pretraining_data_loader(test_ds, 0)
295
+
296
+ # Flags to know if we need to do training/validation/testing.
297
+ do_train = train_dataloader is not None and args.train_iters > 0
298
+ do_valid = valid_dataloader is not None and args.eval_iters > 0
299
+ do_test = test_dataloader is not None and args.eval_iters > 0
300
+ # Pack the train/valid/test availability flags so they can be broadcast below.
301
+ flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)])
302
+ else:
303
+ flags = torch.cuda.LongTensor([0, 0, 0])
304
+
305
+ # Broadcast the flags across the tensor-model-parallel group.
306
+ torch.distributed.broadcast(
307
+ flags, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()
308
+ )
309
+ args.do_train = flags[0].item()
310
+ args.do_valid = flags[1].item()
311
+ args.do_test = flags[2].item()
312
+
313
+ # Build iterators.
314
+ dl_type = args.dataloader_type
315
+ assert dl_type in ["single", "cyclic"]
316
+
317
+ if train_dataloader is not None:
318
+ train_data_iterator = (
319
+ iter(train_dataloader) if dl_type == "single" else iter(cyclic_iter(train_dataloader))
320
+ )
321
+ else:
322
+ train_data_iterator = None
323
+
324
+ if valid_dataloader is not None:
325
+ valid_data_iterator = (
326
+ iter(valid_dataloader) if dl_type == "single" else iter(cyclic_iter(valid_dataloader))
327
+ )
328
+ else:
329
+ valid_data_iterator = None
330
+
331
+ if test_dataloader is not None:
332
+ test_data_iterator = iter(test_dataloader) if dl_type == "single" else iter(cyclic_iter(test_dataloader))
333
+ else:
334
+ test_data_iterator = None
335
+
336
+ return train_data_iterator, valid_data_iterator, test_data_iterator
337
+
338
+
339
+ def prepare_data_loader(accelerator, dataloader):
340
+ accelerator.print("Preparing dataloader")
341
+ args = get_args()
342
+ if not args.megatron_dataset_flag:
343
+ from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader
344
+
345
+ args = get_args()
346
+ micro_batch_size = args.micro_batch_size * args.num_micro_batches
347
+ kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS}
348
+ if kwargs["batch_size"] is None:
349
+ if isinstance(kwargs["sampler"], torch.utils.data.BatchSampler):
350
+ kwargs["sampler"].batch_size = micro_batch_size
351
+ else:
352
+ del kwargs["sampler"]
353
+ del kwargs["shuffle"]
354
+ del kwargs["batch_size"]
355
+ kwargs["batch_sampler"].batch_size = micro_batch_size
356
+ else:
357
+ del kwargs["batch_sampler"]
358
+ kwargs["batch_size"] = micro_batch_size
359
+
360
+ dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs)
361
+ return prepare_data_loader(
362
+ dataloader,
363
+ accelerator.device,
364
+ num_processes=mpu.get_data_parallel_world_size(),
365
+ process_index=mpu.get_data_parallel_rank(),
366
+ split_batches=accelerator.split_batches,
367
+ put_on_device=True,
368
+ rng_types=accelerator.rng_types.copy(),
369
+ dispatch_batches=accelerator.dispatch_batches,
370
+ )
371
+ else:
372
+ if args.consumed_samples is not None:
373
+ (
374
+ args.consumed_train_samples,
375
+ args.consumed_valid_samples,
376
+ args.consumed_test_samples,
377
+ ) = args.consumed_samples
378
+ else:
379
+ args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0
380
+ (
381
+ train_data_iterator,
382
+ valid_data_iterator,
383
+ test_data_iterator,
384
+ ) = dataloader.build_train_valid_test_data_iterators()
385
+ return train_data_iterator, valid_data_iterator, test_data_iterator
386
+
387
+
388
+ # optimizer utilities
389
+ class MegatronLMOptimizerWrapper(AcceleratedOptimizer):
390
+ def __init__(self, optimizer):
391
+ super().__init__(optimizer, device_placement=False, scaler=None)
392
+
393
+ def zero_grad(self, set_to_none=None):
394
+ pass # `model(**batch)` is doing that automatically. Therefore, its implementation is not needed
395
+
396
+ def step(self):
397
+ pass # `model(**batch)` is doing that automatically. Therefore, its implementation is not needed
398
+
399
+ @property
400
+ def step_was_skipped(self):
401
+ """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
402
+ return self.optimizer.skipped_iter
403
+
404
+
405
+ def prepare_optimizer(accelerator, model):
406
+ accelerator.print("Preparing optimizer")
407
+ args = get_args()
408
+ optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult)
409
+ return optimizer
410
+
411
+
412
+ # scheduler utilities
413
+ class MegatronLMDummyScheduler:
414
+ """
415
+ Dummy scheduler that stands in for a real LR scheduler; it is primarily used to follow the conventional training
416
+ loop when the actual scheduler is created and stepped by Megatron-LM itself.
417
+
418
+ Args:
419
+ optimizer (`torch.optim.optimizer.Optimizer`):
420
+ The optimizer to wrap.
421
+ total_num_steps (int):
422
+ Total number of steps.
423
+ warmup_num_steps (int):
424
+ Number of steps for warmup.
425
+ **kwargs (additional keyword arguments, *optional*):
426
+ Other arguments.
427
+ """
428
+
429
+ def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
430
+ self.optimizer = optimizer
431
+ self.total_num_steps = total_num_steps
432
+ self.warmup_num_steps = warmup_num_steps
433
+ self.kwargs = kwargs
434
+
435
+
436
+ class MegatronLMSchedulerWrapper(AcceleratedScheduler):
437
+ def __init__(self, scheduler, optimizers):
438
+ super().__init__(scheduler, optimizers)
439
+
440
+ def step(self, *args, **kwargs):
441
+ return # `model(**batch)` is doing that automatically. Therefore, its implementation is not needed
442
+
443
+
444
+ def prepare_scheduler(accelerator, optimizer, scheduler):
445
+ accelerator.print("Preparing scheduler")
446
+ scheduler = get_optimizer_param_scheduler(optimizer)
447
+ return scheduler
448
+
449
+
450
+ class AbstractTrainStep(ABC):
451
+ """Abstract class for batching, forward pass and loss handler."""
452
+
453
+ def __init__(self, name):
454
+ super().__init__()
455
+ self.name = name
456
+
457
+ def get_batch_func(self):
458
+ pass
459
+
460
+ def get_forward_step_func(self):
461
+ pass
462
+
463
+ def get_loss_func(self):
464
+ pass
465
+
466
+
467
+ class BertTrainStep(AbstractTrainStep):
468
+ """
469
+ Bert train step class.
470
+
471
+ Args:
472
+ args (`argparse.Namespace`): Megatron-LM arguments.
473
+ """
474
+
475
+ def __init__(self, args):
476
+ super().__init__("BertTrainStep")
477
+ self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
478
+ self.loss_func = self.get_loss_func(args.pretraining_flag, args.num_labels)
479
+ self.forward_step = self.get_forward_step_func(args.pretraining_flag, args.bert_binary_head)
480
+ if not args.model_return_dict:
481
+ self.model_output_class = None
482
+ else:
483
+ self.model_output_class = SequenceClassifierOutput
484
+
485
+ def get_batch_func(self, megatron_dataset_flag):
486
+ def get_batch_megatron(data_iterator):
487
+ """Build the batch."""
488
+
489
+ # Items and their type.
490
+ keys = ["text", "types", "labels", "is_random", "loss_mask", "padding_mask"]
491
+ datatype = torch.int64
492
+
493
+ # Broadcast data.
494
+ if data_iterator is not None:
495
+ data = next(data_iterator)
496
+ else:
497
+ data = None
498
+ data_b = mpu.broadcast_data(keys, data, datatype)
499
+
500
+ # Unpack.
501
+ tokens = data_b["text"].long()
502
+ types = data_b["types"].long()
503
+ sentence_order = data_b["is_random"].long()
504
+ loss_mask = data_b["loss_mask"].float()
505
+ lm_labels = data_b["labels"].long()
506
+ padding_mask = data_b["padding_mask"].long()
507
+
508
+ return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
509
+
510
+ def get_batch_transformer(data_iterator):
511
+ """Build the batch."""
512
+ data = next(data_iterator)
513
+ data = send_to_device(data, torch.cuda.current_device())
514
+
515
+ # Unpack.
516
+ tokens = data["input_ids"].long()
517
+ padding_mask = data["attention_mask"].long()
518
+ if "token_type_ids" in data:
519
+ types = data["token_type_ids"].long()
520
+ else:
521
+ types = None
522
+ if "labels" in data:
523
+ lm_labels = data["labels"].long()
524
+ loss_mask = (data["labels"] != -100).to(torch.float)
525
+ else:
526
+ lm_labels = None
527
+ loss_mask = None
528
+ if "next_sentence_label" in data:
529
+ sentence_order = data["next_sentence_label"].long()
530
+ else:
531
+ sentence_order = None
532
+
533
+ return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
534
+
535
+ if megatron_dataset_flag:
536
+ return get_batch_megatron
537
+ else:
538
+ return get_batch_transformer
539
+
540
+ def get_loss_func(self, pretraining_flag, num_labels):
541
+ def loss_func_pretrain(loss_mask, sentence_order, output_tensor):
542
+ lm_loss_, sop_logits = output_tensor
543
+
544
+ lm_loss_ = lm_loss_.float()
545
+ loss_mask = loss_mask.float()
546
+ lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
547
+
548
+ if sop_logits is not None:
549
+ sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1)
550
+ sop_loss = sop_loss.float()
551
+ loss = lm_loss + sop_loss
552
+ averaged_losses = average_losses_across_data_parallel_group([lm_loss, sop_loss])
553
+ return loss, {"lm loss": averaged_losses[0], "sop loss": averaged_losses[1]}
554
+
555
+ else:
556
+ loss = lm_loss
557
+ averaged_losses = average_losses_across_data_parallel_group([lm_loss])
558
+ return loss, {"lm loss": averaged_losses[0]}
559
+
560
+ def loss_func_finetune(labels, logits):
561
+ if num_labels == 1:
562
+ # We are doing regression
563
+ loss_fct = MSELoss()
564
+ loss = loss_fct(logits.view(-1), labels.view(-1))
565
+ elif num_labels > 1 and (labels.dtype in (torch.long, torch.int)):
566
+ loss_fct = CrossEntropyLoss()
567
+ loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))
568
+ else:
569
+ loss_fct = BCEWithLogitsLoss()
570
+ loss = loss_fct(logits, labels)
571
+ averaged_losses = average_losses_across_data_parallel_group([loss])
572
+ return loss, {"loss": averaged_losses[0]}
573
+
574
+ if pretraining_flag:
575
+ return loss_func_pretrain
576
+ else:
577
+ return loss_func_finetune
578
+
579
+ def get_forward_step_func(self, pretraining_flag, bert_binary_head):
580
+ def forward_step(data_iterator, model):
581
+ """Forward step."""
582
+ tokens, types, sentence_order, loss_mask, labels, padding_mask = self.get_batch(data_iterator)
583
+ if not bert_binary_head:
584
+ types = None
585
+ # Forward pass through the model.
586
+ if pretraining_flag:
587
+ output_tensor = model(tokens, padding_mask, tokentype_ids=types, lm_labels=labels)
588
+ return output_tensor, partial(self.loss_func, loss_mask, sentence_order)
589
+ else:
590
+ logits = model(tokens, padding_mask, tokentype_ids=types)
591
+ return logits, partial(self.loss_func, labels)
592
+
593
+ return forward_step
594
+
595
+
596
+ class GPTTrainStep(AbstractTrainStep):
597
+ """
598
+ GPT train step class.
599
+
600
+ Args:
601
+ args (`argparse.Namespace`): Megatron-LM arguments.
602
+ """
603
+
604
+ def __init__(self, args):
605
+ super().__init__("GPTTrainStep")
606
+ self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
607
+ self.loss_func = self.get_loss_func()
608
+ self.forward_step = self.get_forward_step_func()
609
+ self.eod_token = args.padded_vocab_size - 1
610
+ if args.vocab_file is not None:
611
+ tokenizer = get_tokenizer()
612
+ self.eod_token = tokenizer.eod
613
+ self.reset_position_ids = args.reset_position_ids
614
+ self.reset_attention_mask = args.reset_attention_mask
615
+ self.eod_mask_loss = args.eod_mask_loss
616
+ if not args.model_return_dict:
617
+ self.model_output_class = None
618
+ else:
619
+ self.model_output_class = CausalLMOutputWithCrossAttentions
620
+
621
+ def get_batch_func(self, megatron_dataset_flag):
622
+ def get_batch_megatron(data_iterator):
623
+ """Generate a batch"""
624
+ # Items and their type.
625
+ keys = ["text"]
626
+ datatype = torch.int64
627
+
628
+ # Broadcast data.
629
+ if data_iterator is not None:
630
+ data = next(data_iterator)
631
+ else:
632
+ data = None
633
+ data_b = mpu.broadcast_data(keys, data, datatype)
634
+
635
+ # Unpack.
636
+ tokens_ = data_b["text"].long()
637
+ labels = tokens_[:, 1:].contiguous()
638
+ tokens = tokens_[:, :-1].contiguous()
639
+
640
+ # Get the masks and position ids.
641
+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
642
+ tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss
643
+ )
644
+
645
+ return tokens, labels, loss_mask, attention_mask, position_ids
646
+
647
+ def get_batch_transformer(data_iterator):
648
+ data = next(data_iterator)
649
+ data = {"input_ids": data["input_ids"]}
650
+ data = send_to_device(data, torch.cuda.current_device())
651
+
652
+ tokens_ = data["input_ids"].long()
653
+ padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token
654
+ tokens_ = torch.concat([tokens_, padding], dim=1)
655
+ labels = tokens_[:, 1:].contiguous()
656
+ tokens = tokens_[:, :-1].contiguous()
657
+ # Get the masks and position ids.
658
+ attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
659
+ tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True
660
+ )
661
+ return tokens, labels, loss_mask, attention_mask, position_ids
662
+
663
+ if megatron_dataset_flag:
664
+ return get_batch_megatron
665
+ else:
666
+ return get_batch_transformer
667
+
668
+ def get_loss_func(self):
669
+ args = get_args()
670
+
671
+ def loss_func(loss_mask, output_tensor):
672
+ if args.return_logits:
673
+ losses, logits = output_tensor
674
+ else:
675
+ losses = output_tensor
676
+ losses = losses.float()
677
+ loss_mask = loss_mask.view(-1).float()
678
+ loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
679
+
680
+ # Reduce loss for logging.
681
+ averaged_loss = average_losses_across_data_parallel_group([loss])
682
+
683
+ output_dict = {"lm loss": averaged_loss[0]}
684
+ if args.return_logits:
685
+ output_dict.update({"logits": logits})
686
+ return loss, output_dict
687
+
688
+ return loss_func
689
+
690
+ def get_forward_step_func(self):
691
+ def forward_step(data_iterator, model):
692
+ """Forward step."""
693
+ # Get the batch.
694
+ tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator)
695
+ output_tensor = model(tokens, position_ids, attention_mask, labels=labels)
696
+
697
+ return output_tensor, partial(self.loss_func, loss_mask)
698
+
699
+ return forward_step
700
+
701
+
702
+ class T5TrainStep(AbstractTrainStep):
703
+ """
704
+ T5 train step class.
705
+
706
+ Args:
707
+ args (`argparse.Namespace`): Megatron-LM arguments.
708
+ """
709
+
710
+ def __init__(self, args):
711
+ super().__init__("T5TrainStep")
712
+ self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
713
+ self.loss_func = self.get_loss_func()
714
+ self.forward_step = self.get_forward_step_func()
715
+ if not args.model_return_dict:
716
+ self.model_output_class = None
717
+ else:
718
+ self.model_output_class = Seq2SeqLMOutput
719
+
720
+ @staticmethod
721
+ def attn_mask_postprocess(attention_mask):
722
+ # We create a 3D attention mask from a 2D tensor mask.
723
+ # [b, 1, s]
724
+ attention_mask_b1s = attention_mask.unsqueeze(1)
725
+ # [b, s, 1]
726
+ attention_mask_bs1 = attention_mask.unsqueeze(2)
727
+ # [b, s, s]
728
+ attention_mask_bss = attention_mask_b1s * attention_mask_bs1
729
+ # Convert attention mask to binary:
730
+ extended_attention_mask = attention_mask_bss < 0.5
731
+ return extended_attention_mask
732
+
733
+ @staticmethod
734
+ def get_decoder_mask(seq_length, device):
735
+ attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device))
736
+ attention_mask = attention_mask < 0.5
737
+ return attention_mask
738
+
739
+ @staticmethod
740
+ def get_enc_dec_mask(attention_mask, dec_seq_length, device):
741
+ batch_size, _ = attention_mask.shape
742
+ # We create a 3D attention mask from a 2D tensor mask.
743
+ # [b, 1, s]
744
+ attention_mask_b1s = attention_mask.unsqueeze(1)
745
+ # [b, s, 1]
746
+ attention_mask_bs1 = torch.ones((batch_size, dec_seq_length, 1), device=device)
747
+ attention_mask_bss = attention_mask_bs1 * attention_mask_b1s
748
+ extended_attention_mask = attention_mask_bss < 0.5
749
+ return extended_attention_mask
750
+
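+ # Illustrative sketch, not part of the upstream file: for a [batch, seq] padding mask,
+ # attn_mask_postprocess builds a [batch, seq, seq] boolean mask where True marks
+ # positions that should be ignored, e.g.
+ #     mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
+ #     T5TrainStep.attn_mask_postprocess(mask).shape  # torch.Size([2, 3, 3])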
751
+ def get_batch_func(self, megatron_dataset_flag):
752
+ def get_batch_megatron(data_iterator):
753
+ """Build the batch."""
754
+
755
+ keys = ["text_enc", "text_dec", "labels", "loss_mask", "enc_mask", "dec_mask", "enc_dec_mask"]
756
+ datatype = torch.int64
757
+
758
+ # Broadcast data.
759
+ if data_iterator is not None:
760
+ data = next(data_iterator)
761
+ else:
762
+ data = None
763
+ data_b = mpu.broadcast_data(keys, data, datatype)
764
+
765
+ # Unpack.
766
+ tokens_enc = data_b["text_enc"].long()
767
+ tokens_dec = data_b["text_dec"].long()
768
+ labels = data_b["labels"].long()
769
+ loss_mask = data_b["loss_mask"].float()
770
+
771
+ enc_mask = data_b["enc_mask"] < 0.5
772
+ dec_mask = data_b["dec_mask"] < 0.5
773
+ enc_dec_mask = data_b["enc_dec_mask"] < 0.5
774
+
775
+ return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask
776
+
777
+ def get_batch_transformer(data_iterator):
778
+ """Build the batch."""
779
+ data = next(data_iterator)
780
+ data = send_to_device(data, torch.cuda.current_device())
781
+
782
+ tokens_enc = data["input_ids"].long()
783
+ labels = data["labels"].long()
784
+ loss_mask = (labels != -100).to(torch.float)
785
+ if "decoder_input_ids" in data:
786
+ tokens_dec = data["decoder_input_ids"].long()
787
+ else:
788
+ tokens_dec = labels.new_zeros(labels.shape, device=labels.device, dtype=torch.long)
789
+ tokens_dec[..., 1:] = labels[..., :-1].clone()
790
+ tokens_dec[..., 0] = 0
791
+ tokens_dec.masked_fill_(tokens_dec == -100, 0)
792
+ enc_mask = T5TrainStep.attn_mask_postprocess(data["attention_mask"].long())
793
+ dec_mask = T5TrainStep.get_decoder_mask(tokens_dec.shape[1], tokens_dec.device)
794
+ enc_dec_mask = T5TrainStep.get_enc_dec_mask(
795
+ data["attention_mask"].long(), tokens_dec.shape[1], tokens_dec.device
796
+ )
797
+
798
+ return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask
799
+
800
+ if megatron_dataset_flag:
801
+ return get_batch_megatron
802
+ else:
803
+ return get_batch_transformer
804
+
805
+ def get_loss_func(self):
806
+ def loss_func(loss_mask, output_tensor):
807
+ lm_loss_ = output_tensor.float()
808
+ lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
809
+
810
+ loss = lm_loss
811
+ averaged_losses = average_losses_across_data_parallel_group([lm_loss])
812
+
813
+ return loss, {"lm loss": averaged_losses[0]}
814
+
815
+ return loss_func
816
+
817
+ def get_forward_step_func(self):
818
+ def forward_step(data_iterator, model):
819
+ """Forward step."""
820
+ # Get the batch.
821
+ tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask = self.get_batch(
822
+ data_iterator
823
+ )
824
+ # Forward model lm_labels
825
+ output_tensor = model(
826
+ tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels
827
+ )
828
+
829
+ return output_tensor, partial(self.loss_func, loss_mask)
830
+
831
+ return forward_step
832
+
833
+
834
+ # initialize megatron setup
835
+ def initialize(accelerator, extra_args_provider=None, args_defaults={}):
836
+ accelerator.print("Initializing Megatron-LM")
837
+ assert torch.cuda.is_available(), "Megatron requires CUDA."
838
+
839
+ # Parse arguments
840
+ args = parse_args(extra_args_provider, ignore_unknown_args=True)
841
+
842
+ # Set defaults
843
+ for key, value in args_defaults.items():
844
+ if getattr(args, key, None) is not None:
845
+ if args.rank == 0:
846
+ print(
847
+ f"WARNING: overriding default arguments for " f"{key}:{getattr(args, key)} with {key}:{value}",
848
+ flush=True,
849
+ )
850
+ setattr(args, key, value)
851
+
852
+ if args.use_checkpoint_args or args_defaults.get("use_checkpoint_args", False):
853
+ assert args.load is not None, "--use-checkpoint-args requires --load argument"
854
+ load_args_from_checkpoint(args)
855
+
856
+ validate_args(args)
857
+
858
+ # set global args, build tokenizer, and set adlr-autoresume,
859
+ # tensorboard-writer, and timers.
860
+ set_global_variables(args)
861
+
862
+ # torch.distributed initialization
863
+ def finish_mpu_init():
864
+ args = get_args()
865
+ # Pytorch distributed.
866
+ device_count = torch.cuda.device_count()
867
+ args.rank = torch.distributed.get_rank()
868
+ args.world_size = torch.distributed.get_world_size()
869
+ if device_count > 0:
870
+ device = args.rank % device_count
871
+ if args.local_rank is not None:
872
+ assert args.local_rank == device, "expected local-rank to be the same as rank % device-count."
873
+ else:
874
+ args.local_rank = device
875
+
876
+ # Set the tensor model-parallel, pipeline model-parallel, and
877
+ # data-parallel communicators.
878
+ if mpu.model_parallel_is_initialized():
879
+ print("model parallel is already initialized")
880
+ else:
881
+ mpu.initialize_model_parallel(
882
+ args.tensor_model_parallel_size,
883
+ args.pipeline_model_parallel_size,
884
+ args.virtual_pipeline_model_parallel_size,
885
+ args.pipeline_model_parallel_split_rank,
886
+ )
887
+
888
+ # Random seeds for reproducibility.
889
+ if args.rank == 0:
890
+ print(f"> setting random seeds to {args.seed} ...")
891
+ _set_random_seed(args.seed, args.data_parallel_random_init)
892
+
893
+ args = get_args()
894
+
895
+ # Megatron's MPU is the master. Complete initialization right away.
896
+ finish_mpu_init()
897
+
898
+ # Autoresume.
899
+ _init_autoresume()
900
+
901
+ # Compile dependencies.
902
+ _compile_dependencies()
903
+
904
+ # Set pytorch JIT layer fusion options and warmup JIT functions.
905
+ set_jit_fusion_options()
906
+ args = get_args()
907
+ args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args)
908
+ if args.model_type_name == "bert" and args.pretraining_flag and args.num_labels == 2:
909
+ args.bert_binary_head = True
910
+ else:
911
+ args.bert_binary_head = False
912
+ args.iteration = 0
913
+
914
+
915
+ class MegatronEngine(torch.nn.Module):
916
+ """
917
+ Megatron-LM model wrapper
918
+
919
+ Args:
920
+ accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use.
921
+ model: Megatron-LM model
922
+ optimizer: Megatron-LM optimizer
923
+ lr_scheduler: Megatron-LM lr scheduler
924
+ """
925
+
926
+ def __init__(self, accelerator, model, optimizer, scheduler):
927
+ super().__init__()
928
+ self.module = model
929
+ self.base_model = model[0]
930
+ self.optimizer = optimizer
931
+ self.scheduler = scheduler
932
+ args = get_args()
933
+ if accelerator.state.megatron_lm_plugin.custom_train_step_class is not None:
934
+ self.train_step_handler = accelerator.state.megatron_lm_plugin.custom_train_step_class(
935
+ args, **accelerator.state.megatron_lm_plugin.custom_train_step_kwargs
936
+ )
937
+ elif args.model_type_name == "bert":
938
+ self.train_step_handler = BertTrainStep(args)
939
+ elif args.model_type_name == "gpt":
940
+ self.train_step_handler = GPTTrainStep(args)
941
+ elif args.model_type_name == "t5":
942
+ self.train_step_handler = T5TrainStep(args)
943
+ else:
944
+ raise ValueError(f"Unsupported model type: {args.model_type_name}")
945
+ self.optimizer.skipped_iter = False
946
+
947
+ # Tracking loss.
948
+ self.total_loss_dict = {}
949
+ self.eval_total_loss_dict = {}
950
+ self.iteration = 0
951
+ self.report_memory_flag = True
952
+ if args.tensorboard_dir is not None:
953
+ write_args_to_tensorboard()
954
+
955
+ def train(self):
956
+ for model_module in self.module:
957
+ model_module.train()
958
+ self.log_eval_results()
959
+
960
+ def eval(self):
961
+ for model_module in self.module:
962
+ model_module.eval()
963
+
964
+ def train_step(self, **batch_data):
965
+ """
966
+ Training step for Megatron-LM
967
+
968
+ Args:
969
+ batch_data (:obj:`dict`): The batch data to train on.
970
+ """
971
+
972
+ args = get_args()
973
+ timers = get_timers()
974
+
975
+ if len(batch_data) > 0:
976
+ data_chunks = []
977
+ if args.num_micro_batches > 1:
978
+ for i in range(0, args.num_micro_batches):
979
+ data_chunks.append(
980
+ {
981
+ k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size]
982
+ for k, v in batch_data.items()
983
+ }
984
+ )
985
+ else:
986
+ data_chunks = [batch_data]
987
+
988
+ if len(self.module) > 1:
989
+ batch_data_iterator = (
990
+ [iter(data_chunks) for _ in range(len(self.module))]
991
+ if len(batch_data) > 0
992
+ else [None] * len(self.module)
993
+ )
994
+ else:
995
+ batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None
996
+
997
+ # Set grad to zero.
998
+ if args.DDP_impl == "local" and args.use_contiguous_buffers_in_local_ddp:
999
+ for partition in self.module:
1000
+ partition.zero_grad_buffer()
1001
+ self.optimizer.zero_grad()
1002
+
1003
+ # Forward pass.
1004
+ forward_backward_func = get_forward_backward_func()
1005
+ losses_reduced = forward_backward_func(
1006
+ self.train_step_handler.forward_step,
1007
+ batch_data_iterator,
1008
+ self.module,
1009
+ self.optimizer,
1010
+ None,
1011
+ forward_only=False,
1012
+ )
1013
+
1014
+ # Empty unused memory.
1015
+ if args.empty_unused_memory_level >= 1:
1016
+ torch.cuda.empty_cache()
1017
+
1018
+ # Reduce gradients.
1019
+ timers("backward-reduce-model-grads").start()
1020
+ self.optimizer.reduce_model_grads(args, timers)
1021
+ timers("backward-reduce-model-grads").stop()
1022
+
1023
+ # Update parameters.
1024
+ timers("optimizer").start()
1025
+ update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step(args, timers)
1026
+ timers("optimizer").stop()
1027
+
1028
+ # Gather params.
1029
+ if update_successful:
1030
+ timers("backward-gather-model-params").start()
1031
+ self.optimizer.gather_model_params(args, timers)
1032
+ timers("backward-gather-model-params").stop()
1033
+
1034
+ # Update learning rate.
1035
+ if update_successful:
1036
+ if self.scheduler is not None:
1037
+ increment = get_num_microbatches() * args.micro_batch_size * args.data_parallel_size
1038
+ self.scheduler.step(increment=increment)
1039
+ skipped_iter = 0
1040
+ else:
1041
+ skipped_iter = 1
1042
+
1043
+ self.optimizer.skipped_iter = not update_successful
1044
+
1045
+ # Empty unused memory.
1046
+ if args.empty_unused_memory_level >= 2:
1047
+ torch.cuda.empty_cache()
1048
+
1049
+ args.consumed_train_samples += (
1050
+ mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()
1051
+ )
1052
+
1053
+ if mpu.is_pipeline_last_stage(ignore_virtual=True):
1054
+ # Average loss across microbatches.
1055
+ loss_reduced = {}
1056
+ for key in losses_reduced[0]:
1057
+ losses_reduced_for_key = [x[key] for x in losses_reduced]
1058
+ if len(losses_reduced_for_key[0].shape) == 0:
1059
+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)
1060
+ else:
1061
+ loss_reduced[key] = torch.concat(losses_reduced_for_key)
1062
+ return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad
1063
+ return {}, skipped_iter, grad_norm, num_zeros_in_grad
1064
+
1065
+ def eval_step(self, **batch_data):
1066
+ """
1067
+ Evaluation step for Megatron-LM
1068
+
1069
+ Args:
1070
+ batch_data (:obj:`dict`): The batch data to evaluate on.
1071
+ """
1072
+
1073
+ args = get_args()
1074
+ data_chunks = []
1075
+ if args.num_micro_batches > 1:
1076
+ for i in range(0, args.num_micro_batches):
1077
+ data_chunks.append(
1078
+ {k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] for k, v in batch_data.items()}
1079
+ )
1080
+ else:
1081
+ data_chunks = [batch_data]
1082
+
1083
+ if len(self.module) > 1:
1084
+ batch_data_iterator = [iter(data_chunks) for _ in range(len(self.module))]
1085
+ else:
1086
+ batch_data_iterator = iter(data_chunks)
1087
+ forward_backward_func = get_forward_backward_func()
1088
+ loss_dicts = forward_backward_func(
1089
+ self.train_step_handler.forward_step,
1090
+ batch_data_iterator,
1091
+ self.module,
1092
+ optimizer=None,
1093
+ timers=None,
1094
+ forward_only=True,
1095
+ )
1096
+ # Empty unused memory
1097
+ if args.empty_unused_memory_level >= 1:
1098
+ torch.cuda.empty_cache()
1099
+
1100
+ args.consumed_valid_samples += (
1101
+ mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()
1102
+ )
1103
+
1104
+ if mpu.is_pipeline_last_stage(ignore_virtual=True):
1105
+ # Average loss across microbatches.
1106
+ loss_reduced = {}
1107
+ for key in loss_dicts[0]:
1108
+ losses_reduced_for_key = [x[key] for x in loss_dicts]
1109
+ if len(losses_reduced_for_key[0].shape) == 0:
1110
+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)
1111
+ else:
1112
+ loss_reduced[key] = torch.concat(losses_reduced_for_key)
1113
+ return loss_reduced
1114
+ else:
1115
+ return {}
1116
+
1117
+ def forward(self, **batch_data):
1118
+ # During training, we use train_step()
1119
+ # model(**batch_data) performs the following operations by delegating them to `self.train_step`:
1120
+ # 1. Prepare **batch_data for Tensor, Pipeline and Model Parallelism
1121
+ # 2. Set grad to zero.
1122
+ # 3. forward pass and backward pass using Pipeline Parallelism
1123
+ # 4. Empty unused memory.
1124
+ # 5. Reduce gradients.
1125
+ # 6. Update parameters.
1126
+ # 7. Gather params when using Distributed Optimizer (Data Parallelism).
1127
+ # 8. Update learning rate if scheduler is specified.
1128
+ # 9. Empty unused memory.
1129
+ # 10. Average loss across microbatches and across DP ranks.
1130
+ #
1131
+ # During evaluation, we use eval_step()
1132
+ args = get_args()
1133
+ if self.module[0].training:
1134
+ loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = self.train_step(**batch_data)
1135
+ self.iteration += 1
1136
+ if args.tensorboard_dir is not None:
1137
+ # Logging.
1138
+ loss_scale = self.optimizer.get_loss_scale().item()
1139
+ params_norm = None
1140
+ if args.log_params_norm:
1141
+ params_norm = calc_params_l2_norm(self.model)
1142
+ self.report_memory_flag = training_log(
1143
+ loss_dict,
1144
+ self.total_loss_dict,
1145
+ self.optimizer.param_groups[0]["lr"],
1146
+ self.iteration,
1147
+ loss_scale,
1148
+ self.report_memory_flag,
1149
+ skipped_iter,
1150
+ grad_norm,
1151
+ params_norm,
1152
+ num_zeros_in_grad,
1153
+ )
1154
+ else:
1155
+ loss_dict = self.eval_step(**batch_data)
1156
+ if args.tensorboard_dir is not None:
1157
+ for key in loss_dict:
1158
+ self.eval_total_loss_dict[key] = (
1159
+ self.eval_total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key]
1160
+ )
1161
+ self.eval_total_loss_dict[key + "_num_iters"] = self.eval_total_loss_dict.get(
1162
+ key + "_num_iters", torch.cuda.FloatTensor([0.0])
1163
+ ) + torch.cuda.FloatTensor([1.0])
1164
+
1165
+ loss = torch.tensor(0.0, device=args.local_rank)
1166
+ for key in loss_dict:
1167
+ if len(loss_dict[key].shape) == 0:
1168
+ loss += loss_dict[key]
1169
+
1170
+ logits = None
1171
+ if "logits" in loss_dict:
1172
+ logits = loss_dict["logits"]
1173
+ # loss = reduce(loss)
1174
+ if self.train_step_handler.model_output_class is not None:
1175
+ return self.train_step_handler.model_output_class(loss=loss, logits=logits)
1176
+ return loss
1177
+
1178
+ def log_eval_results(self):
1179
+ args = get_args()
1180
+ if args.tensorboard_dir is None or self.iteration == 0:
1181
+ return
1182
+ args = get_args()
1183
+ writer = get_tensorboard_writer()
1184
+ string = f"validation loss at iteration {self.iteration} | "
1185
+ for key in self.eval_total_loss_dict:
1186
+ if key.endswith("_num_iters"):
1187
+ continue
1188
+ value = self.eval_total_loss_dict[key] / self.eval_total_loss_dict[key + "_num_iters"]
1189
+ string += f"{key} value: {value} | "
1190
+ ppl = math.exp(min(20, value.item()))
1191
+ if args.pretraining_flag:
1192
+ string += f"{key} PPL: {ppl} | "
1193
+ if writer:
1194
+ writer.add_scalar(f"{key} validation", value.item(), self.iteration)
1195
+ if args.pretraining_flag:
1196
+ writer.add_scalar(f"{key} validation ppl", ppl, self.iteration)
1197
+
1198
+ length = len(string) + 1
1199
+ print_rank_last("-" * length)
1200
+ print_rank_last(string)
1201
+ print_rank_last("-" * length)
1202
+ self.eval_total_loss_dict = {}
1203
+
1204
+ def save_checkpoint(self, output_dir):
1205
+ self.log_eval_results()
1206
+ args = get_args()
1207
+ args.save = output_dir
1208
+ torch.distributed.barrier()
1209
+ save_checkpoint(self.iteration, self.module, self.optimizer, self.scheduler)
1210
+ torch.distributed.barrier()
1211
+
1212
+ def load_checkpoint(self, input_dir):
1213
+ args = get_args()
1214
+ args.load = input_dir
1215
+ args.consumed_train_samples = 0
1216
+ args.consumed_valid_samples = 0
1217
+ torch.distributed.barrier()
1218
+ iteration = load_checkpoint(self.module, self.optimizer, self.scheduler)
1219
+ torch.distributed.barrier()
1220
+ self.iteration = iteration
1221
+ if args.fp16 and self.iteration == 0:
1222
+ self.optimizer.reload_model_params()
1223
+
1224
+ def megatron_generate(
1225
+ self,
1226
+ inputs,
1227
+ attention_mask=None,
1228
+ max_length=None,
1229
+ max_new_tokens=None,
1230
+ num_beams=None,
1231
+ temperature=None,
1232
+ top_k=None,
1233
+ top_p=None,
1234
+ length_penalty=None,
1235
+ **kwargs,
1236
+ ):
1237
+ """
1238
+ Generate method for the GPT2 model. This method is used for inference. Supports both greedy and beam search along
1239
+ with sampling. Refer to the Megatron-LM repo for more details.
1240
+
1241
+ Args:
1242
+ inputs (torch.Tensor): input ids
1243
+ attention_mask (torch.Tensor, optional): attention mask. Defaults to None.
1244
+ max_length (int, optional): max length of the generated sequence. Defaults to None.
1245
+ Either this or max_new_tokens should be provided.
1246
+ max_new_tokens (int, optional): max number of tokens to be generated. Defaults to None.
1247
+ Either this or max_length should be provided.
1248
+ num_beams (int, optional): number of beams to use for beam search. Defaults to None.
1249
+ temperature (float, optional): temperature for sampling. Defaults to 1.0.
1250
+ top_k (int, optional): top k tokens to consider for sampling. Defaults to 0.0.
1251
+ top_p (float, optional): tokens in top p probability are considered for sampling. Defaults to 0.0.
1252
+ length_penalty (float, optional): length penalty for beam search. Defaults to None.
1253
+ kwargs: additional key-value arguments
1254
+ """
1255
+
1256
+ # checking if required arguments are passed
1257
+ args = get_args()
1258
+ if args.model_type_name != "gpt":
1259
+ raise NotImplementedError("Generate method is not implemented for this model")
1260
+
1261
+ if args.data_parallel_size > 1:
1262
+ raise ValueError("Generate method requires data parallelism to be 1")
1263
+
1264
+ if args.sequence_parallel:
1265
+ raise ValueError("Generate method requires sequence parallelism to be False")
1266
+
1267
+ if args.recompute_granularity is not None:
1268
+ raise ValueError("Checkpoint activations cannot be set for inference")
1269
+
1270
+ if args.vocab_file is None:
1271
+ raise ValueError("Vocab file is required for inference")
1272
+
1273
+ # Prepare inputs
1274
+ if max_length is None and max_new_tokens is None:
1275
+ raise ValueError("`max_length` or `max_new_tokens` are required for inference")
1276
+
1277
+ if temperature is None:
1278
+ temperature = 1.0
1279
+ elif not (0.0 < temperature <= 100.0):
1280
+ raise ValueError("temperature must be a positive number less than or equal to 100.0")
1281
+
1282
+ if top_k is None:
1283
+ top_k = 0
1284
+ elif not (0 <= top_k <= 1000):
1285
+ raise ValueError("top_k must be a positive number less than or equal to 1000")
1286
+
1287
+ if top_p is None:
1288
+ top_p = 0.0
1289
+ elif top_p > 0.0 and top_k > 0.0:
1290
+ raise ValueError("top_p and top_k sampling cannot be set together")
1291
+ else:
1292
+ if not (0.0 <= top_p <= 1.0):
1293
+ raise ValueError("top_p must be less than or equal to 1.0")
1294
+
1295
+ top_p_decay = kwargs.get("top_p_decay", 0.0)
1296
+ if not (0.0 <= top_p_decay <= 1.0):
1297
+ raise ValueError("top_p_decay must be less than or equal to 1.0")
1298
+
1299
+ top_p_bound = kwargs.get("top_p_bound", 0.0)
1300
+ if not (0.0 <= top_p_bound <= 1.0):
1301
+ raise ValueError("top_p_bound must be less than or equal to 1.0")
1302
+
1303
+ add_BOS = kwargs.get("add_BOS", False)
1304
+ if not (isinstance(add_BOS, bool)):
1305
+ raise ValueError("add_BOS must be a boolean")
1306
+
1307
+ beam_width = num_beams
1308
+ if beam_width is not None:
1309
+ if not isinstance(beam_width, int):
1310
+ raise ValueError("beam_width must be an integer")
1311
+ if beam_width < 1:
1312
+ raise ValueError("beam_width must be greater than 0")
1313
+ if inputs.shape[0] > 1:
1314
+ return "When doing beam_search, batch size must be 1"
1315
+
1316
+ tokenizer = get_tokenizer()
1317
+
1318
+ stop_token = kwargs.get("stop_token", tokenizer.eod)
1319
+ if stop_token is not None:
1320
+ if not isinstance(stop_token, int):
1321
+ raise ValueError("stop_token must be an integer")
1322
+
1323
+ if length_penalty is None:
1324
+ length_penalty = 1.0
1325
+
1326
+ sizes_list = None
1327
+ prompts_tokens_tensor = None
1328
+ prompts_length_tensor = None
1329
+ if torch.distributed.get_rank() == 0:
1330
+ # Get the prompts length.
1331
+ if attention_mask is None:
1332
+ prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0])
1333
+ else:
1334
+ prompts_length_tensor = attention_mask.sum(axis=-1).cuda()
1335
+
1336
+ if max_new_tokens is None:
1337
+ max_new_tokens = max_length - inputs.shape[1]
1338
+ if max_new_tokens <= 0:
1339
+ raise ValueError("max_new_tokens must be greater than 0")
1340
+
1341
+ if add_BOS:
1342
+ max_length = max_new_tokens + inputs.shape[1] + 1
1343
+ # making sure that `max_length` is a multiple of 4 to leverage fused kernels
1344
+ max_length = 4 * math.ceil(max_length / 4)
1345
+ max_new_tokens = max_length - (inputs.shape[1] + 1)
1346
+ padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])
1347
+ prompts_tokens_tensor = torch.concat(
1348
+ [torch.unsqueeze(padding[:, 0], axis=-1), inputs.cuda(), padding], axis=-1
1349
+ )
1350
+ else:
1351
+ # making sure that `max_length` is a multiple of 4 to leverage fused kernels
1352
+ max_length = max_new_tokens + inputs.shape[1]
1353
+ max_length = 4 * math.ceil(max_length / 4)
1354
+ max_new_tokens = max_length - inputs.shape[1]
1355
+ padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])
1356
+ prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1)
1357
+
1358
+ # We need the sizes of these tensors for the broadcast
1359
+ sizes_list = [
1360
+ prompts_tokens_tensor.size(0), # Batch size
1361
+ prompts_tokens_tensor.size(1),
1362
+ ] # Sequence length
1363
+
1364
+ # First, broadcast the sizes.
1365
+ sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0)
1366
+
1367
+ # Now that we have the sizes, we can broadcast the tokens
1368
+ # and length tensors.
1369
+ sizes = sizes_tensor.tolist()
1370
+ context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0)
1371
+ context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0)
1372
+
1373
+ # Run the inference
1374
+ random_seed = kwargs.get("random_seed", 0)
1375
+ torch.random.manual_seed(random_seed)
1376
+ unwrapped_model = unwrap_model(self.base_model, (torchDDP, LocalDDP, Float16Module))
1377
+ if beam_width is not None:
1378
+ tokens, _ = beam_search_and_return_on_first_stage(
1379
+ unwrapped_model,
1380
+ context_tokens_tensor,
1381
+ context_length_tensor,
1382
+ beam_width,
1383
+ stop_token=stop_token,
1384
+ num_return_gen=1,
1385
+ length_penalty=length_penalty,
1386
+ )
1387
+ else:
1388
+ tokens, _, _ = generate_tokens_probs_and_return_on_first_stage(
1389
+ unwrapped_model,
1390
+ context_tokens_tensor,
1391
+ context_length_tensor,
1392
+ return_output_log_probs=False,
1393
+ top_k=top_k,
1394
+ top_p=top_p,
1395
+ top_p_decay=top_p_decay,
1396
+ top_p_bound=top_p_bound,
1397
+ temperature=temperature,
1398
+ use_eod_token_for_early_termination=True,
1399
+ )
1400
+ return tokens
1401
+
1402
+
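+ # Illustrative sketch, not part of the upstream file: greedy decoding with the wrapped
+ # GPT model, assuming `model` is the prepared MegatronEngine and `input_ids` is a
+ # LongTensor of prompt token ids:
+ #     output_tokens = model.megatron_generate(input_ids, max_new_tokens=32, top_k=0, top_p=0.0)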
1403
+ # other utilities
1404
+ def avg_losses_across_data_parallel_group(losses):
1405
+ """
1406
+ Average losses across data parallel group.
1407
+
1408
+ Args:
1409
+ losses (List[Tensor]): List of losses to average across data parallel group.
1410
+ """
1411
+
1412
+ return average_losses_across_data_parallel_group(losses)
1413
+
1414
+
1415
+ def gather_across_data_parallel_groups(tensor):
1416
+ """
1417
+ Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks.
1418
+
1419
+ Args:
1420
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
1421
+ The data to gather across data parallel ranks.
1422
+
1423
+ """
1424
+
1425
+ def _gpu_gather_one(tensor):
1426
+ if tensor.ndim == 0:
1427
+ tensor = tensor.clone()[None]
1428
+ output_tensors = [
1429
+ torch.empty_like(tensor)
1430
+ for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group()))
1431
+ ]
1432
+ torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group())
1433
+ return torch.cat(output_tensors, dim=0)
1434
+
1435
+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
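+ # Illustrative sketch, not part of the upstream file: gathering a per-rank scalar loss
+ # across the data-parallel group for logging, e.g.
+ #     all_losses = gather_across_data_parallel_groups(loss.detach())
+ #     mean_loss = all_losses.mean()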
env-llmeval/lib/python3.10/site-packages/accelerate/utils/memory.py ADDED
@@ -0,0 +1,158 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ A collection of utilities for ensuring that training can always occur. Heavily influenced by the
17
+ [toma](https://github.com/BlackHC/toma) library.
18
+ """
19
+
20
+ import functools
21
+ import gc
22
+ import inspect
23
+
24
+ import torch
25
+
26
+ from .imports import is_mlu_available, is_mps_available, is_npu_available, is_xpu_available
27
+
28
+
29
+ def release_memory(*objects):
30
+ """
31
+ Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`.
32
+ Returned objects should be reassigned to the same variables.
33
+
34
+ Args:
35
+ objects (`Iterable`):
36
+ An iterable of objects
37
+ Returns:
38
+ A list of `None` objects to replace `objects`
39
+
40
+ Example:
41
+
42
+ ```python
43
+ >>> import torch
44
+ >>> from accelerate.utils import release_memory
45
+
46
+ >>> a = torch.ones(1000, 1000).cuda()
47
+ >>> b = torch.ones(1000, 1000).cuda()
48
+ >>> a, b = release_memory(a, b)
49
+ ```
50
+ """
51
+ if not isinstance(objects, list):
52
+ objects = list(objects)
53
+ for i in range(len(objects)):
54
+ objects[i] = None
55
+ gc.collect()
56
+ if is_xpu_available():
57
+ torch.xpu.empty_cache()
58
+ elif is_mlu_available():
59
+ torch.mlu.empty_cache()
60
+ elif is_npu_available():
61
+ torch.npu.empty_cache()
62
+ elif is_mps_available():
63
+ torch.mps.empty_cache()
64
+ else:
65
+ torch.cuda.empty_cache()
66
+ return objects
67
+
68
+
69
+ def should_reduce_batch_size(exception: Exception) -> bool:
70
+ """
71
+ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory
72
+
73
+ Args:
74
+ exception (`Exception`):
75
+ An exception
76
+ """
77
+ _statements = [
78
+ "CUDA out of memory.", # CUDA OOM
79
+ "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
80
+ "DefaultCPUAllocator: can't allocate memory", # CPU OOM
81
+ ]
82
+ if isinstance(exception, RuntimeError) and len(exception.args) == 1:
83
+ return any(err in exception.args[0] for err in _statements)
84
+ return False
85
+
86
+
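+ # Illustrative sketch, not part of the upstream file: this check lets a retry loop tell
+ # OOM-style failures apart from genuine errors (`training_step` and `batch` are
+ # placeholders), e.g.
+ #     try:
+ #         loss = training_step(batch)
+ #     except RuntimeError as e:
+ #         if should_reduce_batch_size(e):
+ #             ...  # halve the batch size and try again
+ #         else:
+ #             raise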
87
+ def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
88
+ """
89
+ A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or
90
+ CUDNN, the batch size is cut in half and passed to `function`
91
+
92
+ `function` must take in a `batch_size` parameter as its first argument.
93
+
94
+ Args:
95
+ function (`callable`, *optional*):
96
+ A function to wrap
97
+ starting_batch_size (`int`, *optional*):
98
+ The batch size to try and fit into memory
99
+
100
+ Example:
101
+
102
+ ```python
103
+ >>> from accelerate.utils import find_executable_batch_size
104
+
105
+
106
+ >>> @find_executable_batch_size(starting_batch_size=128)
107
+ ... def train(batch_size, model, optimizer):
108
+ ... ...
109
+
110
+
111
+ >>> train(model, optimizer)
112
+ ```
113
+ """
114
+ if function is None:
115
+ return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
116
+
117
+ batch_size = starting_batch_size
118
+
119
+ def decorator(*args, **kwargs):
120
+ nonlocal batch_size
121
+ gc.collect()
122
+ if is_xpu_available():
123
+ torch.xpu.empty_cache()
124
+ elif is_mlu_available():
125
+ torch.mlu.empty_cache()
126
+ elif is_npu_available():
127
+ torch.npu.empty_cache()
128
+ else:
129
+ torch.cuda.empty_cache()
130
+ params = list(inspect.signature(function).parameters.keys())
131
+ # Guard against user error
132
+ if len(params) < (len(args) + 1):
133
+ arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
134
+ raise TypeError(
135
+ f"Batch size was passed into `{function.__name__}` as the first argument when called."
136
+ f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
137
+ )
138
+ while True:
139
+ if batch_size == 0:
140
+ raise RuntimeError("No executable batch size found, reached zero.")
141
+ try:
142
+ return function(batch_size, *args, **kwargs)
143
+ except Exception as e:
144
+ if should_reduce_batch_size(e):
145
+ gc.collect()
146
+ if is_xpu_available():
147
+ torch.xpu.empty_cache()
148
+ elif is_mlu_available():
149
+ torch.mlu.empty_cache()
150
+ elif is_npu_available():
151
+ torch.npu.empty_cache()
152
+ else:
153
+ torch.cuda.empty_cache()
154
+ batch_size //= 2
155
+ else:
156
+ raise
157
+
158
+ return decorator
env-llmeval/lib/python3.10/site-packages/accelerate/utils/modeling.py ADDED
@@ -0,0 +1,1800 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import contextlib
16
+ import gc
17
+ import importlib
18
+ import inspect
19
+ import json
20
+ import logging
21
+ import os
22
+ import re
23
+ import shutil
24
+ import tempfile
25
+ import warnings
26
+ from collections import OrderedDict, defaultdict
27
+ from typing import Dict, List, Optional, Tuple, Union
28
+
29
+ import packaging
30
+ import torch
31
+ import torch.nn as nn
32
+
33
+ from ..state import AcceleratorState
34
+ from .constants import SAFE_WEIGHTS_NAME, WEIGHTS_NAME
35
+ from .dataclasses import AutocastKwargs, CustomDtype, DistributedType
36
+ from .imports import (
37
+ is_mlu_available,
38
+ is_mps_available,
39
+ is_npu_available,
40
+ is_peft_available,
41
+ is_torch_xla_available,
42
+ is_xpu_available,
43
+ )
44
+ from .offload import load_offloaded_weight, offload_weight, save_offload_index
45
+ from .tqdm import is_tqdm_available, tqdm
46
+ from .versions import compare_versions
47
+
48
+
49
+ if is_npu_available(check_device=False):
50
+ import torch_npu # noqa: F401
51
+
52
+ if is_mlu_available(check_device=False):
53
+ import torch_mlu # noqa: F401
54
+
55
+ from safetensors import safe_open
56
+ from safetensors.torch import load_file as safe_load_file
57
+
58
+
59
+ WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
60
+
61
+ logger = logging.getLogger(__name__)
62
+
63
+
64
+ def is_peft_model(model):
65
+ from .other import extract_model_from_parallel
66
+
67
+ if is_peft_available():
68
+ from peft import PeftModel
69
+
70
+ return is_peft_available() and isinstance(extract_model_from_parallel(model), PeftModel)
71
+
72
+
73
+ def check_device_same(first_device, second_device):
74
+ """
75
+ Utility method to check if two `torch` devices are the same. When dealing with CUDA devices, torch returns `False`
76
+ for `torch.device("cuda") == torch.device("cuda:0")` whereas they should be considered the same.
77
+
78
+ Args:
79
+ first_device (`torch.device`):
80
+ First device to check
81
+ second_device (`torch.device`):
82
+ Second device to check
83
+ """
84
+ if first_device.type != second_device.type:
85
+ return False
86
+
87
+ if first_device.type == "cuda" and first_device.index is None:
88
+ # In case the first_device is a cuda device and have
89
+ # the index attribute set to `None`, default it to `0`
90
+ first_device = torch.device("cuda", index=0)
91
+
92
+ if second_device.type == "cuda" and second_device.index is None:
93
+ # In case the second_device is a cuda device and has
94
+ # its index attribute set to `None`, default it to `0`
95
+ second_device = torch.device("cuda", index=0)
96
+
97
+ return first_device == second_device
98
+
99
+
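+ # Illustrative sketch, not part of the upstream file:
+ #     torch.device("cuda") == torch.device("cuda:0")                   # False
+ #     check_device_same(torch.device("cuda"), torch.device("cuda:0"))  # True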
100
+ def convert_file_size_to_int(size: Union[int, str]):
101
+ """
102
+ Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).
103
+
104
+ Args:
105
+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
106
+
107
+ Example:
108
+
109
+ ```py
110
+ >>> convert_file_size_to_int("1MiB")
111
+ 1048576
112
+ ```
113
+ """
114
+ mem_size = -1
115
+ err_msg = (
116
+ f"`size` {size} is not in a valid format. Use an integer for bytes, or a string with an unit (like '5.0GB')."
117
+ )
118
+ try:
119
+ if isinstance(size, int):
120
+ mem_size = size
121
+ elif size.upper().endswith("GIB"):
122
+ mem_size = int(float(size[:-3]) * (2**30))
123
+ elif size.upper().endswith("MIB"):
124
+ mem_size = int(float(size[:-3]) * (2**20))
125
+ elif size.upper().endswith("KIB"):
126
+ mem_size = int(float(size[:-3]) * (2**10))
127
+ elif size.upper().endswith("GB"):
128
+ int_size = int(float(size[:-2]) * (10**9))
129
+ mem_size = int_size // 8 if size.endswith("b") else int_size
130
+ elif size.upper().endswith("MB"):
131
+ int_size = int(float(size[:-2]) * (10**6))
132
+ mem_size = int_size // 8 if size.endswith("b") else int_size
133
+ elif size.upper().endswith("KB"):
134
+ int_size = int(float(size[:-2]) * (10**3))
135
+ mem_size = int_size // 8 if size.endswith("b") else int_size
136
+ except ValueError:
137
+ raise ValueError(err_msg)
138
+
139
+ if mem_size < 0:
140
+ raise ValueError(err_msg)
141
+ return mem_size
142
+
143
+
144
+ def dtype_byte_size(dtype: torch.dtype):
145
+ """
146
+ Returns the size (in bytes) occupied by one parameter of type `dtype`.
147
+
148
+ Example:
149
+
150
+ ```py
151
+ >>> dtype_byte_size(torch.float32)
152
+ 4
153
+ ```
154
+ """
155
+ if dtype == torch.bool:
156
+ return 1 / 8
157
+ elif dtype == CustomDtype.INT2:
158
+ return 1 / 4
159
+ elif dtype == CustomDtype.INT4:
160
+ return 1 / 2
161
+ elif dtype == CustomDtype.FP8:
162
+ return 1
163
+ bit_search = re.search(r"[^\d](\d+)$", str(dtype))
164
+ if bit_search is None:
165
+ raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
166
+ bit_size = int(bit_search.groups()[0])
167
+ return bit_size // 8
168
+
169
+
170
+ def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]:
171
+ """
172
+ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
173
+ example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is
174
+ guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
175
+ non-overlapping lifetimes may have the same id.
176
+ """
177
+ _SIZE = {
178
+ torch.int64: 8,
179
+ torch.float32: 4,
180
+ torch.int32: 4,
181
+ torch.bfloat16: 2,
182
+ torch.float16: 2,
183
+ torch.int16: 2,
184
+ torch.uint8: 1,
185
+ torch.int8: 1,
186
+ torch.bool: 1,
187
+ torch.float64: 8,
188
+ }
189
+ try:
190
+ storage_ptr = tensor.untyped_storage().data_ptr()
191
+ storage_size = tensor.untyped_storage().nbytes()
192
+ except Exception:
193
+ # Fallback for torch==1.10
194
+ try:
195
+ storage_ptr = tensor.storage().data_ptr()
196
+ storage_size = tensor.storage().size() * _SIZE[tensor.dtype]
197
+ except NotImplementedError:
198
+ # Fallback for meta storage
199
+ storage_ptr = 0
200
+ # On torch >=2.0 this is the tensor size
201
+ storage_size = tensor.nelement() * _SIZE[tensor.dtype]
202
+
203
+ return tensor.device, storage_ptr, storage_size
204
+
205
+
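+ # Illustrative sketch, not part of the upstream file: two views of the same tensor share
+ # storage and therefore get the same identifier, e.g.
+ #     t = torch.zeros(4)
+ #     id_tensor_storage(t) == id_tensor_storage(t[:2])  # True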
206
+ def shard_checkpoint(
207
+ state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME
208
+ ):
209
+ """
210
+ Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
211
+ given size.
212
+
213
+ The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
214
+ optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
215
+ limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
216
+ [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
217
+
218
+ <Tip warning={true}>
219
+
220
+ If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will
221
+ have a size greater than `max_shard_size`.
222
+
223
+ </Tip>
224
+
225
+ Args:
226
+ state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save.
227
+ max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
228
+ The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
229
+ (like `"5MB"`).
230
+ weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`):
231
+ The name of the model save file.
232
+ """
233
+ max_shard_size = convert_file_size_to_int(max_shard_size)
234
+
235
+ sharded_state_dicts = [{}]
236
+ last_block_size = 0
237
+ total_size = 0
238
+ storage_id_to_block = {}
239
+
240
+ for key, weight in state_dict.items():
241
+ # when bnb serialization is used the weights in the state dict can be strings
242
+ # check: https://github.com/huggingface/transformers/pull/24416 for more details
243
+ if isinstance(weight, str):
244
+ continue
245
+ else:
246
+ storage_id = id_tensor_storage(weight)
247
+
248
+ # If a `weight` shares the same underlying storage as another tensor, we put `weight` in the same `block`
249
+ if storage_id in storage_id_to_block:
250
+ block_id = storage_id_to_block[storage_id]
251
+ sharded_state_dicts[block_id][key] = weight
252
+ continue
253
+
254
+ weight_size = weight.numel() * dtype_byte_size(weight.dtype)
255
+
256
+ # If this weight is going to tip up over the maximal size, we split.
257
+ if last_block_size + weight_size > max_shard_size:
258
+ sharded_state_dicts.append({})
259
+ last_block_size = 0
260
+
261
+ sharded_state_dicts[-1][key] = weight
262
+ last_block_size += weight_size
263
+ total_size += weight_size
264
+ storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1
265
+
266
+ # If we only have one shard, we return it
267
+ if len(sharded_state_dicts) == 1:
268
+ return {weights_name: sharded_state_dicts[0]}, None
269
+
270
+ # Otherwise, let's build the index
271
+ weight_map = {}
272
+ shards = {}
273
+ for idx, shard in enumerate(sharded_state_dicts):
274
+ shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin")
275
+ shard_file = shard_file.replace(
276
+ ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors"
277
+ )
278
+ shards[shard_file] = shard
279
+ for key in shard.keys():
280
+ weight_map[key] = shard_file
281
+
282
+ # Add the metadata
283
+ metadata = {"total_size": total_size}
284
+ index = {"metadata": metadata, "weight_map": weight_map}
285
+ return shards, index
286
+
287
+
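+ # Illustrative sketch, not part of the upstream file: sharding a state dict and writing
+ # each shard plus the index to disk (`model` and `save_dir` are placeholders), e.g.
+ #     shards, index = shard_checkpoint(model.state_dict(), max_shard_size="2GB")
+ #     for name, shard in shards.items():
+ #         torch.save(shard, os.path.join(save_dir, name))
+ #     if index is not None:
+ #         with open(os.path.join(save_dir, WEIGHTS_INDEX_NAME), "w") as f:
+ #             json.dump(index, f, indent=2)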
288
+ def set_module_tensor_to_device(
289
+ module: nn.Module,
290
+ tensor_name: str,
291
+ device: Union[int, str, torch.device],
292
+ value: Optional[torch.Tensor] = None,
293
+ dtype: Optional[Union[str, torch.dtype]] = None,
294
+ fp16_statistics: Optional[torch.HalfTensor] = None,
295
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
296
+ ):
297
+ """
298
+ A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
299
+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).
300
+
301
+ Args:
302
+ module (`torch.nn.Module`):
303
+ The module in which the tensor we want to move lives.
304
+ tensor_name (`str`):
305
+ The full name of the parameter/buffer.
306
+ device (`int`, `str` or `torch.device`):
307
+ The device on which to set the tensor.
308
+ value (`torch.Tensor`, *optional*):
309
+ The value of the tensor (useful when going from the meta device to any other device).
310
+ dtype (`torch.dtype`, *optional*):
311
+ If passed along, the value of the parameter will be cast to this `dtype`. Otherwise, `value` will be cast to
312
+ the dtype of the existing parameter in the model.
313
+ fp16_statistics (`torch.HalfTensor`, *optional*):
314
+ The list of fp16 statistics to set on the module, used for 8 bit model serialization.
315
+ tied_params_map (Dict[int, Dict[torch.device, torch.Tensor]], *optional*, defaults to `None`):
316
+ A map of current data pointers to dictionaries of devices to already dispatched tied weights. For a given
317
+ execution device, this parameter is useful to reuse the first available pointer of a shared weight on the
318
+ device for all others, instead of duplicating memory.
319
+ """
320
+ # Recurse if needed
321
+ if "." in tensor_name:
322
+ splits = tensor_name.split(".")
323
+ for split in splits[:-1]:
324
+ new_module = getattr(module, split)
325
+ if new_module is None:
326
+ raise ValueError(f"{module} has no attribute {split}.")
327
+ module = new_module
328
+ tensor_name = splits[-1]
329
+
330
+ if tensor_name not in module._parameters and tensor_name not in module._buffers:
331
+ raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
332
+ is_buffer = tensor_name in module._buffers
333
+ old_value = getattr(module, tensor_name)
334
+
335
+ # Treat the case where old_value (or a custom `value`, typically offloaded to RAM/disk) belongs to a tied group, and one of the weight
336
+ # in the tied group has already been dispatched to the device, by avoiding reallocating memory on the device and just copying the pointer.
337
+ if (
338
+ value is not None
339
+ and tied_params_map is not None
340
+ and value.data_ptr() in tied_params_map
341
+ and device in tied_params_map[value.data_ptr()]
342
+ ):
343
+ module._parameters[tensor_name] = tied_params_map[value.data_ptr()][device]
344
+ return
345
+ elif (
346
+ tied_params_map is not None
347
+ and old_value.data_ptr() in tied_params_map
348
+ and device in tied_params_map[old_value.data_ptr()]
349
+ ):
350
+ module._parameters[tensor_name] = tied_params_map[old_value.data_ptr()][device]
351
+ return
352
+
353
+ if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
354
+ raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")
355
+
356
+ if value is not None:
357
+ if old_value.shape != value.shape:
358
+ raise ValueError(
359
+ f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this looks incorrect.'
360
+ )
361
+
362
+ if dtype is None:
363
+ # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model
364
+ value = value.to(old_value.dtype)
365
+ elif not str(value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
366
+ value = value.to(dtype)
367
+
368
+ param = module._parameters[tensor_name] if tensor_name in module._parameters else None
369
+ param_cls = type(param)
370
+
371
+ device_quantization = None
372
+ with torch.no_grad():
373
+ # leave it on cpu first before moving it to cuda
+ # fix the case where the device is meta: we don't want to put it on cpu because there is no data
375
+ if (
376
+ param is not None
377
+ and param.device.type != "cuda"
378
+ and torch.device(device).type == "cuda"
379
+ and param_cls.__name__ in ["Int8Params", "FP4Params", "Params4bit"]
380
+ ):
381
+ device_quantization = device
382
+ device = "cpu"
383
+ # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
384
+ if is_npu_available() and isinstance(device, int):
385
+ device = f"npu:{device}"
386
+ elif is_mlu_available() and isinstance(device, int):
387
+ device = f"mlu:{device}"
388
+ if is_xpu_available() and isinstance(device, int):
389
+ device = f"xpu:{device}"
390
+ if value is None:
391
+ new_value = old_value.to(device)
392
+ if dtype is not None and device in ["meta", torch.device("meta")]:
393
+ if not str(old_value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
394
+ new_value = new_value.to(dtype)
395
+
396
+ if not is_buffer:
397
+ module._parameters[tensor_name] = param_cls(new_value, requires_grad=old_value.requires_grad)
398
+ elif isinstance(value, torch.Tensor):
399
+ new_value = value.to(device)
400
+ else:
401
+ new_value = torch.tensor(value, device=device)
402
+ if device_quantization is not None:
403
+ device = device_quantization
404
+ if is_buffer:
405
+ module._buffers[tensor_name] = new_value
406
+ elif value is not None or not check_device_same(torch.device(device), module._parameters[tensor_name].device):
407
+ param_cls = type(module._parameters[tensor_name])
408
+ kwargs = module._parameters[tensor_name].__dict__
409
+ if param_cls.__name__ in ["Int8Params", "FP4Params"]:
410
+ if param_cls.__name__ == "Int8Params" and new_value.dtype == torch.float32:
411
+ # downcast to fp16 if any - needed for 8bit serialization
412
+ new_value = new_value.to(torch.float16)
413
+ # quantize modules that are going to stay on the cpu so that we offload quantized weights
414
+ if device == "cpu" and param_cls.__name__ == "Int8Params":
415
+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(0).to("cpu")
416
+ new_value.CB = new_value.CB.to("cpu")
417
+ new_value.SCB = new_value.SCB.to("cpu")
418
+ else:
419
+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)
420
+ elif param_cls.__name__ in ["QTensor", "QBitsTensor"]:
421
+ new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad).to(device)
422
+ else:
423
+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad).to(device)
424
+
425
+ module._parameters[tensor_name] = new_value
426
+ if fp16_statistics is not None:
427
+ module._parameters[tensor_name].SCB = fp16_statistics.to(device)
428
+ del fp16_statistics
429
+ # as we put the weight on meta, it doesn't have an SCB attr anymore. Make sure that it is not a meta weight
430
+ if (
431
+ module.__class__.__name__ == "Linear8bitLt"
432
+ and getattr(module.weight, "SCB", None) is None
433
+ and str(module.weight.device) != "meta"
434
+ ):
435
+ # quantize only if necessary
436
+ device_index = torch.device(device).index if torch.device(device).type == "cuda" else None
437
+ if not getattr(module.weight, "SCB", None) and device_index is not None:
438
+ if module.bias is not None and module.bias.device.type != "meta":
439
+ # if a bias exists, we need to wait until the bias is set on the correct device
440
+ module = module.cuda(device_index)
441
+ elif module.bias is None:
442
+ # if no bias exists, we can quantize right away
443
+ module = module.cuda(device_index)
444
+ elif module.__class__.__name__ == "Linear4bit" and getattr(module.weight, "quant_state", None) is None:
445
+ # quantize only if necessary
446
+ device_index = torch.device(device).index if torch.device(device).type == "cuda" else None
447
+ if not getattr(module.weight, "quant_state", None) and device_index is not None:
448
+ module.weight = module.weight.cuda(device_index)
449
+ # clean pre and post forward hooks
450
+ if is_npu_available():
451
+ torch.npu.empty_cache()
452
+ elif is_mlu_available():
453
+ torch.mlu.empty_cache()
454
+ elif is_xpu_available():
455
+ torch.xpu.empty_cache()
456
+ else:
457
+ torch.cuda.empty_cache()
458
+
459
+ # When handling tied weights, we update tied_params_map to keep track of the tied weights that have already been allocated on the device in
460
+ # order to avoid duplicating memory, see above.
461
+ if (
462
+ tied_params_map is not None
463
+ and old_value.data_ptr() in tied_params_map
464
+ and device not in tied_params_map[old_value.data_ptr()]
465
+ ):
466
+ tied_params_map[old_value.data_ptr()][device] = new_value
467
+ elif (
468
+ value is not None
469
+ and tied_params_map is not None
470
+ and value.data_ptr() in tied_params_map
471
+ and device not in tied_params_map[value.data_ptr()]
472
+ ):
473
+ tied_params_map[value.data_ptr()][device] = new_value
474
+
475
+
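A minimal sketch of the helper above in action, assuming `set_module_tensor_to_device` is importable from `accelerate.utils` as in upstream accelerate: a meta-initialized layer is materialized on CPU by supplying a real `value` for each tensor.

```py
import torch
import torch.nn as nn
from accelerate.utils import set_module_tensor_to_device  # assumed import path

with torch.device("meta"):
    layer = nn.Linear(4, 4)

# Meta tensors carry no data, so a real `value` must be supplied when leaving "meta".
set_module_tensor_to_device(layer, "weight", "cpu", value=torch.randn(4, 4))
set_module_tensor_to_device(layer, "bias", "cpu", value=torch.zeros(4))
print(layer.weight.device)  # cpu
```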
476
+ def named_module_tensors(
477
+ module: nn.Module, include_buffers: bool = True, recurse: bool = False, remove_non_persistent: bool = False
478
+ ):
479
+ """
480
+ A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True`
481
+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.
482
+
483
+ Args:
484
+ module (`torch.nn.Module`):
485
+ The module we want the tensors on.
486
+ include_buffers (`bool`, *optional*, defaults to `True`):
+ Whether or not to include the buffers in the result.
+ recurse (`bool`, *optional*, defaults to `False`):
489
+ Whether or not to go look in every submodule or just return the direct parameters and buffers.
490
+ remove_non_persistent (`bool`, *optional*, defaults to `False`):
491
+ Whether or not to remove the non-persistent buffers from the buffers. Only useful when
+ `include_buffers=True`.
493
+ """
494
+ yield from module.named_parameters(recurse=recurse)
495
+
496
+ if include_buffers:
497
+ non_persistent_buffers = set()
498
+ if remove_non_persistent:
499
+ non_persistent_buffers = get_non_persistent_buffers(module, recurse=recurse)
500
+ for named_buffer in module.named_buffers(recurse=recurse):
501
+ name, _ = named_buffer
502
+ if name not in non_persistent_buffers:
503
+ yield named_buffer
504
+
505
+
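A short sketch of iterating a module's parameters and buffers with the generator above; the import path is an assumption.

```py
import torch.nn as nn
from accelerate.utils import named_module_tensors  # assumed import path

bn = nn.BatchNorm1d(4)
for name, tensor in named_module_tensors(bn, include_buffers=True):
    print(name, tuple(tensor.shape))
# parameters (weight, bias) followed by buffers (running_mean, running_var, num_batches_tracked)
```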
506
+ def get_non_persistent_buffers(module: nn.Module, recurse: bool = False):
507
+ """
508
+ Gather all non-persistent buffers of a given module into a set.
509
+
510
+ Args:
511
+ module (`nn.Module`):
512
+ The module we want the non persistent buffers on.
513
+ recurse (`bool`, *optional*, defaults to `False`):
514
+ Whether or not to go look in every submodule or just return the direct non persistent buffers.
515
+ """
516
+
517
+ non_persistent_buffers_set = module._non_persistent_buffers_set
518
+ if recurse:
519
+ for _, m in module.named_modules():
520
+ non_persistent_buffers_set |= m._non_persistent_buffers_set
521
+
522
+ return non_persistent_buffers_set
523
+
524
+
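A small sketch of how non-persistent buffers are reported, assuming the function is imported from this module (`accelerate.utils.modeling` upstream).

```py
import torch
import torch.nn as nn
from accelerate.utils.modeling import get_non_persistent_buffers  # assumed import path

m = nn.Module()
m.register_buffer("scale", torch.ones(1), persistent=False)
m.register_buffer("offset", torch.zeros(1))  # persistent, so not reported
print(get_non_persistent_buffers(m))  # {'scale'}
```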
525
+ class FindTiedParametersResult(list):
526
+ """
527
+ This is a subclass of a list to handle backward compatibility for Transformers. Do not rely on it being anything
+ more than a list, or on the `values` method, as this will be removed in the future.
529
+ """
530
+
531
+ def __init__(self, *args, **kwargs):
532
+ super().__init__(*args, **kwargs)
533
+
534
+ def values(self):
535
+ # TODO: at the next Transformers release (4.28.0) issue a deprecation warning here.
536
+ return sum([x[1:] for x in self], [])
537
+
538
+
539
+ def check_tied_parameters_in_config(model: nn.Module):
540
+ """
541
+ Check if there is any indication in the given model that some weights should be tied.
542
+
543
+ Args:
544
+ model (`torch.nn.Module`): The model to inspect
545
+
546
+ Returns:
547
+ bool: True if the model needs to have tied weights
548
+ """
549
+
550
+ # based on model.tie_weights() method
551
+ has_tied_word_embedding = False
552
+ has_tied_encoder_decoder = False
553
+ has_tied_module = False
554
+
555
+ if "PreTrainedModel" in [c.__name__ for c in inspect.getmro(model.__class__)]:
556
+ has_tied_word_embedding = (
557
+ hasattr(model, "config")
558
+ and getattr(model.config, "tie_word_embeddings", False)
559
+ and model.get_output_embeddings()
560
+ )
561
+ has_tied_encoder_decoder = (
562
+ hasattr(model, "config")
563
+ and getattr(model.config, "is_encoder_decoder", False)
564
+ and getattr(model.config, "tie_encoder_decoder", False)
565
+ )
566
+ has_tied_module = any(hasattr(module, "_tie_weights") for module in model.modules())
567
+
568
+ return any([has_tied_word_embedding, has_tied_encoder_decoder, has_tied_module])
569
+
570
+
571
+ def _get_param_device(param, device_map):
572
+ if param in device_map:
573
+ return device_map[param]
574
+ parent_param = ".".join(param.split(".")[:-1])
575
+ if parent_param == param:
576
+ raise ValueError(f"The `device_map` does not contain the module {param}.")
577
+ else:
578
+ return _get_param_device(parent_param, device_map)
579
+
580
+
581
+ def check_tied_parameters_on_same_device(tied_params, device_map):
582
+ """
583
+ Check if tied parameters are on the same device
584
+
585
+ Args:
586
+ tied_params (`List[List[str]]`):
587
+ A list of lists of parameter names being all tied together.
588
+
589
+ device_map (`Dict[str, Union[int, str, torch.device]]`):
590
+ A map that specifies where each submodule should go.
591
+
592
+ """
593
+ for tie_param in tied_params:
594
+ tie_param_devices = {}
595
+ for param in tie_param:
596
+ tie_param_devices[param] = _get_param_device(param, device_map)
597
+ if len(set(tie_param_devices.values())) > 1:
598
+ logger.warn(
599
+ f"Tied parameters are on different devices: {tie_param_devices}. "
600
+ "Please modify your custom device map or set `device_map='auto'`. "
601
+ )
602
+
603
+
604
+ def find_tied_parameters(model: nn.Module, **kwargs):
605
+ """
606
+ Find the tied parameters in a given model.
607
+
608
+ <Tip warning={true}>
609
+
610
+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore
611
+ them.
612
+
613
+ </Tip>
614
+
615
+ Args:
616
+ model (`torch.nn.Module`): The model to inspect.
617
+
618
+ Returns:
619
+ List[List[str]]: A list of lists of parameter names being all tied together.
620
+
621
+ Example:
622
+
623
+ ```py
624
+ >>> from collections import OrderedDict
625
+ >>> import torch.nn as nn
626
+
627
+ >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))]))
628
+ >>> model.linear2.weight = model.linear1.weight
629
+ >>> find_tied_parameters(model)
630
+ [['linear1.weight', 'linear2.weight']]
631
+ ```
632
+ """
633
+ # Initialize result and named_parameters before recursing.
634
+ named_parameters = kwargs.get("named_parameters", None)
635
+ prefix = kwargs.get("prefix", "")
636
+ result = kwargs.get("result", {})
637
+
638
+ if named_parameters is None:
639
+ named_parameters = {n: p for n, p in model.named_parameters()}
640
+ else:
641
+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`
642
+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial
643
+ # `named_parameters`.
644
+ for name, parameter in model.named_parameters():
645
+ full_name = name if prefix == "" else f"{prefix}.{name}"
646
+ if full_name not in named_parameters:
647
+ # When we find one, it has to be one of the existing parameters.
648
+ for new_name, new_param in named_parameters.items():
649
+ if new_param is parameter:
650
+ if new_name not in result:
651
+ result[new_name] = []
652
+ result[new_name].append(full_name)
653
+
654
+ # Once we have treated direct parameters, we move to the child modules.
655
+ for name, child in model.named_children():
656
+ child_name = name if prefix == "" else f"{prefix}.{name}"
657
+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)
658
+
659
+ return FindTiedParametersResult([sorted([weight] + list(set(tied))) for weight, tied in result.items()])
660
+
661
+
662
+ def retie_parameters(model, tied_params):
663
+ """
664
+ Reties tied parameters in a given model if the link was broken (for instance when adding hooks).
665
+
666
+ Args:
667
+ model (`torch.nn.Module`):
668
+ The model in which to retie parameters.
669
+ tied_params (`List[List[str]]`):
670
+ A list of lists of parameter names that are tied together, as obtained by `find_tied_parameters`.
671
+ """
672
+ for tied_group in tied_params:
673
+ param_to_tie = None
674
+ # two loops: the first one to set param_to_tie, the second one to change the values of tied_group
675
+ for param_name in tied_group:
676
+ module = model
677
+ splits = param_name.split(".")
678
+ for split in splits[:-1]:
679
+ module = getattr(module, split)
680
+ param = getattr(module, splits[-1])
681
+ if param_to_tie is None and param.device != torch.device("meta"):
682
+ param_to_tie = param
683
+ break
684
+ if param_to_tie is not None:
685
+ for param_name in tied_group:
686
+ module = model
687
+ splits = param_name.split(".")
688
+ for split in splits[:-1]:
689
+ module = getattr(module, split)
690
+ setattr(module, splits[-1], param_to_tie)
691
+
692
+
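A sketch of finding and re-tying shared weights with the two helpers above; the import path is an assumption.

```py
import torch.nn as nn
from accelerate.utils import find_tied_parameters, retie_parameters  # assumed import path

model = nn.Sequential(nn.Embedding(10, 8), nn.Linear(8, 10))
model[1].weight = model[0].weight                  # tie the two weights
tied = find_tied_parameters(model)                 # [['0.weight', '1.weight']]
model[1].weight = nn.Parameter(model[1].weight.detach().clone())  # simulate a broken tie
retie_parameters(model, tied)
assert model[1].weight is model[0].weight
```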
693
+ def _get_proper_dtype(dtype: Union[str, torch.device]) -> torch.dtype:
694
+ """
695
+ Converts `dtype` to a `torch.dtype` if it was passed as a string (e.g. "float16" or "torch.float16").
696
+ """
697
+ if isinstance(dtype, str):
698
+ # We accept "torch.float16" or just "float16"
699
+ dtype = dtype.replace("torch.", "")
700
+ dtype = getattr(torch, dtype)
701
+ return dtype
702
+
703
+
704
+ def compute_module_sizes(
705
+ model: nn.Module,
706
+ dtype: Optional[Union[str, torch.device]] = None,
707
+ special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None,
708
+ buffers_only: bool = False,
709
+ ):
710
+ """
711
+ Compute the size of each submodule of a given model.
712
+ """
713
+ if dtype is not None:
714
+ dtype = _get_proper_dtype(dtype)
715
+ dtype_size = dtype_byte_size(dtype)
716
+ if special_dtypes is not None:
717
+ special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()}
718
+ special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()}
719
+ module_sizes = defaultdict(int)
720
+
721
+ module_list = []
722
+
723
+ if not buffers_only:
724
+ module_list = named_module_tensors(model, recurse=True)
725
+ else:
726
+ module_list = model.named_buffers(recurse=True)
727
+
728
+ for name, tensor in module_list:
729
+ if special_dtypes is not None and name in special_dtypes:
730
+ size = tensor.numel() * special_dtypes_size[name]
731
+ elif dtype is None:
732
+ size = tensor.numel() * dtype_byte_size(tensor.dtype)
733
+ elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
734
+ # According to the code in set_module_tensor_to_device, these types won't be converted
735
+ # so use their original size here
736
+ size = tensor.numel() * dtype_byte_size(tensor.dtype)
737
+ else:
738
+ size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype))
739
+ name_parts = name.split(".")
740
+ for idx in range(len(name_parts) + 1):
741
+ module_sizes[".".join(name_parts[:idx])] += size
742
+
743
+ return module_sizes
744
+
745
+
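A sketch of the per-module size bookkeeping above, assuming the function is importable from `accelerate.utils`; sizes are in bytes and the empty key holds the total.

```py
import torch.nn as nn
from accelerate.utils import compute_module_sizes  # assumed import path

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
sizes = compute_module_sizes(model, dtype="float16")
print(sizes[""])   # total model size in bytes (counted at 2 bytes per element here)
print(sizes["0"])  # size of the first Linear layer only
```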
746
+ def compute_module_total_buffer_size(
747
+ model: nn.Module,
748
+ dtype: Optional[Union[str, torch.device]] = None,
749
+ special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None,
750
+ ):
751
+ """
752
+ Compute the total size of buffers in each submodule of a given model.
753
+ """
754
+ module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes, buffers_only=True)
755
+ return module_sizes.get("", 0)
756
+
757
+
758
+ def get_max_layer_size(
759
+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]
760
+ ):
761
+ """
762
+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. The
763
+ definition of a layer being:
764
+ - a module with no direct children (just parameters and buffers)
765
+ - a module whose class name is in the list `no_split_module_classes`
766
+
767
+ Args:
768
+ modules (`List[Tuple[str, torch.nn.Module]]`):
769
+ The list of named modules where we want to determine the maximum layer size.
770
+ module_sizes (`Dict[str, int]`):
771
+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).
772
+ no_split_module_classes (`List[str]`):
773
+ A list of class names for layers we don't want to be split.
774
+
775
+ Returns:
776
+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.
777
+ """
778
+ max_size = 0
779
+ layer_names = []
780
+ modules_to_treat = modules.copy()
781
+ while len(modules_to_treat) > 0:
782
+ module_name, module = modules_to_treat.pop(0)
783
+ modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else []
784
+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
785
+ # No splitting this one so we compare to the max_size
786
+ size = module_sizes[module_name]
787
+ if size > max_size:
788
+ max_size = size
789
+ layer_names = [module_name]
790
+ elif size == max_size:
791
+ layer_names.append(module_name)
792
+ else:
793
+ modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat
794
+ return max_size, layer_names
795
+
796
+
797
+ def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):
798
+ """
799
+ Get the maximum memory available if nothing is passed, otherwise convert the string values to ints.
800
+ """
801
+ import psutil
802
+
803
+ if max_memory is None:
804
+ if not (torch.cuda.is_available() or is_npu_available() or is_mlu_available() or is_xpu_available()):
805
+ max_memory = {}
806
+
807
+ else:
808
+ # Make sure CUDA is initialized on each GPU to have the right memory info.
809
+ if is_npu_available():
810
+ for i in range(torch.npu.device_count()):
811
+ _ = torch.tensor(0, device=torch.device("npu", i))
812
+ max_memory = {i: torch.npu.mem_get_info(i)[0] for i in range(torch.npu.device_count())}
813
+ elif is_mlu_available():
814
+ for i in range(torch.mlu.device_count()):
815
+ _ = torch.tensor(0, device=torch.device("mlu", i))
816
+ max_memory = {i: torch.mlu.mem_get_info(i)[0] for i in range(torch.mlu.device_count())}
817
+ elif is_xpu_available():
818
+ for i in range(torch.xpu.device_count()):
819
+ _ = torch.tensor(0, device=torch.device("xpu", i))
820
+ max_memory = {i: torch.xpu.max_memory_allocated(i) for i in range(torch.xpu.device_count())}
821
+ else:
822
+ for i in range(torch.cuda.device_count()):
823
+ _ = torch.tensor([0], device=i)
824
+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}
825
+ # allocate everything in the mps device as the RAM is shared
826
+ if is_mps_available():
827
+ max_memory["mps"] = psutil.virtual_memory().available
828
+ else:
829
+ max_memory["cpu"] = psutil.virtual_memory().available
830
+ return max_memory
831
+
832
+ for key in max_memory:
833
+ if isinstance(max_memory[key], str):
834
+ max_memory[key] = convert_file_size_to_int(max_memory[key])
835
+
836
+ # Need to sort the device by type to make sure that we allocate the gpu first.
837
+ # As gpu/npu/xpu are represented by int, we need to sort them first.
838
+ gpu_devices = [k for k in max_memory.keys() if isinstance(k, int)]
839
+ gpu_devices.sort()
840
+ # check if gpu/npu/xpu devices are available and if not, throw a warning
841
+ if is_npu_available():
842
+ num_devices = torch.npu.device_count()
843
+ elif is_mlu_available():
844
+ num_devices = torch.mlu.device_count()
845
+ elif is_xpu_available():
846
+ num_devices = torch.xpu.device_count()
847
+ else:
848
+ num_devices = torch.cuda.device_count()
849
+ for device in gpu_devices:
850
+ if device >= num_devices or device < 0:
851
+ logger.warning(f"Device {device} is not available, available devices are {list(range(num_devices))}")
852
+ # Add the other devices in the preset order if they are available
853
+ all_devices = gpu_devices + [k for k in ["mps", "cpu", "disk"] if k in max_memory.keys()]
854
+ # Raise an error if a device is not recognized
855
+ for k in max_memory.keys():
856
+ if k not in all_devices:
857
+ raise ValueError(
858
+ f"Device {k} is not recognized, available devices are integers(for GPU/XPU), 'mps', 'cpu' and 'disk'"
859
+ )
860
+ max_memory = {k: max_memory[k] for k in all_devices}
861
+
862
+ return max_memory
863
+
864
+
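A sketch of how human-readable limits flow through the function above (import path assumed): string values are converted to byte counts and device keys are reordered GPU-first.

```py
from accelerate.utils import get_max_memory  # assumed import path

# A warning is logged if GPU 0 is not actually present on the machine.
max_memory = get_max_memory({0: "10GiB", "cpu": "30GiB"})
print(max_memory)  # {0: 10737418240, 'cpu': 32212254720}
```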
865
+ def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = ""):
866
+ """
867
+ Cleans a device_map by grouping all submodules that go on the same device together.
868
+ """
869
+ # Get the value of the current module and if there is only one split across several keys, regroup it.
870
+ prefix = "" if module_name == "" else f"{module_name}."
871
+ values = [v for k, v in device_map.items() if k.startswith(prefix)]
872
+ if len(set(values)) == 1 and len(values) > 1:
873
+ for k in [k for k in device_map if k.startswith(prefix)]:
874
+ del device_map[k]
875
+ device_map[module_name] = values[0]
876
+
877
+ # Recurse over the children
878
+ children_modules = [k for k in device_map.keys() if k.startswith(prefix) and len(k) > len(module_name)]
879
+ idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1
880
+ children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules)
881
+ for child in children_modules:
882
+ clean_device_map(device_map, module_name=child)
883
+
884
+ return device_map
885
+
886
+
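A sketch of the grouping behaviour, assuming the function is imported from this module (`accelerate.utils.modeling` upstream).

```py
from accelerate.utils.modeling import clean_device_map  # assumed import path

device_map = {"block.linear1": 0, "block.linear2": 0, "head": "cpu"}
print(clean_device_map(device_map))
# both "block.*" entries collapse into a single {"block": 0} entry; "head" stays on "cpu"
```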
887
+ def load_offloaded_weights(model, index, offload_folder):
888
+ """
889
+ Loads the weights from the offload folder into the model.
890
+
891
+ Args:
892
+ model (`torch.nn.Module`):
893
+ The model to load the weights into.
894
+ index (`dict`):
895
+ A dictionary containing the parameter name and its metadata for each parameter that was offloaded from the
896
+ model.
897
+ offload_folder (`str`):
898
+ The folder where the offloaded weights are stored.
899
+ """
900
+ if index is None or len(index) == 0:
901
+ # Nothing to do
902
+ return
903
+ for param_name, metadata in index.items():
904
+ if "SCB" in param_name:
905
+ continue
906
+ fp16_statistics = None
907
+ if "weight" in param_name and param_name.replace("weight", "SCB") in index.keys():
908
+ weight_name = param_name.replace("weight", "SCB")
909
+ fp16_statistics = load_offloaded_weight(
910
+ os.path.join(offload_folder, f"{weight_name}.dat"), index[weight_name]
911
+ )
912
+ tensor_file = os.path.join(offload_folder, f"{param_name}.dat")
913
+ weight = load_offloaded_weight(tensor_file, metadata)
914
+ set_module_tensor_to_device(model, param_name, "cpu", value=weight, fp16_statistics=fp16_statistics)
915
+
916
+
917
+ def get_balanced_memory(
918
+ model: nn.Module,
919
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
920
+ no_split_module_classes: Optional[List[str]] = None,
921
+ dtype: Optional[Union[str, torch.dtype]] = None,
922
+ special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None,
923
+ low_zero: bool = False,
924
+ ):
925
+ """
926
+ Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU.
927
+
928
+ <Tip>
929
+
930
+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
931
+ meta device (as it would if initialized within the `init_empty_weights` context manager).
932
+
933
+ </Tip>
934
+
935
+ Args:
936
+ model (`torch.nn.Module`):
937
+ The model to analyze.
938
+ max_memory (`Dict`, *optional*):
939
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.
940
+ Example: `max_memory={0: "1GB"}`.
941
+ no_split_module_classes (`List[str]`, *optional*):
942
+ A list of layer class names that should never be split across device (for instance any layer that has a
943
+ residual connection).
944
+ dtype (`str` or `torch.dtype`, *optional*):
945
+ If provided, the weights will be converted to that type when loaded.
946
+ special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*):
947
+ If provided, special dtypes to consider for some specific weights (will override dtype used as default for
948
+ all weights).
949
+ low_zero (`bool`, *optional*):
950
+ Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like the
951
+ Transformers generate function).
952
+ """
953
+ # Get default / clean up max_memory
954
+ user_not_set_max_memory = max_memory is None
955
+ max_memory = get_max_memory(max_memory)
956
+
957
+ if is_npu_available():
958
+ num_devices = len([d for d in max_memory if torch.device(d).type == "npu" and max_memory[d] > 0])
959
+ elif is_mlu_available():
960
+ num_devices = len([d for d in max_memory if torch.device(d).type == "mlu" and max_memory[d] > 0])
961
+ elif is_xpu_available():
962
+ num_devices = len(
963
+ [
964
+ d
965
+ for d in max_memory
966
+ if (
967
+ d != "cpu"
968
+ and (torch.device(d).type == "xpu" or torch.xpu.get_device_properties(d).dev_type == "gpu")
969
+ )
970
+ and max_memory[d] > 0
971
+ ]
972
+ )
973
+ else:
974
+ num_devices = len([d for d in max_memory if torch.device(d).type == "cuda" and max_memory[d] > 0])
975
+
976
+ if num_devices == 0:
977
+ return max_memory
978
+
979
+ if num_devices == 1:
980
+ # We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer
981
+ low_zero = False
982
+ # If user just asked us to handle memory usage, we should avoid OOM
983
+ if user_not_set_max_memory:
984
+ for key in max_memory.keys():
985
+ if isinstance(key, int):
986
+ max_memory[key] *= 0.9 # 90% is a good compromise
987
+ logger.info(
988
+ f"We will use 90% of the memory on device {key} for storing the model, and 10% for the buffer to avoid OOM. "
989
+ "You can set `max_memory` in to a higher value to use more memory (at your own risk)."
990
+ )
991
+ break # only one device
992
+
993
+ module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes)
994
+ per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices)
995
+
996
+ # We can't just set the memory to model_size // num_devices as it will end up being too small: each GPU will get
+ # slightly fewer layers and some layers will end up offloaded at the end. So this function computes a buffer size to
+ # add, which is the biggest of:
999
+ # - the size of no split block (if applicable)
1000
+ # - the mean of the layer sizes
1001
+ if no_split_module_classes is None:
1002
+ no_split_module_classes = []
1003
+ elif not isinstance(no_split_module_classes, (list, tuple)):
1004
+ no_split_module_classes = [no_split_module_classes]
1005
+
1006
+ # Identify the size of the no_split_block modules
1007
+ if len(no_split_module_classes) > 0:
1008
+ no_split_children = {}
1009
+ for name, size in module_sizes.items():
1010
+ if name == "":
1011
+ continue
1012
+ submodule = model
1013
+ for submodule_name in name.split("."):
1014
+ submodule = getattr(submodule, submodule_name)
1015
+ class_name = submodule.__class__.__name__
1016
+ if class_name in no_split_module_classes and class_name not in no_split_children:
1017
+ no_split_children[class_name] = size
1018
+
1019
+ if set(no_split_children.keys()) == set(no_split_module_classes):
1020
+ break
1021
+ buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0
1022
+ else:
1023
+ buffer = 0
1024
+
1025
+ # Compute mean of final modules. In the first dict of module sizes, leaves are the parameters
1026
+ leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0]
1027
+ module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves}
1028
+ # Once removed, leaves are the final modules.
1029
+ leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0]
1030
+ mean_leaves = int(sum([module_sizes[n] for n in leaves]) / max(len(leaves), 1))
1031
+ buffer = int(1.25 * max(buffer, mean_leaves))
1032
+ per_gpu += buffer
1033
+
1034
+ # Sorted list of GPU ids (we may have some gpu ids not included in our max_memory list - let's ignore them)
1035
+ gpus_idx_list = list(
1036
+ sorted(
1037
+ device_id for device_id, device_mem in max_memory.items() if isinstance(device_id, int) and device_mem > 0
1038
+ )
1039
+ )
1040
+ # The last device is left with max_memory just in case the buffer is not enough.
1041
+ for idx in gpus_idx_list[:-1]:
1042
+ max_memory[idx] = min(max_memory[0] if low_zero and idx == 0 else per_gpu, max_memory[idx])
1043
+
1044
+ if low_zero:
1045
+ min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)]))
1046
+ max_memory[0] = min(min_zero, max_memory[0])
1047
+
1048
+ return max_memory
1049
+
1050
+
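A sketch of balancing a model across two hypothetical GPU budgets and feeding the result to `infer_auto_device_map` (defined later in this file); the import paths are assumptions.

```py
import torch.nn as nn
from accelerate.utils import get_balanced_memory, infer_auto_device_map  # assumed import path

model = nn.Sequential(*[nn.Linear(256, 256) for _ in range(8)])
balanced = get_balanced_memory(model, max_memory={0: "1GiB", 1: "1GiB", "cpu": "4GiB"})
device_map = infer_auto_device_map(model, max_memory=balanced)
```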
1051
+ def calculate_maximum_sizes(model: torch.nn.Module):
1052
+ "Computes the total size of the model and its largest layer"
1053
+ sizes = compute_module_sizes(model)
1054
+ # `transformers` models store this information for us
1055
+ no_split_modules = getattr(model, "_no_split_modules", None)
1056
+ if no_split_modules is None:
1057
+ no_split_modules = []
1058
+
1059
+ modules_to_treat = (
1060
+ list(model.named_parameters(recurse=False))
1061
+ + list(model.named_children())
1062
+ + list(model.named_buffers(recurse=False))
1063
+ )
1064
+ largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules)
1065
+ total_size = sizes[""]
1066
+ return total_size, largest_layer
1067
+
1068
+
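A sketch of the two quantities returned above (total size and the largest non-splittable layer); the import path is an assumption.

```py
import torch.nn as nn
from accelerate.utils import calculate_maximum_sizes  # assumed import path

model = nn.Sequential(nn.Linear(128, 128), nn.Linear(128, 64))
total_size, (largest_size, largest_names) = calculate_maximum_sizes(model)
print(total_size, largest_size, largest_names)  # sizes in bytes, names of the largest layer(s)
```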
1069
+ def infer_auto_device_map(
1070
+ model: nn.Module,
1071
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
1072
+ no_split_module_classes: Optional[List[str]] = None,
1073
+ dtype: Optional[Union[str, torch.dtype]] = None,
1074
+ special_dtypes: Optional[Dict[str, Union[str, torch.dtype]]] = None,
1075
+ verbose: bool = False,
1076
+ clean_result: bool = True,
1077
+ offload_buffers: bool = False,
1078
+ ):
1079
+ """
1080
+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,
1081
+ such that:
1082
+ - we don't exceed the memory available on any of the GPUs.
+ - if offloading to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that
+ has the largest size.
+ - if offloading to the CPU is needed, we don't exceed the RAM available on the CPU.
+ - if offloading to disk is needed, there is always room left on the CPU to put back the layer offloaded on disk
+ that has the largest size.
1088
+
1089
+ <Tip>
1090
+
1091
+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
1092
+ meta device (as it would if initialized within the `init_empty_weights` context manager).
1093
+
1094
+ </Tip>
1095
+
1096
+ Args:
1097
+ model (`torch.nn.Module`):
1098
+ The model to analyze.
1099
+ max_memory (`Dict`, *optional*):
1100
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.
1101
+ Example: `max_memory={0: "1GB"}`.
1102
+ no_split_module_classes (`List[str]`, *optional*):
1103
+ A list of layer class names that should never be split across device (for instance any layer that has a
1104
+ residual connection).
1105
+ dtype (`str` or `torch.dtype`, *optional*):
1106
+ If provided, the weights will be converted to that type when loaded.
1107
+ special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*):
1108
+ If provided, special dtypes to consider for some specific weights (will override dtype used as default for
1109
+ all weights).
1110
+ verbose (`bool`, *optional*, defaults to `False`):
1111
+ Whether or not to provide debugging statements as the function builds the device_map.
1112
+ clean_result (`bool`, *optional*, defaults to `True`):
1113
+ Clean the resulting device_map by grouping all submodules that go on the same device together.
1114
+ offload_buffers (`bool`, *optional*, defaults to `False`):
1115
+ In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
1116
+ well as the parameters.
1117
+ """
1118
+ # Get default / clean up max_memory
1119
+ max_memory = get_max_memory(max_memory)
1120
+ if no_split_module_classes is None:
1121
+ no_split_module_classes = []
1122
+ elif not isinstance(no_split_module_classes, (list, tuple)):
1123
+ no_split_module_classes = [no_split_module_classes]
1124
+
1125
+ devices = list(max_memory.keys())
1126
+ if "disk" not in devices:
1127
+ devices.append("disk")
1128
+ gpus = [device for device in devices if device not in ["cpu", "disk"]]
1129
+
1130
+ # Devices that need to keep space for a potential offloaded layer.
1131
+ if "mps" in gpus:
1132
+ main_devices = ["mps"]
1133
+ elif len(gpus) > 0:
1134
+ main_devices = [gpus[0], "cpu"]
1135
+ else:
1136
+ main_devices = ["cpu"]
1137
+
1138
+ module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes)
1139
+ tied_parameters = find_tied_parameters(model)
1140
+
1141
+ if check_tied_parameters_in_config(model) and len(tied_parameters) == 0:
1142
+ logger.warn(
1143
+ "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function."
1144
+ )
1145
+
1146
+ device_map = OrderedDict()
1147
+ current_device = 0
1148
+ current_memory_used = 0
1149
+ device_memory_used = {}
1150
+ device_buffer_sizes = {}
1151
+
1152
+ # Direct submodules and parameters
1153
+ modules_to_treat = (
1154
+ list(model.named_parameters(recurse=False))
1155
+ + list(model.named_children())
1156
+ + list(model.named_buffers(recurse=False))
1157
+ )
1158
+ # Initialize maximum largest layer, to know which space to keep in memory
1159
+ max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)
1160
+
1161
+ # Ready? This is going to be a bit messy.
1162
+ while len(modules_to_treat) > 0:
1163
+ name, module = modules_to_treat.pop(0)
1164
+ if verbose:
1165
+ print(f"\nTreating module {name}.")
1166
+ # Max size in the remaining layers may have changed since we took one, so we update it if needed.
1167
+ max_layer_names = [n for n in max_layer_names if n != name and not n.startswith(name + ".")]
1168
+ if len(max_layer_names) == 0:
1169
+ max_layer_size, max_layer_names = get_max_layer_size(
1170
+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
1171
+ module_sizes,
1172
+ no_split_module_classes,
1173
+ )
1174
+ # Assess size needed
1175
+ module_size = module_sizes[name]
1176
+
1177
+ # We keep relevant tied parameters only: one of the tied parameters in the group is inside the current module
1178
+ # and the other is not.
1179
+ # Note: If we are currently processing the name `compute.weight`, another parameter named e.g. `compute.weight_submodule.parameter`
1180
+ # needs to be considered outside the current module, hence the check with additional dots.
1181
+ tied_param_goups = [
1182
+ tied_group
1183
+ for tied_group in tied_parameters
1184
+ if any(name + "." in k + "." for k in tied_group) and not all(name + "." in k + "." for k in tied_group)
1185
+ ]
1186
+
1187
+ if verbose and len(tied_param_goups) > 0:
1188
+ print(f" Found the relevant tied param groups {tied_param_goups}")
1189
+
1190
+ # Then we keep track of all the parameters that are tied to the current module, but not in the current module
1191
+ tied_params = sum(
1192
+ [[p for p in tied_group if name + "." not in p + "."] for tied_group in tied_param_goups], []
1193
+ )
1194
+
1195
+ if verbose and len(tied_params) > 0:
1196
+ print(f" So those parameters need to be taken into account {tied_params}")
1197
+
1198
+ device = devices[current_device]
1199
+ current_max_size = max_memory[device] if device != "disk" else None
1200
+ current_memory_reserved = 0
1201
+ # Reduce max size available by the largest layer.
1202
+ if devices[current_device] in main_devices:
1203
+ current_max_size = current_max_size - max_layer_size
1204
+ current_memory_reserved = max_layer_size
1205
+ # Case 1 -> We're too big!
1206
+ if current_max_size is not None and current_memory_used + module_size > current_max_size:
1207
+ # Split or not split?
1208
+ modules_children = (
1209
+ []
1210
+ if isinstance(module, nn.Parameter) or isinstance(module, torch.Tensor)
1211
+ else list(module.named_children())
1212
+ )
1213
+ if verbose:
1214
+ print(
1215
+ f"Not enough space on {devices[current_device]} to put {name} (space available "
1216
+ f"{current_max_size - current_memory_used}, module size {module_size})."
1217
+ )
1218
+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
1219
+ # -> no split, we go to the next device
1220
+ if verbose:
1221
+ print("This module cannot be split, going to the next device.")
1222
+
1223
+ device_memory_used[device] = current_memory_used + current_memory_reserved
1224
+ current_device += 1
1225
+ modules_to_treat = [(name, module)] + modules_to_treat
1226
+ current_memory_used = 0
1227
+ else:
1228
+ # -> split, we replace the module studied by its children + parameters
1229
+ if verbose:
1230
+ print(f"Splitting {name}.")
1231
+ modules_children = list(module.named_parameters(recurse=False)) + modules_children
1232
+ modules_to_treat = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_treat
1233
+ # Update the max layer size.
1234
+ max_layer_size, max_layer_names = get_max_layer_size(
1235
+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
1236
+ module_sizes,
1237
+ no_split_module_classes,
1238
+ )
1239
+
1240
+ # Case 2, it fits! We're not entirely out of the woods though, because we may have some tied parameters.
1241
+ elif len(tied_params) > 0:
1242
+ # First locate all tied modules
1243
+ tied_module_names = []
1244
+ tied_modules = []
1245
+ for tied_param in tied_params:
1246
+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0]
1247
+ tied_module_names.append(modules_to_treat[tied_module_index][0])
1248
+ tied_modules.append(modules_to_treat[tied_module_index][1])
1249
+ if verbose:
1250
+ print(
1251
+ f" It looks like {name} is going to fit on {devices[current_device]} but we have tied "
1252
+ f"parameters to account for.\n - Names {tied_params}\n - Module names {tied_module_names}"
1253
+ )
1254
+
1255
+ # Let's see if it all fits first
1256
+ module_size_with_ties = module_size
1257
+ for tied_param, tied_module_name in zip(tied_params, tied_module_names):
1258
+ module_size_with_ties += module_sizes[tied_module_name] - module_sizes[tied_param]
1259
+
1260
+ if current_max_size is None or current_memory_used + module_size_with_ties <= current_max_size:
1261
+ # We really really fit!
1262
+ if verbose:
1263
+ print(f"Putting {name} and {tied_module_names} on {devices[current_device]}.")
1264
+ current_memory_used += module_size_with_ties
1265
+ device_map[name] = devices[current_device]
1266
+ for tied_module_name in tied_module_names:
1267
+ if tied_module_name in [m[0] for m in modules_to_treat]:
1268
+ # The module may have been removed by a previous iteration of this loop.
1269
+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][
1270
+ 0
1271
+ ]
1272
+ modules_to_treat.pop(tied_module_index)
1273
+ device_map[tied_module_name] = devices[current_device]
1274
+
1275
+ if not offload_buffers and isinstance(module, nn.Module):
1276
+ current_buffer_size = compute_module_total_buffer_size(
1277
+ module, dtype=dtype, special_dtypes=special_dtypes
1278
+ )
1279
+ device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size
1280
+
1281
+ else:
1282
+ # We don't fit with the tied modules. Next question is: can we split one of the tied modules to make it
1283
+ # smaller or do we need to go on the next device?
1284
+ if verbose:
1285
+ print(
1286
+ f"Not enough space on {devices[current_device]} to put {name} and {tied_module_names} (space "
1287
+ f"available {current_max_size - current_memory_used}, needed size {module_size_with_ties})."
1288
+ )
1289
+ split_happened = False
1290
+ for tied_module_name, tied_module in zip(tied_module_names, tied_modules):
1291
+ tied_module_children = list(tied_module.named_children())
1292
+ if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:
1293
+ # can't break this one.
1294
+ continue
1295
+
1296
+ if verbose:
1297
+ print(f"Splitting {tied_module_name}.")
1298
+ tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children
1299
+ tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children]
1300
+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][0]
1301
+
1302
+ modules_to_treat = (
1303
+ [(name, module)]
1304
+ + modules_to_treat[:tied_module_index]
1305
+ + tied_module_children
1306
+ + modules_to_treat[tied_module_index + 1 :]
1307
+ )
1308
+ # Update the max layer size.
1309
+ max_layer_size, max_layer_names = get_max_layer_size(
1310
+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
1311
+ module_sizes,
1312
+ no_split_module_classes,
1313
+ )
1314
+ split_happened = True
1315
+ break
1316
+
1317
+ if not split_happened:
1318
+ # If the tied module is not split, we go to the next device
1319
+ if verbose:
1320
+ print("None of the tied module can be split, going to the next device.")
1321
+
1322
+ device_memory_used[device] = current_memory_used + current_memory_reserved
1323
+ current_device += 1
1324
+ modules_to_treat = [(name, module)] + modules_to_treat
1325
+ current_memory_used = 0
1326
+
1327
+ else:
1328
+ if verbose:
1329
+ if current_max_size is None:
1330
+ print(f"Putting {name} (size={module_size}) on {devices[current_device]}.")
1331
+ else:
1332
+ print(
1333
+ f"Putting {name} (size={module_size}) on {devices[current_device]} "
1334
+ f"(available={current_max_size - current_memory_used})."
1335
+ )
1336
+ current_memory_used += module_size
1337
+ device_memory_used[device] = current_memory_used + current_memory_reserved
1338
+ device_map[name] = devices[current_device]
1339
+
1340
+ if not offload_buffers and isinstance(module, nn.Module):
1341
+ current_buffer_size = compute_module_total_buffer_size(
1342
+ module, dtype=dtype, special_dtypes=special_dtypes
1343
+ )
1344
+ device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size
1345
+
1346
+ if clean_result:
1347
+ device_map = clean_device_map(device_map)
1348
+
1349
+ non_gpu_buffer_size = device_buffer_sizes.get("cpu", 0) + device_buffer_sizes.get("disk", 0)
1350
+ if non_gpu_buffer_size > 0 and not offload_buffers:
1351
+ is_buffer_fit_any_gpu = False
1352
+ for gpu_device, gpu_max_memory in max_memory.items():
1353
+ if gpu_device == "cpu" or gpu_device == "disk":
1354
+ continue
1355
+
1356
+ if not is_buffer_fit_any_gpu:
1357
+ gpu_memory_used = device_memory_used.get(gpu_device, 0)
1358
+
1359
+ if gpu_max_memory >= non_gpu_buffer_size + gpu_memory_used:
1360
+ is_buffer_fit_any_gpu = True
1361
+
1362
+ if len(gpus) > 0 and not is_buffer_fit_any_gpu:
1363
+ warnings.warn(
1364
+ f"Current model requires {non_gpu_buffer_size} bytes of buffer for offloaded layers, which seems does "
1365
+ f"not fit any GPU's remaining memory. If you are experiencing a OOM later, please consider using "
1366
+ f"offload_buffers=True."
1367
+ )
1368
+
1369
+ return device_map
1370
+
1371
+
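A sketch of the device-map inference on a toy model with deliberately tight, hypothetical budgets; with real GPUs you would normally omit `max_memory` and let it default to what is available. The import path is an assumption.

```py
import torch.nn as nn
from accelerate.utils import infer_auto_device_map  # assumed import path

model = nn.Sequential(*[nn.Linear(1024, 1024) for _ in range(6)])
device_map = infer_auto_device_map(
    model,
    max_memory={0: "10MiB", "cpu": "20MiB"},   # whatever does not fit spills to "disk"
    no_split_module_classes=["Linear"],
)
print(device_map)  # each layer mapped to 0, "cpu" or "disk" depending on the budgets
```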
1372
+ def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):
1373
+ """
1374
+ Checks a device map covers everything in a given model.
1375
+
1376
+ Args:
1377
+ model (`torch.nn.Module`): The model to check the device map against.
1378
+ device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.
1379
+ """
1380
+ all_model_tensors = [name for name, _ in model.state_dict().items()]
1381
+ for module_name in device_map.keys():
1382
+ if module_name == "":
1383
+ all_model_tensors.clear()
1384
+ break
1385
+ else:
1386
+ all_model_tensors = [
1387
+ name
1388
+ for name in all_model_tensors
1389
+ if not name == module_name and not name.startswith(module_name + ".")
1390
+ ]
1391
+ if len(all_model_tensors) > 0:
1392
+ non_covered_params = ", ".join(all_model_tensors)
1393
+ raise ValueError(
1394
+ f"The device_map provided does not give any device for the following parameters: {non_covered_params}"
1395
+ )
1396
+
1397
+
1398
+ def load_state_dict(checkpoint_file, device_map=None):
1399
+ """
1400
+ Load a checkpoint from a given file. If the checkpoint is in the safetensors format and a device map is passed, the
1401
+ weights can be fast-loaded directly on the GPU.
1402
+
1403
+ Args:
1404
+ checkpoint_file (`str`): The path to the checkpoint to load.
1405
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
1406
+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
+ name; once a given module name is inside, every submodule of it will be sent to the same device.
1408
+ """
1409
+ if checkpoint_file.endswith(".safetensors"):
1410
+ with safe_open(checkpoint_file, framework="pt") as f:
1411
+ metadata = f.metadata()
1412
+ weight_names = f.keys()
1413
+
1414
+ if metadata is None:
1415
+ logger.warn(
1416
+ f"The safetensors archive passed at {checkpoint_file} does not contain metadata. "
1417
+ "Make sure to save your model with the `save_pretrained` method. Defaulting to 'pt' metadata."
1418
+ )
1419
+ metadata = {"format": "pt"}
1420
+
1421
+ if metadata.get("format") not in ["pt", "tf", "flax"]:
1422
+ raise OSError(
1423
+ f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure "
1424
+ "you save your model with the `save_pretrained` method."
1425
+ )
1426
+ elif metadata["format"] != "pt":
1427
+ raise ValueError(f"The checkpoint passed was saved with {metadata['format']}, we need a the pt format.")
1428
+ if device_map is None:
1429
+ return safe_load_file(checkpoint_file)
1430
+ else:
1431
+ # if we only have one device we can load everything directly
1432
+ if len(set(device_map.values())) == 1:
1433
+ return safe_load_file(checkpoint_file, device=list(device_map.values())[0])
1434
+
1435
+ devices = list(set(device_map.values()) - {"disk"})
1436
+ # the cpu device should always exist as a fallback option
1437
+ if "cpu" not in devices:
1438
+ devices.append("cpu")
1439
+
1440
+ # For each device, get the weights that go there
1441
+ device_weights = {device: [] for device in devices}
1442
+ for module_name, device in device_map.items():
1443
+ if device in devices:
1444
+ device_weights[device].extend(
1445
+ [k for k in weight_names if k == module_name or k.startswith(module_name + ".")]
1446
+ )
1447
+
1448
+ # all weights that haven't defined a device should be loaded on CPU
1449
+ device_weights["cpu"].extend([k for k in weight_names if k not in sum(device_weights.values(), [])])
1450
+ tensors = {}
1451
+ if is_tqdm_available():
1452
+ progress_bar = tqdm(
1453
+ main_process_only=False,
1454
+ total=sum([len(device_weights[device]) for device in devices]),
1455
+ unit="w",
1456
+ smoothing=0,
1457
+ leave=False,
1458
+ )
1459
+ else:
1460
+ progress_bar = None
1461
+ for device in devices:
1462
+ target_device = device
1463
+
1464
+ if is_xpu_available():
1465
+ current_safetensors_version = packaging.version.parse(importlib.metadata.version("safetensors"))
1466
+
1467
+ if compare_versions(current_safetensors_version, "<", "0.4.2"):
1468
+ raise ModuleNotFoundError(
1469
+ f"You need at least safetensors 0.4.2 for Intel GPU, while you have {current_safetensors_version}"
1470
+ )
1471
+
1472
+ if isinstance(device, int):
1473
+ target_device = f"xpu:{device}"
1474
+
1475
+ with safe_open(checkpoint_file, framework="pt", device=target_device) as f:
1476
+ for key in device_weights[device]:
1477
+ if progress_bar is not None:
1478
+ progress_bar.set_postfix(dev=device, refresh=False)
1479
+ progress_bar.set_description(key)
1480
+ tensors[key] = f.get_tensor(key)
1481
+ if progress_bar is not None:
1482
+ progress_bar.update()
1483
+ if progress_bar is not None:
1484
+ progress_bar.close()
1485
+
1486
+ return tensors
1487
+ else:
1488
+ return torch.load(checkpoint_file, map_location=torch.device("cpu"))
1489
+
1490
+
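A sketch of loading a safetensors file with a device map via the loader above (note this is accelerate's `load_state_dict`, not `torch.nn.Module.load_state_dict`); the import path is an assumption.

```py
import torch
from safetensors.torch import save_file
from accelerate.utils.modeling import load_state_dict  # assumed import path

save_file({"linear.weight": torch.randn(4, 4)}, "tiny.safetensors")
state_dict = load_state_dict("tiny.safetensors", device_map={"linear": "cpu"})
print(state_dict["linear.weight"].shape)  # torch.Size([4, 4])
```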
1491
+ def get_state_dict_offloaded_model(model: nn.Module):
1492
+ """
1493
+ Returns the state dictionary for an offloaded model via iterative onloading
1494
+
1495
+ Args:
1496
+ model (`torch.nn.Module`):
1497
+ The offloaded model we want to save
1498
+ """
1499
+ from ..hooks import AlignDevicesHook
1500
+
1501
+ state_dict = {}
1502
+ placeholders = set()
1503
+ for name, module in model.named_modules():
1504
+ if name == "":
1505
+ continue
1506
+ if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload:
1507
+ original_device = module._hf_hook.execution_device
1508
+ # assign hook execution device to cpu
1509
+ module._hf_hook.execution_device = "cpu"
1510
+ # onload meta tensors to execution device
1511
+ try:
1512
+ module._hf_hook.pre_forward(module)
1513
+ except MemoryError:
1514
+ raise MemoryError("Offloaded module must fit in CPU memory to call save_model!") from None
1515
+ module_state_dict = module.state_dict()
1516
+ # offload meta tensors from cpu
1517
+ module._hf_hook.post_forward(module, torch.tensor([]))
1518
+ # re-assign hook to original execution device
1519
+ module._hf_hook.execution_device = original_device
1520
+ else:
1521
+ module_state_dict = module.state_dict()
1522
+
1523
+ for key in module_state_dict:
1524
+ # ignore placeholder parameters that are still on the meta device
1525
+ if module_state_dict[key].device == torch.device("meta"):
1526
+ placeholders.add(name + f".{key}")
1527
+ continue
1528
+ params = module_state_dict[key]
1529
+ state_dict[name + f".{key}"] = params
1530
+ for key in placeholders.copy():
1531
+ if key in state_dict:
1532
+ placeholders.remove(key)
1533
+ if placeholders:
1534
+ logger.warning(f"The following tensors were not saved because they were still on meta device: {placeholders}")
1535
+
1536
+ return state_dict
1537
+
1538
+
1539
+ def load_checkpoint_in_model(
1540
+ model: nn.Module,
1541
+ checkpoint: Union[str, os.PathLike],
1542
+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
1543
+ offload_folder: Optional[Union[str, os.PathLike]] = None,
1544
+ dtype: Optional[Union[str, torch.dtype]] = None,
1545
+ offload_state_dict: bool = False,
1546
+ offload_buffers: bool = False,
1547
+ keep_in_fp32_modules: List[str] = None,
1548
+ offload_8bit_bnb: bool = False,
1549
+ strict: bool = False,
1550
+ ):
1551
+ """
1552
+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
1553
+ loaded.
1554
+
1555
+ <Tip warning={true}>
1556
+
1557
+ Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To
1558
+ group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].
1559
+
1560
+ </Tip>
1561
+
1562
+ Args:
1563
+ model (`torch.nn.Module`):
1564
+ The model in which we want to load a checkpoint.
1565
+ checkpoint (`str` or `os.PathLike`):
1566
+ The checkpoint to load. It can be:
1567
+ - a path to a file containing a whole model state dict
1568
+ - a path to a `.json` file containing the index to a sharded checkpoint
1569
+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
1570
+ - a path to a folder containing a unique pytorch_model.bin or a model.safetensors file.
1571
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
1572
+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
+ name; once a given module name is inside, every submodule of it will be sent to the same device.
1574
+ offload_folder (`str` or `os.PathLike`, *optional*):
1575
+ If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
1576
+ dtype (`str` or `torch.dtype`, *optional*):
1577
+ If provided, the weights will be converted to that type when loaded.
1578
+ offload_state_dict (`bool`, *optional*, defaults to `False`):
1579
+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
1580
+ the weight of the CPU state dict + the biggest shard does not fit.
1581
+ offload_buffers (`bool`, *optional*, defaults to `False`):
1582
+ Whether or not to include the buffers in the weights offloaded to disk.
1583
+ keep_in_fp32_modules(`List[str]`, *optional*):
1584
+ A list of the modules that we keep in `torch.float32` dtype.
1585
+ offload_8bit_bnb (`bool`, *optional*):
1586
+ Whether or not to enable offload of 8-bit modules on cpu/disk.
1587
+ strict (`bool`, *optional*, defaults to `False`):
1588
+ Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
1589
+ state_dict.
1590
+
1591
+ """
1592
+ if offload_8bit_bnb:
1593
+ from .bnb import quantize_and_offload_8bit
1594
+
1595
+ tied_params = find_tied_parameters(model)
1596
+
1597
+ if check_tied_parameters_in_config(model) and len(tied_params) == 0:
1598
+ logger.warn(
1599
+ "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function."
1600
+ )
1601
+ if device_map is not None:
1602
+ check_tied_parameters_on_same_device(tied_params, device_map)
1603
+
1604
+ if offload_folder is None and device_map is not None and "disk" in device_map.values():
1605
+ raise ValueError(
1606
+ "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`."
1607
+ )
1608
+ elif offload_folder is not None and device_map is not None and "disk" in device_map.values():
1609
+ os.makedirs(offload_folder, exist_ok=True)
1610
+
1611
+ if isinstance(dtype, str):
1612
+ # We accept "torch.float16" or just "float16"
1613
+ dtype = dtype.replace("torch.", "")
1614
+ dtype = getattr(torch, dtype)
1615
+
1616
+ checkpoint_files = None
1617
+ index_filename = None
1618
+ if os.path.isfile(checkpoint):
1619
+ if str(checkpoint).endswith(".json"):
1620
+ index_filename = checkpoint
1621
+ else:
1622
+ checkpoint_files = [checkpoint]
1623
+ elif os.path.isdir(checkpoint):
1624
+ # check if the whole state dict is present
1625
+ potential_state_bin = [f for f in os.listdir(checkpoint) if f == WEIGHTS_NAME]
1626
+ potential_state_safetensor = [f for f in os.listdir(checkpoint) if f == SAFE_WEIGHTS_NAME]
1627
+ if len(potential_state_bin) == 1:
1628
+ checkpoint_files = [os.path.join(checkpoint, potential_state_bin[0])]
1629
+ elif len(potential_state_safetensor) == 1:
1630
+ checkpoint_files = [os.path.join(checkpoint, potential_state_safetensor[0])]
1631
+ else:
1632
+ # otherwise check for sharded checkpoints
1633
+ potential_index = [f for f in os.listdir(checkpoint) if f.endswith(".index.json")]
1634
+ if len(potential_index) == 0:
1635
+ raise ValueError(
1636
+ f"{checkpoint} is not a folder containing a `.index.json` file or a {WEIGHTS_NAME} or a {SAFE_WEIGHTS_NAME} file"
1637
+ )
1638
+ elif len(potential_index) == 1:
1639
+ index_filename = os.path.join(checkpoint, potential_index[0])
1640
+ else:
1641
+ raise ValueError(
1642
+ f"{checkpoint} contains more than one `.index.json` file. Delete the irrelevant ones."
1643
+ )
1644
+ else:
1645
+ raise ValueError(
1646
+ "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded "
1647
+ f"checkpoint, or a folder containing a sharded checkpoint or the whole state dict, but got {checkpoint}."
1648
+ )
1649
+
1650
+ if index_filename is not None:
1651
+ checkpoint_folder = os.path.split(index_filename)[0]
1652
+ with open(index_filename) as f:
1653
+ index = json.loads(f.read())
1654
+
1655
+ if "weight_map" in index:
1656
+ index = index["weight_map"]
1657
+ checkpoint_files = sorted(list(set(index.values())))
1658
+ checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files]
1659
+
1660
+ # Logic for missing/unexpected keys goes here.
1661
+
1662
+ offload_index = {}
1663
+ if offload_state_dict:
1664
+ state_dict_folder = tempfile.mkdtemp()
1665
+ state_dict_index = {}
1666
+
1667
+ unexpected_keys = set()
1668
+ model_keys = set(model.state_dict().keys())
1669
+ buffer_names = [name for name, _ in model.named_buffers()]
1670
+ for checkpoint_file in checkpoint_files:
1671
+ loaded_checkpoint = load_state_dict(checkpoint_file, device_map=device_map)
1672
+ if device_map is None:
1673
+ model.load_state_dict(loaded_checkpoint, strict=strict)
1674
+ unexpected_keys.update(set(loaded_checkpoint.keys()) - model_keys)
1675
+ else:
1676
+ for param_name, param in loaded_checkpoint.items():
1677
+ # skip SCB parameter (for 8-bit serialization)
1678
+ if "SCB" in param_name:
1679
+ continue
1680
+
1681
+ if param_name not in model_keys:
1682
+ unexpected_keys.add(param_name)
1683
+ if not strict:
1684
+ continue # Skip loading this parameter.
1685
+
1686
+ module_name = param_name
1687
+
1688
+ while len(module_name) > 0 and module_name not in device_map:
1689
+ module_name = ".".join(module_name.split(".")[:-1])
1690
+ if module_name == "" and "" not in device_map:
1691
+ # TODO: group all errors and raise at the end.
1692
+ raise ValueError(f"{param_name} doesn't have any device set.")
1693
+ param_device = device_map[module_name]
1694
+ new_dtype = dtype
1695
+ if dtype is not None and torch.is_floating_point(param):
1696
+ if keep_in_fp32_modules is not None and dtype == torch.float16:
1697
+ proceed = False
1698
+ for key in keep_in_fp32_modules:
1699
+ if ((key in param_name) and (key + "." in param_name)) or key == param_name:
1700
+ proceed = True
1701
+ break
1702
+ if proceed:
1703
+ new_dtype = torch.float32
1704
+
1705
+ if "weight" in param_name and param_name.replace("weight", "SCB") in loaded_checkpoint.keys():
1706
+ if param.dtype == torch.int8:
1707
+ fp16_statistics = loaded_checkpoint[param_name.replace("weight", "SCB")]
1708
+ else:
1709
+ fp16_statistics = None
1710
+
1711
+ if param_device == "disk":
1712
+ if offload_buffers or param_name not in buffer_names:
1713
+ if new_dtype is None:
1714
+ new_dtype = param.dtype
1715
+ if offload_8bit_bnb:
1716
+ quantize_and_offload_8bit(
1717
+ model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics
1718
+ )
1719
+ continue
1720
+ else:
1721
+ set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype)
1722
+ offload_weight(param, param_name, offload_folder, index=offload_index)
1723
+ elif param_device == "cpu" and offload_state_dict:
1724
+ if new_dtype is None:
1725
+ new_dtype = param.dtype
1726
+ if offload_8bit_bnb:
1727
+ quantize_and_offload_8bit(
1728
+ model, param, param_name, new_dtype, state_dict_folder, state_dict_index, fp16_statistics
1729
+ )
1730
+ else:
1731
+ set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype)
1732
+ offload_weight(param, param_name, state_dict_folder, index=state_dict_index)
1733
+ else:
1734
+ set_module_tensor_to_device(
1735
+ model,
1736
+ param_name,
1737
+ param_device,
1738
+ value=param,
1739
+ dtype=new_dtype,
1740
+ fp16_statistics=fp16_statistics,
1741
+ )
1742
+
1743
+ # Force Python to clean up.
1744
+ del loaded_checkpoint
1745
+ gc.collect()
1746
+
1747
+ if not strict and len(unexpected_keys) > 0:
1748
+ logger.warning(
1749
+ f"Some weights of the model checkpoint at {checkpoint} were not used when"
1750
+ f" initializing {model.__class__.__name__}: {unexpected_keys}. This may or may not be an issue - make sure that the checkpoint does not have unnecessary parameters, or that the model definition correctly corresponds to the checkpoint."
1751
+ )
1752
+
1753
+ save_offload_index(offload_index, offload_folder)
1754
+
1755
+ # Load back offloaded state dict on CPU
1756
+ if offload_state_dict:
1757
+ load_offloaded_weights(model, state_dict_index, state_dict_folder)
1758
+ shutil.rmtree(state_dict_folder)
1759
+
1760
+ retie_parameters(model, tied_params)
1761
+
1762
+
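A minimal usage sketch for the function above, assuming a GPU at index 0 and a hypothetical checkpoint on disk (the model and paths are illustrative only):

```python
import torch
from accelerate import dispatch_model, init_empty_weights, load_checkpoint_in_model

# Build the model skeleton without allocating real storage for the weights.
with init_empty_weights():
    model = torch.nn.Sequential(torch.nn.Linear(512, 512), torch.nn.ReLU(), torch.nn.Linear(512, 2))

# Keys are module names from model.named_modules(); "" would mean "the whole model".
device_map = {"0": 0, "1": "cpu", "2": "cpu"}

load_checkpoint_in_model(
    model,
    "path/to/checkpoint",  # a state-dict file, a folder with shards, or an .index.json (hypothetical path)
    device_map=device_map,
    dtype=torch.float16,
)

# The weights are placed but no hooks are attached yet; dispatch_model makes the model runnable.
model = dispatch_model(model, device_map=device_map)
```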
1763
+ def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwargs: AutocastKwargs = None):
1764
+ """
1765
+ Return a context manager for autocasting mixed precision
1766
+
1767
+ Args:
1768
+ native_amp (`bool`, *optional*, defaults to False):
1769
+ Whether mixed precision is actually enabled.
1770
+ autocast_kwargs (`AutocastKwargs`, *optional*):
1771
+ Keyword arguments (for example `cache_enabled`) to pass to `torch.autocast`.
1772
+ """
1773
+ state = AcceleratorState()
1774
+ if autocast_kwargs is None:
1775
+ autocast_kwargs = {}
1776
+ else:
1777
+ autocast_kwargs = autocast_kwargs.to_kwargs()
1778
+ if native_amp:
1779
+ device_type = (
1780
+ "cuda"
1781
+ if (state.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_gpu=True))
1782
+ else state.device.type
1783
+ )
1784
+ if state.mixed_precision == "fp16":
1785
+ return torch.autocast(device_type=device_type, dtype=torch.float16, **autocast_kwargs)
1786
+ elif state.mixed_precision == "bf16" and state.distributed_type in [
1787
+ DistributedType.NO,
1788
+ DistributedType.MULTI_CPU,
1789
+ DistributedType.MULTI_GPU,
1790
+ DistributedType.MULTI_MLU,
1791
+ DistributedType.MULTI_NPU,
1792
+ DistributedType.MULTI_XPU,
1793
+ DistributedType.FSDP,
1794
+ DistributedType.XLA,
1795
+ ]:
1796
+ return torch.autocast(device_type=device_type, dtype=torch.bfloat16, **autocast_kwargs)
1797
+ else:
1798
+ return torch.autocast(device_type=device_type, **autocast_kwargs)
1799
+ else:
1800
+ return contextlib.nullcontext()
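A sketch of how this context manager is typically driven, assuming an `Accelerator` has already populated `AcceleratorState` (shown with `bf16`, which also works on CPU; the import path follows the module shown above and is an assumption):

```python
import torch
from accelerate import Accelerator
from accelerate.utils import AutocastKwargs
from accelerate.utils.modeling import get_mixed_precision_context_manager

accelerator = Accelerator(mixed_precision="bf16")  # initializes AcceleratorState
model = torch.nn.Linear(8, 8).to(accelerator.device)
batch = torch.randn(4, 8, device=accelerator.device)

# Pass AutocastKwargs to tweak torch.autocast, e.g. to disable the weight cache.
autocast_ctx = get_mixed_precision_context_manager(
    native_amp=True, autocast_kwargs=AutocastKwargs(cache_enabled=False)
)
with autocast_ctx:
    out = model(batch)  # runs under torch.autocast(..., dtype=torch.bfloat16)
```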
env-llmeval/lib/python3.10/site-packages/accelerate/utils/offload.py ADDED
@@ -0,0 +1,213 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ from collections.abc import Mapping
18
+ from typing import Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+ import torch
22
+ from safetensors import safe_open
23
+
24
+
25
+ def offload_weight(weight, weight_name, offload_folder, index=None):
26
+ dtype = None
27
+ # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16.
28
+ if str(weight.dtype) == "torch.bfloat16":
29
+ # Need to reinterpret the underlying data as int16 since NumPy does not handle bfloat16s.
30
+ weight = weight.view(torch.int16)
31
+ dtype = "bfloat16"
32
+ array = weight.cpu().numpy()
33
+ tensor_file = os.path.join(offload_folder, f"{weight_name}.dat")
34
+ if index is not None:
35
+ if dtype is None:
36
+ dtype = str(array.dtype)
37
+ index[weight_name] = {"dtype": dtype, "shape": list(array.shape)}
38
+ if array.ndim == 0:
39
+ array = array[None]
40
+ file_array = np.memmap(tensor_file, dtype=array.dtype, mode="w+", shape=array.shape)
41
+ file_array[:] = array[:]
42
+ file_array.flush()
43
+ return index
44
+
45
+
46
+ def load_offloaded_weight(weight_file, weight_info):
47
+ shape = tuple(weight_info["shape"])
48
+ if shape == ():
49
+ # NumPy memory-mapped arrays can't have 0 dims so it was saved as 1d tensor
50
+ shape = (1,)
51
+
52
+ dtype = weight_info["dtype"]
53
+ if dtype == "bfloat16":
54
+ # NumPy does not support bfloat16 so this was saved as an int16
55
+ dtype = "int16"
56
+
57
+ weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode="r")
58
+
59
+ if len(weight_info["shape"]) == 0:
60
+ weight = weight[0]
61
+ weight = torch.tensor(weight)
62
+ if weight_info["dtype"] == "bfloat16":
63
+ weight = weight.view(torch.bfloat16)
64
+
65
+ return weight
66
+
67
+
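A small round-trip sketch for the two helpers above (the temporary folder and weight name are arbitrary):

```python
import os
import tempfile
import torch
from accelerate.utils.offload import load_offloaded_weight, offload_weight

offload_dir = tempfile.mkdtemp()
weight = torch.randn(4, 3, dtype=torch.bfloat16)

# Writes "<offload_dir>/layer.weight.dat" as a memmap and records dtype/shape in the index.
index = offload_weight(weight, "layer.weight", offload_dir, index={})
# index == {"layer.weight": {"dtype": "bfloat16", "shape": [4, 3]}}

# Reads it back from disk using the recorded metadata, restoring the bfloat16 view.
reloaded = load_offloaded_weight(os.path.join(offload_dir, "layer.weight.dat"), index["layer.weight"])
assert torch.equal(reloaded, weight)
```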
68
+ def save_offload_index(index, offload_folder):
69
+ if index is None or len(index) == 0:
70
+ # Nothing to save
71
+ return
72
+
73
+ offload_index_file = os.path.join(offload_folder, "index.json")
74
+ if os.path.isfile(offload_index_file):
75
+ with open(offload_index_file, encoding="utf-8") as f:
76
+ current_index = json.load(f)
77
+ else:
78
+ current_index = {}
79
+ current_index.update(index)
80
+
81
+ with open(offload_index_file, "w", encoding="utf-8") as f:
82
+ json.dump(current_index, f, indent=2)
83
+
84
+
85
+ def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):
86
+ """
87
+ Offload a state dict in a given folder.
88
+
89
+ Args:
90
+ save_dir (`str` or `os.PathLike`):
91
+ The directory in which to offload the state dict.
92
+ state_dict (`Dict[str, torch.Tensor]`):
93
+ The dictionary of tensors to offload.
94
+ """
95
+ os.makedirs(save_dir, exist_ok=True)
96
+ index = {}
97
+ for name, parameter in state_dict.items():
98
+ index = offload_weight(parameter, name, save_dir, index=index)
99
+
100
+ # Update index
101
+ save_offload_index(index, save_dir)
102
+
103
+
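As an illustration, offloading a whole state dict and reading entries back lazily with the `OffloadedWeightsLoader` class defined further down (paths are temporary):

```python
import tempfile
import torch
from accelerate.utils.offload import OffloadedWeightsLoader, offload_state_dict

save_dir = tempfile.mkdtemp()
model = torch.nn.Linear(16, 4)

# One .dat memmap per tensor plus an index.json describing dtypes and shapes.
offload_state_dict(save_dir, model.state_dict())

# The loader behaves like a read-only dict and pulls weights from disk on access.
weights = OffloadedWeightsLoader(save_folder=save_dir)
print(sorted(weights.keys()))   # ['bias', 'weight']
print(weights["weight"].shape)  # torch.Size([4, 16])
```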
104
+ class PrefixedDataset(Mapping):
105
+ """
106
+ Will access keys in a given dataset by adding a prefix.
107
+
108
+ Args:
109
+ dataset (`Mapping`): Any map with string keys.
110
+ prefix (`str`): A prefix to add when trying to access any element in the underlying dataset.
111
+ """
112
+
113
+ def __init__(self, dataset: Mapping, prefix: str):
114
+ self.dataset = dataset
115
+ self.prefix = prefix
116
+
117
+ def __getitem__(self, key):
118
+ return self.dataset[f"{self.prefix}{key}"]
119
+
120
+ def __iter__(self):
121
+ return iter([key for key in self.dataset if key.startswith(self.prefix)])
122
+
123
+ def __len__(self):
124
+ return len(self.dataset)
125
+
126
+
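A tiny sketch of the prefix indirection this class provides (the values are placeholders, not real tensors):

```python
from accelerate.utils.offload import PrefixedDataset

state_dict = {"encoder.weight": 1, "encoder.bias": 2, "decoder.weight": 3}
encoder_view = PrefixedDataset(state_dict, prefix="encoder.")

print(encoder_view["weight"])  # 1 -- looks up "encoder.weight" in the underlying dict
print(list(encoder_view))      # ['encoder.weight', 'encoder.bias'] -- iteration yields the full keys
print(len(encoder_view))       # 3 -- note: the length of the underlying dataset, not of the filtered view
```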
127
+ class OffloadedWeightsLoader(Mapping):
128
+ """
129
+ A collection that loads weights stored in a given state dict or memory-mapped on disk.
130
+
131
+ Args:
132
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
133
+ A dictionary mapping parameter names to tensors.
134
+ save_folder (`str` or `os.PathLike`, *optional*):
135
+ The directory in which the weights are stored (by `offload_state_dict` for instance).
136
+ index (`Dict`, *optional*):
137
+ A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
138
+ to the index saved in `save_folder`.
139
+ """
140
+
141
+ def __init__(
142
+ self,
143
+ state_dict: Dict[str, torch.Tensor] = None,
144
+ save_folder: Optional[Union[str, os.PathLike]] = None,
145
+ index: Mapping = None,
146
+ device=None,
147
+ ):
148
+ if state_dict is None and save_folder is None and index is None:
149
+ raise ValueError("Need either a `state_dict`, a `save_folder` or an `index` containing offloaded weights.")
150
+
151
+ self.state_dict = {} if state_dict is None else state_dict
152
+ self.save_folder = save_folder
153
+ if index is None and save_folder is not None:
154
+ with open(os.path.join(save_folder, "index.json")) as f:
155
+ index = json.load(f)
156
+ self.index = {} if index is None else index
157
+ self.all_keys = list(self.state_dict.keys())
158
+ self.all_keys.extend([key for key in self.index if key not in self.all_keys])
159
+ self.device = device
160
+
161
+ def __getitem__(self, key: str):
162
+ # State dict gets priority
163
+ if key in self.state_dict:
164
+ return self.state_dict[key]
165
+ weight_info = self.index[key]
166
+ if weight_info.get("safetensors_file") is not None:
167
+ device = "cpu" if self.device is None else self.device
168
+ tensor = None
169
+ try:
170
+ with safe_open(weight_info["safetensors_file"], framework="pt", device=device) as f:
171
+ tensor = f.get_tensor(weight_info.get("weight_name", key))
172
+ except TypeError:
173
+ # if failed to get_tensor on the device, such as bf16 on mps, try to load it on CPU first
174
+ with safe_open(weight_info["safetensors_file"], framework="pt", device="cpu") as f:
175
+ tensor = f.get_tensor(weight_info.get("weight_name", key))
176
+
177
+ if "dtype" in weight_info:
178
+ tensor = tensor.to(getattr(torch, weight_info["dtype"]))
179
+
180
+ if tensor.device != torch.device(device):
181
+ tensor = tensor.to(device)
182
+ return tensor
183
+
184
+ weight_file = os.path.join(self.save_folder, f"{key}.dat")
185
+ return load_offloaded_weight(weight_file, weight_info)
186
+
187
+ def __iter__(self):
188
+ return iter(self.all_keys)
189
+
190
+ def __len__(self):
191
+ return len(self.all_keys)
192
+
193
+
194
+ def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]):
195
+ """
196
+ Extract the sub state-dict corresponding to a list of given submodules.
197
+
198
+ Args:
199
+ state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from.
200
+ submodule_names (`List[str]`): The list of submodule names we want to extract.
201
+ """
202
+ result = {}
203
+ for module_name in submodule_names:
204
+ # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the
205
+ # submodules whose names begin like module_name (transformers.h.1 and transformers.h.10 for instance)
206
+ result.update(
207
+ {
208
+ key: param
209
+ for key, param in state_dict.items()
210
+ if key == module_name or key.startswith(module_name + ".")
211
+ }
212
+ )
213
+ return result
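A quick illustrative call showing that matching stops at module boundaries (the keys are made up):

```python
from accelerate.utils.offload import extract_submodules_state_dict

state_dict = {
    "transformer.h.1.weight": 0,
    "transformer.h.10.weight": 1,
    "transformer.h.1.attn.bias": 2,
}
sub = extract_submodules_state_dict(state_dict, ["transformer.h.1"])
print(sorted(sub))  # ['transformer.h.1.attn.bias', 'transformer.h.1.weight'] -- h.10 is not included
```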
env-llmeval/lib/python3.10/site-packages/accelerate/utils/operations.py ADDED
@@ -0,0 +1,851 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ A set of basic tensor ops compatible with tpu, gpu, and multigpu
16
+ """
17
+
18
+ import pickle
19
+ import warnings
20
+ from functools import update_wrapper, wraps
21
+ from typing import Any, Mapping
22
+
23
+ import torch
24
+
25
+ from ..state import PartialState
26
+ from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
27
+ from .dataclasses import DistributedType, TensorInformation
28
+ from .imports import (
29
+ is_npu_available,
30
+ is_torch_distributed_available,
31
+ is_torch_version,
32
+ is_torch_xla_available,
33
+ is_xpu_available,
34
+ )
35
+
36
+
37
+ if is_torch_xla_available():
38
+ import torch_xla.core.xla_model as xm
39
+
40
+ if is_torch_distributed_available():
41
+ from torch.distributed import ReduceOp
42
+
43
+
44
+ def is_torch_tensor(tensor):
45
+ return isinstance(tensor, torch.Tensor)
46
+
47
+
48
+ def is_torch_xpu_tensor(tensor):
49
+ return isinstance(
50
+ tensor,
51
+ (torch.xpu.FloatTensor,
52
+ torch.xpu.ByteTensor,
53
+ torch.xpu.IntTensor,
54
+ torch.xpu.LongTensor,
55
+ torch.xpu.HalfTensor,
56
+ torch.xpu.DoubleTensor,
57
+ torch.xpu.BFloat16Tensor),
58
+ )
59
+
60
+
61
+ def is_tensor_information(tensor_info):
62
+ return isinstance(tensor_info, TensorInformation)
63
+
64
+
65
+ def is_namedtuple(data):
66
+ """
67
+ Checks if `data` is a `namedtuple` or not. Can have false positives, but only if a user is trying to mimic a
68
+ `namedtuple` perfectly.
69
+ """
70
+ return isinstance(data, tuple) and hasattr(data, "_asdict") and hasattr(data, "_fields")
71
+
72
+
73
+ def honor_type(obj, generator):
74
+ """
75
+ Cast a generator to the same type as obj (list, tuple, or namedtuple)
76
+ """
77
+ # Some objects may not be able to instantiate from a generator directly
78
+ if is_namedtuple(obj):
79
+ return type(obj)(*list(generator))
80
+ else:
81
+ return type(obj)(generator)
82
+
83
+
84
+ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):
85
+ """
86
+ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.
87
+
88
+ Args:
89
+ func (`callable`):
90
+ The function to recursively apply.
91
+ data (nested list/tuple/dictionary of `main_type`):
92
+ The data on which to apply `func`
93
+ *args:
94
+ Positional arguments that will be passed to `func` when applied on the unpacked data.
95
+ test_type (`callable`, *optional*, defaults to `is_torch_tensor`):
96
+ A function taking an object and returning whether `func` should be applied to it.
97
+ error_on_other_type (`bool`, *optional*, defaults to `False`):
98
+ Whether to raise an error if, after unpacking `data`, we encounter an object that does not pass
99
+ `test_type`. If `False`, the function will leave those objects unchanged.
100
+ **kwargs (additional keyword arguments, *optional*):
101
+ Keyword arguments that will be passed to `func` when applied on the unpacked data.
102
+
103
+ Returns:
104
+ The same data structure as `data` with `func` applied to every object that passes `test_type`.
105
+ """
106
+ if isinstance(data, (tuple, list)):
107
+ return honor_type(
108
+ data,
109
+ (
110
+ recursively_apply(
111
+ func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
112
+ )
113
+ for o in data
114
+ ),
115
+ )
116
+ elif isinstance(data, Mapping):
117
+ return type(data)(
118
+ {
119
+ k: recursively_apply(
120
+ func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
121
+ )
122
+ for k, v in data.items()
123
+ }
124
+ )
125
+ elif test_type(data):
126
+ return func(data, *args, **kwargs)
127
+ elif error_on_other_type:
128
+ raise TypeError(
129
+ f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of "
130
+ f"objects that are valid for `{test_type.__name__}` should be passed."
131
+ )
132
+ return data
133
+
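A small sketch of the traversal behavior (the structure and values are arbitrary):

```python
import torch
from accelerate.utils.operations import recursively_apply

data = {"x": torch.ones(2), "meta": "keep-me", "nested": [torch.zeros(3), (torch.ones(1),)]}

doubled = recursively_apply(lambda t: t * 2, data)
print(doubled["x"])          # tensor([2., 2.])
print(doubled["meta"])       # 'keep-me' -- non-tensor leaves are passed through unchanged
print(doubled["nested"][1])  # (tensor([2.]),) -- list/tuple/dict container types are preserved
```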
134
+
135
+ def send_to_device(tensor, device, non_blocking=False, skip_keys=None):
136
+ """
137
+ Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
138
+
139
+ Args:
140
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
141
+ The data to send to a given device.
142
+ device (`torch.device`):
143
+ The device to send the data to.
144
+
145
+ Returns:
146
+ The same data structure as `tensor` with all tensors sent to the proper device.
147
+ """
148
+ if is_torch_tensor(tensor) or hasattr(tensor, "to"):
149
+ # `torch.Tensor.to("npu")` could not find context when called for the first time (see this [issue](https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue)).
150
+ if device == "npu":
151
+ device = "npu:0"
152
+ if device == "xpu":
153
+ device = "xpu:0"
154
+ # TODO: torch_mlu LongTensor.to(<int num>) has bugs, we will fix this later.
155
+ if is_torch_tensor(tensor) and tensor.device.type in ["mlu"] and tensor.dtype in [torch.int64]:
156
+ tensor = tensor.cpu()
157
+ try:
158
+ return tensor.to(device, non_blocking=non_blocking)
159
+ except TypeError: # .to() doesn't accept non_blocking as kwarg
160
+ return tensor.to(device)
161
+ except AssertionError as error:
162
+ # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
163
+ # This call is inside the try-block since is_npu_available is not supported by torch.compile.
164
+ if is_npu_available():
165
+ if isinstance(device, int):
166
+ device = f"npu:{device}"
167
+ else:
168
+ raise error
169
+ except Exception as error:
170
+ if is_xpu_available():
171
+ if isinstance(device, int):
172
+ device = f"xpu:{device}"
173
+ else:
174
+ raise error
175
+ try:
176
+ return tensor.to(device, non_blocking=non_blocking)
177
+ except TypeError: # .to() doesn't accept non_blocking as kwarg
178
+ return tensor.to(device)
179
+ elif isinstance(tensor, (tuple, list)):
180
+ return honor_type(
181
+ tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)
182
+ )
183
+ elif isinstance(tensor, Mapping):
184
+ if isinstance(skip_keys, str):
185
+ skip_keys = [skip_keys]
186
+ elif skip_keys is None:
187
+ skip_keys = []
188
+ return type(tensor)(
189
+ {
190
+ k: t if k in skip_keys else send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys)
191
+ for k, t in tensor.items()
192
+ }
193
+ )
194
+ else:
195
+ return tensor
196
+
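For example, moving a typical batch dictionary while leaving one entry alone (the key names are hypothetical):

```python
import torch
from accelerate.utils.operations import send_to_device

batch = {
    "input_ids": torch.ones(2, 4, dtype=torch.long),
    "labels": torch.zeros(2),
    "metadata": ["a", "b"],  # non-tensor leaves are returned as-is anyway
}

device = "cuda:0" if torch.cuda.is_available() else "cpu"
moved = send_to_device(batch, device, non_blocking=True, skip_keys="metadata")
print(moved["input_ids"].device)
```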
197
+
198
+ def get_data_structure(data):
199
+ """
200
+ Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.
201
+
202
+ Args:
203
+ data (nested list/tuple/dictionary of `torch.Tensor`):
204
+ The data to analyze.
205
+
206
+ Returns:
207
+ The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
208
+ """
209
+
210
+ def _get_data_structure(tensor):
211
+ return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)
212
+
213
+ return recursively_apply(_get_data_structure, data)
214
+
215
+
216
+ def get_shape(data):
217
+ """
218
+ Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list.
219
+
220
+ Args:
221
+ data (nested list/tuple/dictionary of `torch.Tensor`):
222
+ The data to analyze.
223
+
224
+ Returns:
225
+ The same data structure as `data` with lists of tensor shapes instead of tensors.
226
+ """
227
+
228
+ def _get_shape(tensor):
229
+ return list(tensor.shape)
230
+
231
+ return recursively_apply(_get_shape, data)
232
+
233
+
234
+ def initialize_tensors(data_structure):
235
+ """
236
+ Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`].
237
+
238
+ Returns:
239
+ The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
240
+ """
241
+
242
+ def _initialize_tensor(tensor_info):
243
+ return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)
244
+
245
+ return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
246
+
247
+
248
+ def find_batch_size(data):
249
+ """
250
+ Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.
251
+
252
+ Args:
253
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
254
+
255
+ Returns:
256
+ `int`: The batch size.
257
+ """
258
+ if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0):
259
+ raise ValueError(f"Cannot find the batch size from empty {type(data)}.")
260
+
261
+ if isinstance(data, (tuple, list)):
262
+ return find_batch_size(data[0])
263
+ elif isinstance(data, Mapping):
264
+ for k in data.keys():
265
+ return find_batch_size(data[k])
266
+ elif not isinstance(data, torch.Tensor):
267
+ raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.")
268
+ return data.shape[0]
269
+
270
+
271
+ def ignorant_find_batch_size(data):
272
+ """
273
+ Same as [`utils.operations.find_batch_size`] except it returns `None` if a `ValueError` or `TypeError` is raised
274
+
275
+ Args:
276
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
277
+
278
+ Returns:
279
+ `int`: The batch size.
280
+ """
281
+ try:
282
+ return find_batch_size(data)
283
+ except (ValueError, TypeError):
284
+ pass
285
+ return None
286
+
287
+
288
+ def listify(data):
289
+ """
290
+ Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers.
291
+
292
+ Args:
293
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers.
294
+
295
+ Returns:
296
+ The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
297
+ """
298
+
299
+ def _convert_to_list(tensor):
300
+ tensor = tensor.detach().cpu()
301
+ if tensor.dtype == torch.bfloat16:
302
+ # As of Numpy 1.21.4, NumPy does not support bfloat16 (see
303
+ # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ).
304
+ # Until NumPy adds bfloat16, we must convert to float32.
305
+ tensor = tensor.to(torch.float32)
306
+ return tensor.tolist()
307
+
308
+ return recursively_apply(_convert_to_list, data)
309
+
310
+
311
+ def _tpu_gather(tensor):
312
+ def _tpu_gather_one(tensor):
313
+ if tensor.ndim == 0:
314
+ tensor = tensor.clone()[None]
315
+
316
+ # Can only gather contiguous tensors
317
+ if not tensor.is_contiguous():
318
+ tensor = tensor.contiguous()
319
+ return xm.all_gather(tensor)
320
+
321
+ res = recursively_apply(_tpu_gather_one, tensor, error_on_other_type=True)
322
+ xm.mark_step()
323
+ return res
324
+
325
+
326
+ def _gpu_gather(tensor):
327
+ state = PartialState()
328
+ if is_torch_version(">=", "1.13"):
329
+ gather_op = torch.distributed.all_gather_into_tensor
330
+ else:
331
+ gather_op = torch.distributed._all_gather_base
332
+
333
+ def _gpu_gather_one(tensor):
334
+ if tensor.ndim == 0:
335
+ tensor = tensor.clone()[None]
336
+
337
+ # Can only gather contiguous tensors
338
+ if not tensor.is_contiguous():
339
+ tensor = tensor.contiguous()
340
+
341
+ if state.backend is not None and state.backend != "gloo":
342
+ # We use `empty` as `all_gather_into_tensor` slightly
343
+ # differs from `all_gather` for better efficiency,
344
+ # and we rely on the number of items in the tensor
345
+ # rather than its direct shape
346
+ output_tensors = torch.empty(
347
+ state.num_processes * tensor.numel(),
348
+ dtype=tensor.dtype,
349
+ device=state.device,
350
+ )
351
+ gather_op(output_tensors, tensor)
352
+ return output_tensors.view(-1, *tensor.size()[1:])
353
+ else:
354
+ # a backend of `None` is always CPU
355
+ # also gloo does not support `all_gather_into_tensor`,
356
+ # which will result in a larger memory overhead for the op
357
+ output_tensors = [torch.empty_like(tensor) for _ in range(state.num_processes)]
358
+ torch.distributed.all_gather(output_tensors, tensor)
359
+ return torch.cat(output_tensors, dim=0)
360
+
361
+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
362
+
363
+
364
+ class DistributedOperationException(Exception):
365
+ """
366
+ An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the
367
+ tensors.
368
+ """
369
+
370
+ pass
371
+
372
+
373
+ def verify_operation(function):
374
+ """
375
+ Verifies that `tensor` is the same shape across all processes. Only run if `PartialState().debug` is `True`.
376
+ """
377
+
378
+ @wraps(function)
379
+ def wrapper(*args, **kwargs):
380
+ if PartialState().distributed_type == DistributedType.NO or not PartialState().debug:
381
+ return function(*args, **kwargs)
382
+ operation = f"{function.__module__}.{function.__name__}"
383
+ if "tensor" in kwargs:
384
+ tensor = kwargs["tensor"]
385
+ else:
386
+ tensor = args[0]
387
+ if PartialState().device.type != find_device(tensor).type:
388
+ raise DistributedOperationException(
389
+ f"One or more of the tensors passed to {operation} were not on the {tensor.device.type} while the `Accelerator` is configured for {PartialState().device.type}. "
390
+ f"Please move it to the {PartialState().device.type} before calling {operation}."
391
+ )
392
+ shapes = get_shape(tensor)
393
+ output = gather_object([shapes])
394
+ if output[0] is not None:
395
+ are_same = output.count(output[0]) == len(output)
396
+ if not are_same:
397
+ process_shape_str = "\n - ".join([f"Process {i}: {shape}" for i, shape in enumerate(output)])
398
+ raise DistributedOperationException(
399
+ f"Cannot apply desired operation due to shape mismatches. "
400
+ "All shapes across devices must be valid."
401
+ f"\n\nOperation: `{operation}`\nInput shapes:\n - {process_shape_str}"
402
+ )
403
+ return function(*args, **kwargs)
404
+
405
+ return wrapper
406
+
407
+
408
+ def chained_operation(function):
409
+ """
410
+ Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing
411
+ `DistributedOperationException`.
412
+ """
413
+
414
+ @wraps(function)
415
+ def wrapper(*args, **kwargs):
416
+ try:
417
+ return function(*args, **kwargs)
418
+ except DistributedOperationException as e:
419
+ operation = f"{function.__module__}.{function.__name__}"
420
+ raise DistributedOperationException(
421
+ f"Error found while calling `{operation}`. Please see the earlier error for more details."
422
+ ) from e
423
+
424
+ return wrapper
425
+
426
+
427
+ @verify_operation
428
+ def gather(tensor):
429
+ """
430
+ Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.
431
+
432
+ Args:
433
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
434
+ The data to gather.
435
+
436
+ Returns:
437
+ The same data structure as `tensor` with all tensors gathered from every process.
438
+ """
439
+ if PartialState().distributed_type == DistributedType.XLA:
440
+ return _tpu_gather(tensor)
441
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
442
+ return _gpu_gather(tensor)
443
+ else:
444
+ return tensor
445
+
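A sketch of the usual pattern in a script started with `accelerate launch`; in a single, non-distributed process `gather` simply returns its input:

```python
import torch
from accelerate import Accelerator
from accelerate.utils import gather

accelerator = Accelerator()
# Each process holds its own slice of predictions.
local_preds = torch.arange(4, device=accelerator.device) + 4 * accelerator.process_index

all_preds = gather(local_preds)  # shape [num_processes * 4], identical on every process
print(accelerator.process_index, all_preds)
```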
446
+
447
+ def _gpu_gather_object(object: Any):
448
+ output_objects = [None for _ in range(PartialState().num_processes)]
449
+ torch.distributed.all_gather_object(output_objects, object)
450
+ # all_gather_object returns a list of lists, so we need to flatten it
451
+ return [x for y in output_objects for x in y]
452
+
453
+
454
+ def gather_object(object: Any):
455
+ """
456
+ Recursively gather object in a nested list/tuple/dictionary of objects from all devices.
457
+
458
+ Args:
459
+ object (nested list/tuple/dictionary of picklable object):
460
+ The data to gather.
461
+
462
+ Returns:
463
+ The same data structure as `object` with all the objects sent to every device.
464
+ """
465
+ if PartialState().distributed_type == DistributedType.XLA:
466
+ raise NotImplementedError("gather objects in TPU is not supported")
467
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
468
+ return _gpu_gather_object(object)
469
+ else:
470
+ return object
471
+
472
+
473
+ def _gpu_broadcast(data, src=0):
474
+ def _gpu_broadcast_one(tensor, src=0):
475
+ torch.distributed.broadcast(tensor, src=src)
476
+ return tensor
477
+
478
+ return recursively_apply(_gpu_broadcast_one, data, error_on_other_type=True, src=src)
479
+
480
+
481
+ def _tpu_broadcast(tensor, src=0, name="broadcast tensor"):
482
+ if isinstance(tensor, (list, tuple)):
483
+ return honor_type(tensor, (_tpu_broadcast(t, name=f"{name}_{i}") for i, t in enumerate(tensor)))
484
+ elif isinstance(tensor, Mapping):
485
+ return type(tensor)({k: _tpu_broadcast(v, name=f"{name}_{k}") for k, v in tensor.items()})
486
+ return xm.mesh_reduce(name, tensor, lambda x: x[src])
487
+
488
+
489
+ TENSOR_TYPE_TO_INT = {
490
+ torch.float: 1,
491
+ torch.double: 2,
492
+ torch.half: 3,
493
+ torch.bfloat16: 4,
494
+ torch.uint8: 5,
495
+ torch.int8: 6,
496
+ torch.int16: 7,
497
+ torch.int32: 8,
498
+ torch.int64: 9,
499
+ torch.bool: 10,
500
+ }
501
+
502
+ TENSOR_INT_TO_DTYPE = {v: k for k, v in TENSOR_TYPE_TO_INT.items()}
503
+
504
+
505
+ def gather_tensor_shape(tensor):
506
+ """
507
+ Grabs the shape of `tensor` only available on one process and returns a tensor of its shape
508
+ """
509
+ # Allocate a large int buffer to store the shape (and dtype code) of the tensor
510
+ max_tensor_dimension = 2**20
511
+ state = PartialState()
512
+ base_tensor = torch.empty(max_tensor_dimension, dtype=torch.int, device=state.device)
513
+
514
+ # Since PyTorch can't just send a tensor to another GPU without
515
+ # knowing its size, we store the size of the tensor with data
516
+ # in an allocation
517
+ if tensor is not None:
518
+ shape = tensor.shape
519
+ tensor_dtype = TENSOR_TYPE_TO_INT[tensor.dtype]
520
+ base_tensor[: len(shape) + 1] = torch.tensor(list(shape) + [tensor_dtype], dtype=int)
521
+ # Perform a reduction to copy the size data onto all GPUs
522
+ base_tensor = reduce(base_tensor, reduction="sum")
523
+ base_tensor = base_tensor[base_tensor.nonzero()]
524
+ # The last non-zero data contains the coded dtype the source tensor is
525
+ dtype = int(base_tensor[-1:][0])
526
+ base_tensor = base_tensor[:-1]
527
+ return base_tensor, dtype
528
+
529
+
530
+ def copy_tensor_to_devices(tensor=None) -> torch.Tensor:
531
+ """
532
+ Copys a tensor that only exists on a single device and broadcasts it to other devices. Differs from `broadcast` as
533
+ each worker doesn't need to know its shape when used (and tensor can be `None`)
534
+
535
+ Args:
536
+ tensor (`torch.tensor`):
537
+ The tensor that should be sent to all devices. It must be defined on only a single device; on all other
538
+ devices it should be `None`.
539
+ """
540
+ state = PartialState()
541
+ shape, dtype = gather_tensor_shape(tensor)
542
+ if tensor is None:
543
+ tensor = torch.zeros(shape, dtype=TENSOR_INT_TO_DTYPE[dtype]).to(state.device)
544
+ return reduce(tensor, reduction="sum")
545
+
546
+
547
+ @verify_operation
548
+ def broadcast(tensor, from_process: int = 0):
549
+ """
550
+ Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.
551
+
552
+ Args:
553
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
554
+ The data to broadcast.
555
+ from_process (`int`, *optional*, defaults to 0):
556
+ The process from which to send the data
557
+
558
+ Returns:
559
+ The same data structure as `tensor` with all tensors broadcasted to the proper device.
560
+ """
561
+ if PartialState().distributed_type == DistributedType.XLA:
562
+ return _tpu_broadcast(tensor, src=from_process, name="accelerate.utils.broadcast")
563
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
564
+ return _gpu_broadcast(tensor, src=from_process)
565
+ else:
566
+ return tensor
567
+
568
+
569
+ def broadcast_object_list(object_list, from_process: int = 0):
570
+ """
571
+ Broadcast a list of picklable objects form one process to the others.
572
+
573
+ Args:
574
+ object_list (list of picklable objects):
575
+ The list of objects to broadcast. This list will be modified inplace.
576
+ from_process (`int`, *optional*, defaults to 0):
577
+ The process from which to send the data.
578
+
579
+ Returns:
580
+ The same list containing the objects from process 0.
581
+ """
582
+ if PartialState().distributed_type == DistributedType.XLA:
583
+ for i, obj in enumerate(object_list):
584
+ object_list[i] = xm.mesh_reduce("accelerate.utils.broadcast_object_list", obj, lambda x: x[from_process])
585
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
586
+ torch.distributed.broadcast_object_list(object_list, src=from_process)
587
+ return object_list
588
+
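Sketch of the two broadcast entry points used together (again under `accelerate launch`; with a single process both calls are effectively no-ops):

```python
import torch
from accelerate import Accelerator
from accelerate.utils import broadcast, broadcast_object_list

accelerator = Accelerator()

# Every process must enter with a tensor of the same shape; process 0's values win.
seed = torch.randint(0, 2**16, (1,), device=accelerator.device)
seed = broadcast(seed, from_process=0)

# Arbitrary picklable objects travel through broadcast_object_list (modified in place).
config = [{"lr": 3e-4}] if accelerator.is_main_process else [None]
config = broadcast_object_list(config, from_process=0)
print(accelerator.process_index, seed.item(), config[0])
```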
589
+
590
+ def slice_tensors(data, tensor_slice, process_index=None, num_processes=None):
591
+ """
592
+ Recursively takes a slice in a nested list/tuple/dictionary of tensors.
593
+
594
+ Args:
595
+ data (nested list/tuple/dictionary of `torch.Tensor`):
596
+ The data to slice.
597
+ tensor_slice (`slice`):
598
+ The slice to take.
599
+
600
+ Returns:
601
+ The same data structure as `data` with all the tensors slices.
602
+ """
603
+
604
+ def _slice_tensor(tensor, tensor_slice):
605
+ return tensor[tensor_slice]
606
+
607
+ return recursively_apply(_slice_tensor, data, tensor_slice)
608
+
609
+
610
+ def concatenate(data, dim=0):
611
+ """
612
+ Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.
613
+
614
+ Args:
615
+ data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`):
616
+ The data to concatenate.
617
+ dim (`int`, *optional*, defaults to 0):
618
+ The dimension on which to concatenate.
619
+
620
+ Returns:
621
+ The same data structure as `data` with all the tensors concatenated.
622
+ """
623
+ if isinstance(data[0], (tuple, list)):
624
+ return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))
625
+ elif isinstance(data[0], Mapping):
626
+ return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})
627
+ elif not isinstance(data[0], torch.Tensor):
628
+ raise TypeError(f"Can only concatenate tensors but got {type(data[0])}")
629
+ return torch.cat(data, dim=dim)
630
+
631
+
632
+ class CannotPadNestedTensorWarning(UserWarning):
633
+ pass
634
+
635
+
636
+ @chained_operation
637
+ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
638
+ """
639
+ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they
640
+ can safely be gathered.
641
+
642
+ Args:
643
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
644
+ The data to gather.
645
+ dim (`int`, *optional*, defaults to 0):
646
+ The dimension on which to pad.
647
+ pad_index (`int`, *optional*, defaults to 0):
648
+ The value with which to pad.
649
+ pad_first (`bool`, *optional*, defaults to `False`):
650
+ Whether to pad at the beginning or the end.
651
+ """
652
+
653
+ def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
654
+ if getattr(tensor, "is_nested", False):
655
+ warnings.warn(
656
+ "Cannot pad nested tensors without more information. Leaving unprocessed.",
657
+ CannotPadNestedTensorWarning,
658
+ )
659
+ return tensor
660
+ if dim >= len(tensor.shape):
661
+ return tensor
662
+
663
+ # Gather all sizes
664
+ size = torch.tensor(tensor.shape, device=tensor.device)[None]
665
+ sizes = gather(size).cpu()
666
+ # Then pad to the maximum size
667
+ max_size = max(s[dim] for s in sizes)
668
+ if max_size == tensor.shape[dim]:
669
+ return tensor
670
+
671
+ old_size = tensor.shape
672
+ new_size = list(old_size)
673
+ new_size[dim] = max_size
674
+ new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
675
+ if pad_first:
676
+ indices = tuple(
677
+ slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size))
678
+ )
679
+ else:
680
+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
681
+ new_tensor[indices] = tensor
682
+ return new_tensor
683
+
684
+ return recursively_apply(
685
+ _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first
686
+ )
687
+
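Typical use right before gathering variable-length outputs (in a single process the tensor already has the global maximum size, so it is returned unchanged):

```python
import torch
from accelerate import Accelerator
from accelerate.utils import gather, pad_across_processes

accelerator = Accelerator()
# Sequence length can differ per process, e.g. after generation with early stopping.
logits = torch.randn(8, 10 + accelerator.process_index, device=accelerator.device)

padded = pad_across_processes(logits, dim=1, pad_index=0)  # pad dim 1 up to the max across processes
gathered = gather(padded)                                   # shapes now match, so gather is safe
```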
688
+
689
+ def pad_input_tensors(tensor, batch_size, num_processes, dim=0):
690
+ """
691
+ Takes a `tensor` of arbitrary size and pads it so that it can work given `num_processes` needed dimensions.
692
+
693
+ New tensors are just the last input repeated.
694
+
695
+ E.g.:
696
+ Tensor: ([3,4,4]) Num processes: 4 Expected result shape: ([4,4,4])
697
+
698
+ """
699
+
700
+ def _pad_input_tensors(tensor, batch_size, num_processes, dim=0):
701
+ remainder = batch_size // num_processes
702
+ last_inputs = batch_size - (remainder * num_processes)
703
+ if batch_size // num_processes == 0:
704
+ to_pad = num_processes - batch_size
705
+ else:
706
+ to_pad = num_processes - (batch_size // num_processes)
707
+ # In the rare case that `to_pad` is negative,
708
+ # we need to pad the last inputs - the found `to_pad`
709
+ if last_inputs > to_pad and to_pad < 1:
710
+ to_pad = last_inputs - to_pad
711
+ old_size = tensor.shape
712
+ new_size = list(old_size)
713
+ new_size[0] = batch_size + to_pad
714
+ new_tensor = tensor.new_zeros(tuple(new_size))
715
+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
716
+ new_tensor[indices] = tensor
717
+ return new_tensor
718
+
719
+ return recursively_apply(
720
+ _pad_input_tensors,
721
+ tensor,
722
+ error_on_other_type=True,
723
+ batch_size=batch_size,
724
+ num_processes=num_processes,
725
+ dim=dim,
726
+ )
727
+
728
+
729
+ @verify_operation
730
+ def reduce(tensor, reduction="mean", scale=1.0):
731
+ """
732
+ Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes using
733
+ a given operation ("mean" or "sum").
734
+
735
+ Args:
736
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
737
+ The data to reduce.
738
+ reduction (`str`, *optional*, defaults to `"mean"`):
739
+ A reduction method. Can be of "mean", "sum", or "none"
740
+ scale (`float`, *optional*):
741
+ A default scaling value to be applied after the reduce, only valied on XLA.
742
+
743
+ Returns:
744
+ The same data structure as `data` with all the tensors reduced.
745
+ """
746
+
747
+ def _reduce_across_processes(tensor, reduction="mean", scale=1.0):
748
+ state = PartialState()
749
+ cloned_tensor = tensor.clone()
750
+ if state.distributed_type == DistributedType.NO:
751
+ return cloned_tensor
752
+ if state.distributed_type == DistributedType.XLA:
753
+ # Some processes may have different HLO graphs than other
754
+ # processes, for example in the breakpoint API
755
+ # accelerator.set_trigger(). Use mark_step to make HLOs
756
+ # the same on all processes.
757
+ xm.mark_step()
758
+ xm.all_reduce(xm.REDUCE_SUM, [cloned_tensor], scale)
759
+ xm.mark_step()
760
+ elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES:
761
+ torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)
762
+ if reduction == "mean":
763
+ cloned_tensor /= state.num_processes
764
+ return cloned_tensor
765
+
766
+ return recursively_apply(
767
+ _reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale
768
+ )
769
+
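For instance, averaging a per-process loss for logging (beyond the clone, a no-op in a single process):

```python
import torch
from accelerate import Accelerator
from accelerate.utils import reduce

accelerator = Accelerator()
local_loss = torch.tensor(0.5 * (accelerator.process_index + 1), device=accelerator.device)

mean_loss = reduce(local_loss, reduction="mean")  # sum across processes, then divide by their count
accelerator.print(mean_loss.item())
```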
770
+
771
+ def convert_to_fp32(tensor):
772
+ """
773
+ Recursively converts the elements of a nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.
774
+
775
+ Args:
776
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
777
+ The data to convert from FP16/BF16 to FP32.
778
+
779
+ Returns:
780
+ The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
781
+ """
782
+
783
+ def _convert_to_fp32(tensor):
784
+ return tensor.float()
785
+
786
+ def _is_fp16_bf16_tensor(tensor):
787
+ return (is_torch_tensor(tensor) or hasattr(tensor, "dtype")) and tensor.dtype in (
788
+ torch.float16,
789
+ torch.bfloat16,
790
+ )
791
+
792
+ return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
793
+
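A quick sketch; integer tensors are left untouched because only fp16/bf16 leaves match the test:

```python
import torch
from accelerate.utils import convert_to_fp32

outputs = {"logits": torch.randn(2, 3, dtype=torch.float16), "ids": torch.arange(3)}
outputs = convert_to_fp32(outputs)
print(outputs["logits"].dtype)  # torch.float32
print(outputs["ids"].dtype)     # torch.int64
```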
794
+
795
+ class ConvertOutputsToFp32:
796
+ """
797
+ Decorator to apply to a function outputing tensors (like a model forward pass) that ensures the outputs in FP16
798
+ precision will be convert back to FP32.
799
+
800
+ Args:
801
+ model_forward (`Callable`):
802
+ The function whose outputs we want to convert.
803
+
804
+ Returns:
805
+ The same function as `model_forward` but with converted outputs.
806
+ """
807
+
808
+ def __init__(self, model_forward):
809
+ self.model_forward = model_forward
810
+ update_wrapper(self, model_forward)
811
+
812
+ def __call__(self, *args, **kwargs):
813
+ return convert_to_fp32(self.model_forward(*args, **kwargs))
814
+
815
+ def __getstate__(self):
816
+ raise pickle.PicklingError(
817
+ "Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
818
+ )
819
+
820
+
821
+ def convert_outputs_to_fp32(model_forward):
822
+ model_forward = ConvertOutputsToFp32(model_forward)
823
+
824
+ def forward(*args, **kwargs):
825
+ return model_forward(*args, **kwargs)
826
+
827
+ # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
828
+ forward.__wrapped__ = model_forward
829
+
830
+ return forward
831
+
832
+
833
+ def find_device(data):
834
+ """
835
+ Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device).
836
+
837
+ Args:
838
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of.
839
+ """
840
+ if isinstance(data, Mapping):
841
+ for obj in data.values():
842
+ device = find_device(obj)
843
+ if device is not None:
844
+ return device
845
+ elif isinstance(data, (tuple, list)):
846
+ for obj in data:
847
+ device = find_device(obj)
848
+ if device is not None:
849
+ return device
850
+ elif isinstance(data, torch.Tensor):
851
+ return data.device
env-llmeval/lib/python3.10/site-packages/accelerate/utils/other.py ADDED
@@ -0,0 +1,366 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import collections
16
+ import os
17
+ import platform
18
+ import re
19
+ import socket
20
+ from contextlib import contextmanager
21
+ from functools import partial, reduce
22
+ from types import MethodType
23
+ from typing import OrderedDict
24
+
25
+ import torch
26
+ from packaging.version import Version
27
+ from safetensors.torch import save_file as safe_save_file
28
+
29
+ from ..commands.config.default import write_basic_config # noqa: F401
30
+ from ..logging import get_logger
31
+ from ..state import PartialState
32
+ from .constants import FSDP_PYTORCH_VERSION
33
+ from .dataclasses import DistributedType
34
+ from .imports import is_deepspeed_available, is_torch_distributed_available, is_torch_xla_available
35
+ from .modeling import id_tensor_storage
36
+ from .transformer_engine import convert_model
37
+ from .versions import is_torch_version
38
+
39
+
40
+ logger = get_logger(__name__)
41
+
42
+
43
+ if is_torch_xla_available():
44
+ import torch_xla.core.xla_model as xm
45
+
46
+
47
+ def is_compiled_module(module):
48
+ """
49
+ Check whether the module was compiled with torch.compile()
50
+ """
51
+ if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
52
+ return False
53
+ return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
54
+
55
+
56
+ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True, recursive: bool = False):
57
+ """
58
+ Extract a model from its distributed containers.
59
+
60
+ Args:
61
+ model (`torch.nn.Module`):
62
+ The model to extract.
63
+ keep_fp32_wrapper (`bool`, *optional*):
64
+ Whether to remove mixed precision hooks from the model.
65
+ recursive (`bool`, *optional*, defaults to `False`):
66
+ Whether to recursively extract all cases of `module.module` from `model` as well as unwrap child sublayers
67
+ recursively, not just the top-level distributed containers.
68
+
69
+ Returns:
70
+ `torch.nn.Module`: The extracted model.
71
+ """
72
+ options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
73
+
74
+ is_compiled = is_compiled_module(model)
75
+ if is_compiled:
76
+ compiled_model = model
77
+ model = model._orig_mod
78
+
79
+ if is_deepspeed_available():
80
+ from deepspeed import DeepSpeedEngine
81
+
82
+ options += (DeepSpeedEngine,)
83
+
84
+ if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():
85
+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
86
+
87
+ options += (FSDP,)
88
+
89
+ while isinstance(model, options):
90
+ model = model.module
91
+
92
+ if recursive:
93
+ # This is needed in cases such as using FSDPv2 on XLA
94
+ def _recursive_unwrap(module):
95
+ # Wrapped modules are standardly wrapped as `module`, similar to the cases earlier
96
+ # with DDP, DataParallel, DeepSpeed, and FSDP
97
+ if hasattr(module, "module"):
98
+ unwrapped_module = _recursive_unwrap(module.module)
99
+ else:
100
+ unwrapped_module = module
101
+ # Next unwrap child sublayers recursively
102
+ for name, child in unwrapped_module.named_children():
103
+ setattr(unwrapped_module, name, _recursive_unwrap(child))
104
+ return unwrapped_module
105
+
106
+ # Start with top-level
107
+ model = _recursive_unwrap(model)
108
+
109
+ if not keep_fp32_wrapper:
110
+ forward = model.forward
111
+ original_forward = model.__dict__.pop("_original_forward", None)
112
+ if original_forward is not None:
113
+ while hasattr(forward, "__wrapped__"):
114
+ forward = forward.__wrapped__
115
+ if forward == original_forward:
116
+ break
117
+ model.forward = MethodType(forward, model)
118
+ if getattr(model, "_converted_to_transformer_engine", False):
119
+ convert_model(model, to_transformer_engine=False)
120
+
121
+ if is_compiled:
122
+ compiled_model._orig_mod = model
123
+ model = compiled_model
124
+
125
+ return model
126
+
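The usual unwrap-before-save pattern looks roughly like this (the tiny model is a stand-in; `Accelerator.unwrap_model` is the higher-level entry point for the same helper):

```python
import torch
from accelerate import Accelerator
from accelerate.utils import extract_model_from_parallel

accelerator = Accelerator()
model = accelerator.prepare(torch.nn.Linear(4, 4))  # may be wrapped in DDP/FSDP/DeepSpeed containers

unwrapped = extract_model_from_parallel(model, keep_fp32_wrapper=False)
torch.save(unwrapped.state_dict(), "model.bin")     # state-dict keys without the extra `module.` nesting
```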
127
+
128
+ def wait_for_everyone():
129
+ """
130
+ Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
131
+
132
+ <Tip warning={true}>
133
+
134
+ Make sure all processes will reach this instruction otherwise one of your processes will hang forever.
135
+
136
+ </Tip>
137
+ """
138
+ PartialState().wait_for_everyone()
139
+
140
+
141
+ def clean_state_dict_for_safetensors(state_dict: dict):
142
+ """
143
+ Cleans the state dictionary from a model and removes tensor aliasing if present.
144
+
145
+ Args:
146
+ state_dict (`dict`):
147
+ The state dictionary from a model
148
+ """
149
+ ptrs = collections.defaultdict(list)
150
+ # When bnb serialization is used, weights in state dict can be strings
151
+ for name, tensor in state_dict.items():
152
+ if not isinstance(tensor, str):
153
+ ptrs[id_tensor_storage(tensor)].append(name)
154
+
155
+ # These are all pointers of tensors with shared memory
156
+ shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
157
+ warn_names = set()
158
+ for names in shared_ptrs.values():
159
+ # When not all duplicates have been cleaned, we still remove those keys but put a clear warning.
160
+ # If the link between tensors was done at runtime then `from_pretrained` will not get
161
+ # the key back leading to random tensor. A proper warning will be shown
162
+ # during reload (if applicable), but since the file is not necessarily compatible with
163
+ # the config, better show a proper warning.
164
+ found_names = [name for name in names if name in state_dict]
165
+ warn_names.update(found_names[1:])
166
+ for name in found_names[1:]:
167
+ del state_dict[name]
168
+ if len(warn_names) > 0:
169
+ logger.warning(
170
+ f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading",
171
+ )
172
+ state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()}
173
+ return state_dict
174
+
175
+
176
+ def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False):
177
+ """
178
+ Save the data to disk. Use in place of `torch.save()`.
179
+
180
+ Args:
181
+ obj:
182
+ The data to save
183
+ f:
184
+ The file (or file-like object) to use to save the data
185
+ save_on_each_node (`bool`, *optional*, defaults to `False`):
186
+ Whether to only save on the global main process
187
+ safe_serialization (`bool`, *optional*, defaults to `False`):
188
+ Whether to save `obj` using `safetensors` or the traditional PyTorch way (that uses `pickle`).
189
+ """
190
+ # When TorchXLA is enabled, it's necessary to transfer all data to the CPU before saving.
191
+ # Another issue arises with `id_tensor_storage`, which treats all XLA tensors as identical.
192
+ # If tensors remain on XLA, calling `clean_state_dict_for_safetensors` will result in only
193
+ # one XLA tensor remaining.
194
+ if PartialState().distributed_type == DistributedType.XLA:
195
+ obj = xm._maybe_convert_to_cpu(obj)
196
+ # Check if it's a model and remove duplicates
197
+ if safe_serialization:
198
+ save_func = partial(safe_save_file, metadata={"format": "pt"})
199
+ if isinstance(obj, OrderedDict):
200
+ obj = clean_state_dict_for_safetensors(obj)
201
+ else:
202
+ save_func = torch.save
203
+
204
+ if PartialState().is_main_process and not save_on_each_node:
205
+ save_func(obj, f)
206
+ elif PartialState().is_local_main_process and save_on_each_node:
207
+ save_func(obj, f)
208
+
209
+
210
+ @contextmanager
211
+ def clear_environment():
212
+ """
213
+ A context manager that will temporarily clear environment variables.
214
+
215
+ When this context exits, the previous environment variables will be back.
216
+
217
+ Example:
218
+
219
+ ```python
220
+ >>> import os
221
+ >>> from accelerate.utils import clear_environment
222
+
223
+ >>> os.environ["FOO"] = "bar"
224
+ >>> with clear_environment():
225
+ ... print(os.environ)
226
+ ... os.environ["FOO"] = "new_bar"
227
+ ... print(os.environ["FOO"])
228
+ {}
229
+ new_bar
230
+
231
+ >>> print(os.environ["FOO"])
232
+ bar
233
+ ```
234
+ """
235
+ _old_os_environ = os.environ.copy()
236
+ os.environ.clear()
237
+
238
+ try:
239
+ yield
240
+ finally:
241
+ os.environ.clear() # clear any added keys,
242
+ os.environ.update(_old_os_environ) # then restore previous environment
243
+
244
+
245
+ @contextmanager
246
+ def patch_environment(**kwargs):
247
+ """
248
+ A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.
249
+
250
+ Will convert the values in `kwargs` to strings and upper-case all the keys.
251
+
252
+ Example:
253
+
254
+ ```python
255
+ >>> import os
256
+ >>> from accelerate.utils import patch_environment
257
+
258
+ >>> with patch_environment(FOO="bar"):
259
+ ... print(os.environ["FOO"]) # prints "bar"
260
+ >>> print(os.environ["FOO"]) # raises KeyError
261
+ ```
262
+ """
263
+ existing_vars = {}
264
+ for key, value in kwargs.items():
265
+ key = key.upper()
266
+ if key in os.environ:
267
+ existing_vars[key] = os.environ[key]
268
+ os.environ[key] = str(value)
269
+
270
+ try:
271
+ yield
272
+ finally:
273
+ for key in kwargs:
274
+ key = key.upper()
275
+ if key in existing_vars:
276
+ # restore previous value
277
+ os.environ[key] = existing_vars[key]
278
+ else:
279
+ os.environ.pop(key, None)
280
+
281
+
282
+ def get_pretty_name(obj):
283
+ """
284
+ Gets a pretty name from `obj`.
285
+ """
286
+ if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
287
+ obj = getattr(obj, "__class__", obj)
288
+ if hasattr(obj, "__qualname__"):
289
+ return obj.__qualname__
290
+ if hasattr(obj, "__name__"):
291
+ return obj.__name__
292
+ return str(obj)
293
+
294
+
295
+ def merge_dicts(source, destination):
296
+ """
297
+ Recursively merges two dictionaries.
298
+
299
+ Args:
300
+ source (`dict`): The dictionary to merge into `destination`.
301
+ destination (`dict`): The dictionary to merge `source` into.
302
+ """
303
+ for key, value in source.items():
304
+ if isinstance(value, dict):
305
+ node = destination.setdefault(key, {})
306
+ merge_dicts(value, node)
307
+ else:
308
+ destination[key] = value
309
+
310
+ return destination
311
+
312
+
313
+ def is_port_in_use(port: int = None) -> bool:
314
+ """
315
+ Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been
316
+ run and need to see if the port is already in use.
317
+ """
318
+ if port is None:
319
+ port = 29500
320
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
321
+ return s.connect_ex(("localhost", port)) == 0
322
+
323
+
324
+ def convert_bytes(size):
325
+ "Converts `size` from bytes to the largest possible unit"
326
+ for x in ["bytes", "KB", "MB", "GB", "TB"]:
327
+ if size < 1024.0:
328
+ return f"{round(size, 2)} {x}"
329
+ size /= 1024.0
330
+
331
+ return f"{round(size, 2)} PB"
332
+
333
+
334
+ def check_os_kernel():
335
+ """Warns if the kernel version is below the recommended minimum on Linux."""
336
+ # see issue #1929
337
+ info = platform.uname()
338
+ system = info.system
339
+ if system != "Linux":
340
+ return
341
+
342
+ _, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release)
343
+ min_version = "5.5.0"
344
+ if Version(version) < Version(min_version):
345
+ msg = (
346
+ f"Detected kernel version {version}, which is below the recommended minimum of {min_version}; this can "
347
+ "cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher."
348
+ )
349
+ logger.warning(msg, main_process_only=True)
350
+
351
+
352
+ def recursive_getattr(obj, attr: str):
353
+ """
354
+ Recursive `getattr`.
355
+
356
+ Args:
357
+ obj:
358
+ A class instance holding the attribute.
359
+ attr (`str`):
360
+ The attribute that is to be retrieved, e.g. 'attribute1.attribute2'.
361
+ """
362
+
363
+ def _getattr(obj, attr):
364
+ return getattr(obj, attr)
365
+
366
+ return reduce(_getattr, [obj] + attr.split("."))
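A minimal usage sketch for the helpers added above (an illustration, not part of the commit; it assumes `accelerate` is installed and that `save`, `wait_for_everyone`, and `patch_environment` are re-exported from `accelerate.utils`):

```python
# Sketch only: exercises the utilities defined in accelerate/utils/other.py above.
import torch
from accelerate.utils import patch_environment, save, wait_for_everyone

state_dict = {"weight": torch.zeros(2, 2)}

# Temporarily set MASTER_PORT (keys are upper-cased, values converted to strings).
with patch_environment(master_port=29501):
    wait_for_everyone()  # block until every process reaches this point
    # Saves only on the (global) main process, using safetensors instead of pickle.
    save(state_dict, "checkpoint.safetensors", safe_serialization=True)
```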
env-llmeval/lib/python3.10/site-packages/accelerate/utils/random.py ADDED
@@ -0,0 +1,122 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import random
+ from typing import List, Optional, Union
+
+ import numpy as np
+ import torch
+
+ from ..state import AcceleratorState
+ from .constants import CUDA_DISTRIBUTED_TYPES
+ from .dataclasses import DistributedType, RNGType
+ from .imports import is_mlu_available, is_npu_available, is_torch_xla_available, is_xpu_available
+
+
+ if is_torch_xla_available():
+     import torch_xla.core.xla_model as xm
+
+
+ def set_seed(seed: int, device_specific: bool = False, deterministic: bool = False):
+     """
+     Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.
+
+     Args:
+         seed (`int`):
+             The seed to set.
+         device_specific (`bool`, *optional*, defaults to `False`):
+             Whether to differ the seed on each device slightly with `self.process_index`.
+         deterministic (`bool`, *optional*, defaults to `False`):
+             Whether to use deterministic algorithms where available. Can slow down training.
+     """
+     if device_specific:
+         seed += AcceleratorState().process_index
+     random.seed(seed)
+     np.random.seed(seed)
+     torch.manual_seed(seed)
+     if is_xpu_available():
+         torch.xpu.manual_seed_all(seed)
+     elif is_npu_available():
+         torch.npu.manual_seed_all(seed)
+     elif is_mlu_available():
+         torch.mlu.manual_seed_all(seed)
+     else:
+         torch.cuda.manual_seed_all(seed)
+         # ^^ safe to call this function even if cuda is not available
+     if is_torch_xla_available():
+         xm.set_rng_state(seed)
+
+     if deterministic:
+         torch.use_deterministic_algorithms(True)
+
+
+ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
+     # Get the proper rng state
+     if rng_type == RNGType.TORCH:
+         rng_state = torch.get_rng_state()
+     elif rng_type == RNGType.CUDA:
+         rng_state = torch.cuda.get_rng_state()
+     elif rng_type == RNGType.XLA:
+         assert is_torch_xla_available(), "Can't synchronize XLA seeds as torch_xla is unavailable."
+         rng_state = torch.tensor(xm.get_rng_state())
+     elif rng_type == RNGType.NPU:
+         assert is_npu_available(), "Can't synchronize NPU seeds on an environment without NPUs."
+         rng_state = torch.npu.get_rng_state()
+     elif rng_type == RNGType.MLU:
+         assert is_mlu_available(), "Can't synchronize MLU seeds on an environment without MLUs."
+         rng_state = torch.mlu.get_rng_state()
+     elif rng_type == RNGType.XPU:
+         assert is_xpu_available(), "Can't synchronize XPU seeds on an environment without XPUs."
+         rng_state = torch.xpu.get_rng_state()
+     elif rng_type == RNGType.GENERATOR:
+         assert generator is not None, "Need a generator to synchronize its seed."
+         rng_state = generator.get_state()
+
+     # Broadcast the rng state from device 0 to other devices
+     state = AcceleratorState()
+     if state.distributed_type == DistributedType.XLA:
+         rng_state = rng_state.to(xm.xla_device())
+         xm.collective_broadcast([rng_state])
+         xm.mark_step()
+         rng_state = rng_state.cpu()
+     elif (
+         state.distributed_type in CUDA_DISTRIBUTED_TYPES
+         or state.distributed_type == DistributedType.MULTI_MLU
+         or state.distributed_type == DistributedType.MULTI_NPU
+         or state.distributed_type == DistributedType.MULTI_XPU
+     ):
+         rng_state = rng_state.to(state.device)
+         torch.distributed.broadcast(rng_state, 0)
+         rng_state = rng_state.cpu()
+     elif state.distributed_type == DistributedType.MULTI_CPU:
+         torch.distributed.broadcast(rng_state, 0)
+
+     # Set the broadcast rng state
+     if rng_type == RNGType.TORCH:
+         torch.set_rng_state(rng_state)
+     elif rng_type == RNGType.CUDA:
+         torch.cuda.set_rng_state(rng_state)
+     elif rng_type == RNGType.NPU:
+         torch.npu.set_rng_state(rng_state)
+     elif rng_type == RNGType.XPU:
+         torch.xpu.set_rng_state(rng_state)
+     elif rng_type == RNGType.XLA:
+         xm.set_rng_state(rng_state.item())
+     elif rng_type == RNGType.GENERATOR:
+         generator.set_state(rng_state)
+
+
+ def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
+     for rng_type in rng_types:
+         synchronize_rng_state(RNGType(rng_type), generator=generator)
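For reference, a short sketch of how the seeding helper above is typically used (an illustration, not part of the commit; assumes `accelerate` is installed):

```python
# Sketch only: reproducible RNG state across python, numpy, torch and any available backend.
import torch
from accelerate.utils import set_seed

set_seed(42)
print(torch.rand(2))  # same values on every run

# Offset the seed by the process index so each rank draws different random numbers.
set_seed(42, device_specific=True)
```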
env-llmeval/lib/python3.10/site-packages/accelerate/utils/rich.py ADDED
@@ -0,0 +1,24 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .imports import is_rich_available
+
+
+ if is_rich_available():
+     from rich.traceback import install
+
+     install(show_locals=False)
+
+ else:
+     raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
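Because importing this module raises when `rich` is missing, callers guard the import; a minimal sketch (not part of the commit), assuming `rich` may or may not be installed:

```python
# Sketch only: opt in to rich tracebacks when available, fall back silently otherwise.
try:
    import accelerate.utils.rich  # noqa: F401  (importing installs the rich traceback handler)
except ModuleNotFoundError:
    pass  # plain Python tracebacks
```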
env-llmeval/lib/python3.10/site-packages/accelerate/utils/torch_xla.py ADDED
@@ -0,0 +1,51 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import importlib.metadata
+ import subprocess
+ import sys
+
+
+ def install_xla(upgrade: bool = False):
+     """
+     Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory.
+
+     Args:
+         upgrade (`bool`, *optional*, defaults to `False`):
+             Whether to upgrade `torch` and install the latest `torch_xla` wheels.
+
+     Example:
+
+     ```python
+     >>> from accelerate.utils import install_xla
+
+     >>> install_xla(upgrade=True)
+     ```
+     """
+     in_colab = False
+     if "IPython" in sys.modules:
+         in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
+
+     if in_colab:
+         if upgrade:
+             torch_install_cmd = ["pip", "install", "-U", "torch"]
+             subprocess.run(torch_install_cmd, check=True)
+         # get the current version of torch
+         torch_version = importlib.metadata.version("torch")
+         torch_version_trunc = torch_version[: torch_version.rindex(".")]
+         xla_wheel = f"https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-{torch_version_trunc}-cp37-cp37m-linux_x86_64.whl"
+         xla_install_cmd = ["pip", "install", xla_wheel]
+         subprocess.run(xla_install_cmd, check=True)
+     else:
+         raise RuntimeError("`install_xla` utility works only on google colab.")
env-llmeval/lib/python3.10/site-packages/accelerate/utils/tqdm.py ADDED
@@ -0,0 +1,37 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .imports import is_tqdm_available
+
+
+ if is_tqdm_available():
+     from tqdm.auto import tqdm as _tqdm
+
+ from ..state import PartialState
+
+
+ def tqdm(main_process_only: bool = True, *args, **kwargs):
+     """
+     Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
+
+     Args:
+         main_process_only (`bool`, *optional*):
+             Whether to display the progress bar only on the main process
+     """
+     if not is_tqdm_available():
+         raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
+     disable = False
+     if main_process_only:
+         disable = PartialState().local_process_index != 0
+     return _tqdm(*args, **kwargs, disable=disable)
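A small usage sketch for the wrapper above (not part of the commit; assumes `accelerate` and `tqdm` are installed). Note that in this version `main_process_only` is the first positional argument, so the iterable must follow it:

```python
# Sketch only: a progress bar that is disabled on every rank except local rank 0.
from accelerate.utils.tqdm import tqdm

for step in tqdm(True, range(1000), desc="training"):
    pass  # other ranks construct the bar with disable=True, so nothing is printed
```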
env-llmeval/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py ADDED
@@ -0,0 +1,84 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import torch.nn as nn
+
+ from .imports import is_fp8_available
+
+
+ if is_fp8_available():
+     import transformer_engine.pytorch as te
+
+
+ def convert_model(model, to_transformer_engine=True, _convert_linear=True, _convert_ln=True):
+     """
+     Recursively converts the linear and layernorm layers of a model to their `transformers_engine` counterpart.
+     """
+     if not is_fp8_available():
+         raise ImportError("Using `convert_model` requires transformer_engine to be installed.")
+     for name, module in model.named_children():
+         if isinstance(module, nn.Linear) and to_transformer_engine and _convert_linear:
+             # Return early if the linear layer weights are not multiples of 16
+             if any(p % 16 != 0 for p in module.weight.shape):
+                 return
+             has_bias = module.bias is not None
+             te_module = te.Linear(
+                 module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype
+             )
+             te_module.weight.copy_(module.weight)
+             if has_bias:
+                 te_module.bias.copy_(module.bias)
+
+             setattr(model, name, te_module)
+         elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln:
+             te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
+             te_module.weight.copy_(module.weight)
+             te_module.bias.copy_(module.bias)
+
+             setattr(model, name, te_module)
+         elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear:
+             has_bias = module.bias is not None
+             new_module = nn.Linear(
+                 module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype
+             )
+             new_module.weight.copy_(module.weight)
+             if has_bias:
+                 new_module.bias.copy_(module.bias)
+
+             setattr(model, name, new_module)
+         elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln:
+             new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
+             new_module.weight.copy_(module.weight)
+             new_module.bias.copy_(module.bias)
+
+             setattr(model, name, new_module)
+         else:
+             convert_model(
+                 module,
+                 to_transformer_engine=to_transformer_engine,
+                 _convert_linear=_convert_linear,
+                 _convert_ln=_convert_ln,
+             )
+
+
+ def has_transformer_engine_layers(model):
+     """
+     Returns whether a given model has some `transformer_engine` layer or not.
+     """
+     if not is_fp8_available():
+         raise ImportError("Using `has_transformer_engine_layers` requires transformer_engine to be installed.")
+     for m in model.modules():
+         if isinstance(m, (te.LayerNorm, te.Linear, te.TransformerLayer)):
+             return True
+     return False
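A hedged sketch of swapping a model's layers for their Transformer Engine counterparts with the helpers above (not part of the commit; assumes `transformer-engine` is installed and that every linear dimension is a multiple of 16, otherwise `convert_model` returns early):

```python
# Sketch only: in-place replacement of nn.Linear / nn.LayerNorm with te.Linear / te.LayerNorm.
import torch
import torch.nn as nn
from accelerate.utils.transformer_engine import convert_model, has_transformer_engine_layers

model = nn.Sequential(nn.Linear(64, 64), nn.LayerNorm(64), nn.Linear(64, 64))

with torch.no_grad():  # weight copies into the new modules must not be tracked by autograd
    convert_model(model)

assert has_transformer_engine_layers(model)
```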
env-llmeval/lib/python3.10/site-packages/accelerate/utils/versions.py ADDED
@@ -0,0 +1,56 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import importlib.metadata
+ from typing import Union
+
+ from packaging.version import Version, parse
+
+ from .constants import STR_OPERATION_TO_FUNC
+
+
+ torch_version = parse(importlib.metadata.version("torch"))
+
+
+ def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
+     """
+     Compares a library version to some requirement using a given operation.
+
+     Args:
+         library_or_version (`str` or `packaging.version.Version`):
+             A library name or a version to check.
+         operation (`str`):
+             A string representation of an operator, such as `">"` or `"<="`.
+         requirement_version (`str`):
+             The version to compare the library version against
+     """
+     if operation not in STR_OPERATION_TO_FUNC.keys():
+         raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
+     operation = STR_OPERATION_TO_FUNC[operation]
+     if isinstance(library_or_version, str):
+         library_or_version = parse(importlib.metadata.version(library_or_version))
+     return operation(library_or_version, parse(requirement_version))
+
+
+ def is_torch_version(operation: str, version: str):
+     """
+     Compares the current PyTorch version to a given reference with an operation.
+
+     Args:
+         operation (`str`):
+             A string representation of an operator, such as `">"` or `"<="`
+         version (`str`):
+             A string version of PyTorch
+     """
+     return compare_versions(torch_version, operation, version)
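A quick sketch of the version helpers above (not part of the commit; assumes `packaging` and the queried libraries are installed):

```python
# Sketch only: runtime version gating.
from accelerate.utils.versions import compare_versions, is_torch_version

if is_torch_version(">=", "2.0.0"):
    print("torch.compile is available")

print(compare_versions("numpy", ">=", "1.17.0"))  # compares the installed numpy against 1.17.0
```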
env-llmeval/lib/python3.10/site-packages/numpy/__config__.py ADDED
@@ -0,0 +1,162 @@
1
+ # This file is generated by numpy's build process
2
+ # It contains system_info results at the time of building this package.
3
+ from enum import Enum
4
+ from numpy.core._multiarray_umath import (
5
+ __cpu_features__,
6
+ __cpu_baseline__,
7
+ __cpu_dispatch__,
8
+ )
9
+
10
+ __all__ = ["show"]
11
+ _built_with_meson = True
12
+
13
+
14
+ class DisplayModes(Enum):
15
+ stdout = "stdout"
16
+ dicts = "dicts"
17
+
18
+
19
+ def _cleanup(d):
20
+ """
21
+ Removes empty values in a `dict` recursively
22
+ This ensures we remove values that Meson could not provide to CONFIG
23
+ """
24
+ if isinstance(d, dict):
25
+ return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)}
26
+ else:
27
+ return d
28
+
29
+
30
+ CONFIG = _cleanup(
31
+ {
32
+ "Compilers": {
33
+ "c": {
34
+ "name": "gcc",
35
+ "linker": r"ld.bfd",
36
+ "version": "10.2.1",
37
+ "commands": r"cc",
38
+ "args": r"-fno-strict-aliasing",
39
+ "linker args": r"-Wl,--strip-debug, -fno-strict-aliasing",
40
+ },
41
+ "cython": {
42
+ "name": "cython",
43
+ "linker": r"cython",
44
+ "version": "3.0.8",
45
+ "commands": r"cython",
46
+ "args": r"",
47
+ "linker args": r"",
48
+ },
49
+ "c++": {
50
+ "name": "gcc",
51
+ "linker": r"ld.bfd",
52
+ "version": "10.2.1",
53
+ "commands": r"c++",
54
+ "args": r"",
55
+ "linker args": r"-Wl,--strip-debug",
56
+ },
57
+ },
58
+ "Machine Information": {
59
+ "host": {
60
+ "cpu": "x86_64",
61
+ "family": "x86_64",
62
+ "endian": "little",
63
+ "system": "linux",
64
+ },
65
+ "build": {
66
+ "cpu": "x86_64",
67
+ "family": "x86_64",
68
+ "endian": "little",
69
+ "system": "linux",
70
+ },
71
+ "cross-compiled": bool("False".lower().replace("false", "")),
72
+ },
73
+ "Build Dependencies": {
74
+ "blas": {
75
+ "name": "openblas64",
76
+ "found": bool("True".lower().replace("false", "")),
77
+ "version": "0.3.23.dev",
78
+ "detection method": "pkgconfig",
79
+ "include directory": r"/usr/local/include",
80
+ "lib directory": r"/usr/local/lib",
81
+ "openblas configuration": r"USE_64BITINT=1 DYNAMIC_ARCH=1 DYNAMIC_OLDER= NO_CBLAS= NO_LAPACK= NO_LAPACKE= NO_AFFINITY=1 USE_OPENMP= HASWELL MAX_THREADS=2",
82
+ "pc file directory": r"/usr/local/lib/pkgconfig",
83
+ },
84
+ "lapack": {
85
+ "name": "dep139863411681952",
86
+ "found": bool("True".lower().replace("false", "")),
87
+ "version": "1.26.4",
88
+ "detection method": "internal",
89
+ "include directory": r"unknown",
90
+ "lib directory": r"unknown",
91
+ "openblas configuration": r"unknown",
92
+ "pc file directory": r"unknown",
93
+ },
94
+ },
95
+ "Python Information": {
96
+ "path": r"/opt/python/cp310-cp310/bin/python",
97
+ "version": "3.10",
98
+ },
99
+ "SIMD Extensions": {
100
+ "baseline": __cpu_baseline__,
101
+ "found": [
102
+ feature for feature in __cpu_dispatch__ if __cpu_features__[feature]
103
+ ],
104
+ "not found": [
105
+ feature for feature in __cpu_dispatch__ if not __cpu_features__[feature]
106
+ ],
107
+ },
108
+ }
109
+ )
110
+
111
+
112
+ def _check_pyyaml():
113
+ import yaml
114
+
115
+ return yaml
116
+
117
+
118
+ def show(mode=DisplayModes.stdout.value):
119
+ """
120
+ Show libraries and system information on which NumPy was built
121
+ and is being used
122
+
123
+ Parameters
124
+ ----------
125
+ mode : {`'stdout'`, `'dicts'`}, optional.
126
+ Indicates how to display the config information.
127
+ `'stdout'` prints to console, `'dicts'` returns a dictionary
128
+ of the configuration.
129
+
130
+ Returns
131
+ -------
132
+ out : {`dict`, `None`}
133
+ If mode is `'dicts'`, a dict is returned, else None
134
+
135
+ See Also
136
+ --------
137
+ get_include : Returns the directory containing NumPy C
138
+ header files.
139
+
140
+ Notes
141
+ -----
142
+ 1. The `'stdout'` mode will give more readable
143
+ output if ``pyyaml`` is installed
144
+
145
+ """
146
+ if mode == DisplayModes.stdout.value:
147
+ try: # Non-standard library, check import
148
+ yaml = _check_pyyaml()
149
+
150
+ print(yaml.dump(CONFIG))
151
+ except ModuleNotFoundError:
152
+ import warnings
153
+ import json
154
+
155
+ warnings.warn("Install `pyyaml` for better output", stacklevel=1)
156
+ print(json.dumps(CONFIG, indent=2))
157
+ elif mode == DisplayModes.dicts.value:
158
+ return CONFIG
159
+ else:
160
+ raise AttributeError(
161
+ f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}"
162
+ )
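For reference, a short sketch of how this generated module is consumed (not part of the commit; it assumes that in this NumPy build the public entry point `numpy.show_config` forwards to `__config__.show` above):

```python
# Sketch only: inspect the build configuration baked into this wheel.
import numpy as np

np.show_config()                    # pretty-prints CONFIG (uses pyyaml when installed, else json)
cfg = np.show_config(mode="dicts")  # returns the CONFIG dict instead of printing
print(cfg["Build Dependencies"]["blas"]["name"])
```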
env-llmeval/lib/python3.10/site-packages/numpy/__init__.cython-30.pxd ADDED
@@ -0,0 +1,1050 @@
1
+ # NumPy static imports for Cython >= 3.0
2
+ #
3
+ # If any of the PyArray_* functions are called, import_array must be
4
+ # called first. This is done automatically by Cython 3.0+ if a call
5
+ # is not detected inside of the module.
6
+ #
7
+ # Author: Dag Sverre Seljebotn
8
+ #
9
+
10
+ from cpython.ref cimport Py_INCREF
11
+ from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck
12
+ cimport libc.stdio as stdio
13
+
14
+
15
+ cdef extern from *:
16
+ # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython.
17
+ # See https://github.com/cython/cython/issues/3573
18
+ """
19
+ /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */
20
+ """
21
+
22
+
23
+ cdef extern from "Python.h":
24
+ ctypedef int Py_intptr_t
25
+
26
+ cdef extern from "numpy/arrayobject.h":
27
+ ctypedef Py_intptr_t npy_intp
28
+ ctypedef size_t npy_uintp
29
+
30
+ cdef enum NPY_TYPES:
31
+ NPY_BOOL
32
+ NPY_BYTE
33
+ NPY_UBYTE
34
+ NPY_SHORT
35
+ NPY_USHORT
36
+ NPY_INT
37
+ NPY_UINT
38
+ NPY_LONG
39
+ NPY_ULONG
40
+ NPY_LONGLONG
41
+ NPY_ULONGLONG
42
+ NPY_FLOAT
43
+ NPY_DOUBLE
44
+ NPY_LONGDOUBLE
45
+ NPY_CFLOAT
46
+ NPY_CDOUBLE
47
+ NPY_CLONGDOUBLE
48
+ NPY_OBJECT
49
+ NPY_STRING
50
+ NPY_UNICODE
51
+ NPY_VOID
52
+ NPY_DATETIME
53
+ NPY_TIMEDELTA
54
+ NPY_NTYPES
55
+ NPY_NOTYPE
56
+
57
+ NPY_INT8
58
+ NPY_INT16
59
+ NPY_INT32
60
+ NPY_INT64
61
+ NPY_INT128
62
+ NPY_INT256
63
+ NPY_UINT8
64
+ NPY_UINT16
65
+ NPY_UINT32
66
+ NPY_UINT64
67
+ NPY_UINT128
68
+ NPY_UINT256
69
+ NPY_FLOAT16
70
+ NPY_FLOAT32
71
+ NPY_FLOAT64
72
+ NPY_FLOAT80
73
+ NPY_FLOAT96
74
+ NPY_FLOAT128
75
+ NPY_FLOAT256
76
+ NPY_COMPLEX32
77
+ NPY_COMPLEX64
78
+ NPY_COMPLEX128
79
+ NPY_COMPLEX160
80
+ NPY_COMPLEX192
81
+ NPY_COMPLEX256
82
+ NPY_COMPLEX512
83
+
84
+ NPY_INTP
85
+
86
+ ctypedef enum NPY_ORDER:
87
+ NPY_ANYORDER
88
+ NPY_CORDER
89
+ NPY_FORTRANORDER
90
+ NPY_KEEPORDER
91
+
92
+ ctypedef enum NPY_CASTING:
93
+ NPY_NO_CASTING
94
+ NPY_EQUIV_CASTING
95
+ NPY_SAFE_CASTING
96
+ NPY_SAME_KIND_CASTING
97
+ NPY_UNSAFE_CASTING
98
+
99
+ ctypedef enum NPY_CLIPMODE:
100
+ NPY_CLIP
101
+ NPY_WRAP
102
+ NPY_RAISE
103
+
104
+ ctypedef enum NPY_SCALARKIND:
105
+ NPY_NOSCALAR,
106
+ NPY_BOOL_SCALAR,
107
+ NPY_INTPOS_SCALAR,
108
+ NPY_INTNEG_SCALAR,
109
+ NPY_FLOAT_SCALAR,
110
+ NPY_COMPLEX_SCALAR,
111
+ NPY_OBJECT_SCALAR
112
+
113
+ ctypedef enum NPY_SORTKIND:
114
+ NPY_QUICKSORT
115
+ NPY_HEAPSORT
116
+ NPY_MERGESORT
117
+
118
+ ctypedef enum NPY_SEARCHSIDE:
119
+ NPY_SEARCHLEFT
120
+ NPY_SEARCHRIGHT
121
+
122
+ enum:
123
+ # DEPRECATED since NumPy 1.7 ! Do not use in new code!
124
+ NPY_C_CONTIGUOUS
125
+ NPY_F_CONTIGUOUS
126
+ NPY_CONTIGUOUS
127
+ NPY_FORTRAN
128
+ NPY_OWNDATA
129
+ NPY_FORCECAST
130
+ NPY_ENSURECOPY
131
+ NPY_ENSUREARRAY
132
+ NPY_ELEMENTSTRIDES
133
+ NPY_ALIGNED
134
+ NPY_NOTSWAPPED
135
+ NPY_WRITEABLE
136
+ NPY_ARR_HAS_DESCR
137
+
138
+ NPY_BEHAVED
139
+ NPY_BEHAVED_NS
140
+ NPY_CARRAY
141
+ NPY_CARRAY_RO
142
+ NPY_FARRAY
143
+ NPY_FARRAY_RO
144
+ NPY_DEFAULT
145
+
146
+ NPY_IN_ARRAY
147
+ NPY_OUT_ARRAY
148
+ NPY_INOUT_ARRAY
149
+ NPY_IN_FARRAY
150
+ NPY_OUT_FARRAY
151
+ NPY_INOUT_FARRAY
152
+
153
+ NPY_UPDATE_ALL
154
+
155
+ enum:
156
+ # Added in NumPy 1.7 to replace the deprecated enums above.
157
+ NPY_ARRAY_C_CONTIGUOUS
158
+ NPY_ARRAY_F_CONTIGUOUS
159
+ NPY_ARRAY_OWNDATA
160
+ NPY_ARRAY_FORCECAST
161
+ NPY_ARRAY_ENSURECOPY
162
+ NPY_ARRAY_ENSUREARRAY
163
+ NPY_ARRAY_ELEMENTSTRIDES
164
+ NPY_ARRAY_ALIGNED
165
+ NPY_ARRAY_NOTSWAPPED
166
+ NPY_ARRAY_WRITEABLE
167
+ NPY_ARRAY_WRITEBACKIFCOPY
168
+
169
+ NPY_ARRAY_BEHAVED
170
+ NPY_ARRAY_BEHAVED_NS
171
+ NPY_ARRAY_CARRAY
172
+ NPY_ARRAY_CARRAY_RO
173
+ NPY_ARRAY_FARRAY
174
+ NPY_ARRAY_FARRAY_RO
175
+ NPY_ARRAY_DEFAULT
176
+
177
+ NPY_ARRAY_IN_ARRAY
178
+ NPY_ARRAY_OUT_ARRAY
179
+ NPY_ARRAY_INOUT_ARRAY
180
+ NPY_ARRAY_IN_FARRAY
181
+ NPY_ARRAY_OUT_FARRAY
182
+ NPY_ARRAY_INOUT_FARRAY
183
+
184
+ NPY_ARRAY_UPDATE_ALL
185
+
186
+ cdef enum:
187
+ NPY_MAXDIMS
188
+
189
+ npy_intp NPY_MAX_ELSIZE
190
+
191
+ ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
192
+
193
+ ctypedef struct PyArray_ArrayDescr:
194
+ # shape is a tuple, but Cython doesn't support "tuple shape"
195
+ # inside a non-PyObject declaration, so we have to declare it
196
+ # as just a PyObject*.
197
+ PyObject* shape
198
+
199
+ ctypedef struct PyArray_Descr:
200
+ pass
201
+
202
+ ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]:
203
+ # Use PyDataType_* macros when possible, however there are no macros
204
+ # for accessing some of the fields, so some are defined.
205
+ cdef PyTypeObject* typeobj
206
+ cdef char kind
207
+ cdef char type
208
+ # Numpy sometimes mutates this without warning (e.g. it'll
209
+ # sometimes change "|" to "<" in shared dtype objects on
210
+ # little-endian machines). If this matters to you, use
211
+ # PyArray_IsNativeByteOrder(dtype.byteorder) instead of
212
+ # directly accessing this field.
213
+ cdef char byteorder
214
+ cdef char flags
215
+ cdef int type_num
216
+ cdef int itemsize "elsize"
217
+ cdef int alignment
218
+ cdef object fields
219
+ cdef tuple names
220
+ # Use PyDataType_HASSUBARRAY to test whether this field is
221
+ # valid (the pointer can be NULL). Most users should access
222
+ # this field via the inline helper method PyDataType_SHAPE.
223
+ cdef PyArray_ArrayDescr* subarray
224
+
225
+ ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]:
226
+ # Use through macros
227
+ pass
228
+
229
+ ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]:
230
+ # Use through macros
231
+ pass
232
+
233
+ ctypedef struct PyArrayObject:
234
+ # For use in situations where ndarray can't replace PyArrayObject*,
235
+ # like PyArrayObject**.
236
+ pass
237
+
238
+ ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]:
239
+ cdef __cythonbufferdefaults__ = {"mode": "strided"}
240
+
241
+ # NOTE: no field declarations since direct access is deprecated since NumPy 1.7
242
+ # Instead, we use properties that map to the corresponding C-API functions.
243
+
244
+ @property
245
+ cdef inline PyObject* base(self) nogil:
246
+ """Returns a borrowed reference to the object owning the data/memory.
247
+ """
248
+ return PyArray_BASE(self)
249
+
250
+ @property
251
+ cdef inline dtype descr(self):
252
+ """Returns an owned reference to the dtype of the array.
253
+ """
254
+ return <dtype>PyArray_DESCR(self)
255
+
256
+ @property
257
+ cdef inline int ndim(self) nogil:
258
+ """Returns the number of dimensions in the array.
259
+ """
260
+ return PyArray_NDIM(self)
261
+
262
+ @property
263
+ cdef inline npy_intp *shape(self) nogil:
264
+ """Returns a pointer to the dimensions/shape of the array.
265
+ The number of elements matches the number of dimensions of the array (ndim).
266
+ Can return NULL for 0-dimensional arrays.
267
+ """
268
+ return PyArray_DIMS(self)
269
+
270
+ @property
271
+ cdef inline npy_intp *strides(self) nogil:
272
+ """Returns a pointer to the strides of the array.
273
+ The number of elements matches the number of dimensions of the array (ndim).
274
+ """
275
+ return PyArray_STRIDES(self)
276
+
277
+ @property
278
+ cdef inline npy_intp size(self) nogil:
279
+ """Returns the total size (in number of elements) of the array.
280
+ """
281
+ return PyArray_SIZE(self)
282
+
283
+ @property
284
+ cdef inline char* data(self) nogil:
285
+ """The pointer to the data buffer as a char*.
286
+ This is provided for legacy reasons to avoid direct struct field access.
287
+ For new code that needs this access, you probably want to cast the result
288
+ of `PyArray_DATA()` instead, which returns a 'void*'.
289
+ """
290
+ return PyArray_BYTES(self)
291
+
292
+ ctypedef unsigned char npy_bool
293
+
294
+ ctypedef signed char npy_byte
295
+ ctypedef signed short npy_short
296
+ ctypedef signed int npy_int
297
+ ctypedef signed long npy_long
298
+ ctypedef signed long long npy_longlong
299
+
300
+ ctypedef unsigned char npy_ubyte
301
+ ctypedef unsigned short npy_ushort
302
+ ctypedef unsigned int npy_uint
303
+ ctypedef unsigned long npy_ulong
304
+ ctypedef unsigned long long npy_ulonglong
305
+
306
+ ctypedef float npy_float
307
+ ctypedef double npy_double
308
+ ctypedef long double npy_longdouble
309
+
310
+ ctypedef signed char npy_int8
311
+ ctypedef signed short npy_int16
312
+ ctypedef signed int npy_int32
313
+ ctypedef signed long long npy_int64
314
+ ctypedef signed long long npy_int96
315
+ ctypedef signed long long npy_int128
316
+
317
+ ctypedef unsigned char npy_uint8
318
+ ctypedef unsigned short npy_uint16
319
+ ctypedef unsigned int npy_uint32
320
+ ctypedef unsigned long long npy_uint64
321
+ ctypedef unsigned long long npy_uint96
322
+ ctypedef unsigned long long npy_uint128
323
+
324
+ ctypedef float npy_float32
325
+ ctypedef double npy_float64
326
+ ctypedef long double npy_float80
327
+ ctypedef long double npy_float96
328
+ ctypedef long double npy_float128
329
+
330
+ ctypedef struct npy_cfloat:
331
+ float real
332
+ float imag
333
+
334
+ ctypedef struct npy_cdouble:
335
+ double real
336
+ double imag
337
+
338
+ ctypedef struct npy_clongdouble:
339
+ long double real
340
+ long double imag
341
+
342
+ ctypedef struct npy_complex64:
343
+ float real
344
+ float imag
345
+
346
+ ctypedef struct npy_complex128:
347
+ double real
348
+ double imag
349
+
350
+ ctypedef struct npy_complex160:
351
+ long double real
352
+ long double imag
353
+
354
+ ctypedef struct npy_complex192:
355
+ long double real
356
+ long double imag
357
+
358
+ ctypedef struct npy_complex256:
359
+ long double real
360
+ long double imag
361
+
362
+ ctypedef struct PyArray_Dims:
363
+ npy_intp *ptr
364
+ int len
365
+
366
+ int _import_array() except -1
367
+ # A second definition so _import_array isn't marked as used when we use it here.
368
+ # Do not use - subject to change any time.
369
+ int __pyx_import_array "_import_array"() except -1
370
+
371
+ #
372
+ # Macros from ndarrayobject.h
373
+ #
374
+ bint PyArray_CHKFLAGS(ndarray m, int flags) nogil
375
+ bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil
376
+ bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil
377
+ bint PyArray_ISCONTIGUOUS(ndarray m) nogil
378
+ bint PyArray_ISWRITEABLE(ndarray m) nogil
379
+ bint PyArray_ISALIGNED(ndarray m) nogil
380
+
381
+ int PyArray_NDIM(ndarray) nogil
382
+ bint PyArray_ISONESEGMENT(ndarray) nogil
383
+ bint PyArray_ISFORTRAN(ndarray) nogil
384
+ int PyArray_FORTRANIF(ndarray) nogil
385
+
386
+ void* PyArray_DATA(ndarray) nogil
387
+ char* PyArray_BYTES(ndarray) nogil
388
+
389
+ npy_intp* PyArray_DIMS(ndarray) nogil
390
+ npy_intp* PyArray_STRIDES(ndarray) nogil
391
+ npy_intp PyArray_DIM(ndarray, size_t) nogil
392
+ npy_intp PyArray_STRIDE(ndarray, size_t) nogil
393
+
394
+ PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference!
395
+ PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype!
396
+ PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr.
397
+ int PyArray_FLAGS(ndarray) nogil
398
+ void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7
399
+ void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7
400
+ npy_intp PyArray_ITEMSIZE(ndarray) nogil
401
+ int PyArray_TYPE(ndarray arr) nogil
402
+
403
+ object PyArray_GETITEM(ndarray arr, void *itemptr)
404
+ int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1
405
+
406
+ bint PyTypeNum_ISBOOL(int) nogil
407
+ bint PyTypeNum_ISUNSIGNED(int) nogil
408
+ bint PyTypeNum_ISSIGNED(int) nogil
409
+ bint PyTypeNum_ISINTEGER(int) nogil
410
+ bint PyTypeNum_ISFLOAT(int) nogil
411
+ bint PyTypeNum_ISNUMBER(int) nogil
412
+ bint PyTypeNum_ISSTRING(int) nogil
413
+ bint PyTypeNum_ISCOMPLEX(int) nogil
414
+ bint PyTypeNum_ISPYTHON(int) nogil
415
+ bint PyTypeNum_ISFLEXIBLE(int) nogil
416
+ bint PyTypeNum_ISUSERDEF(int) nogil
417
+ bint PyTypeNum_ISEXTENDED(int) nogil
418
+ bint PyTypeNum_ISOBJECT(int) nogil
419
+
420
+ bint PyDataType_ISBOOL(dtype) nogil
421
+ bint PyDataType_ISUNSIGNED(dtype) nogil
422
+ bint PyDataType_ISSIGNED(dtype) nogil
423
+ bint PyDataType_ISINTEGER(dtype) nogil
424
+ bint PyDataType_ISFLOAT(dtype) nogil
425
+ bint PyDataType_ISNUMBER(dtype) nogil
426
+ bint PyDataType_ISSTRING(dtype) nogil
427
+ bint PyDataType_ISCOMPLEX(dtype) nogil
428
+ bint PyDataType_ISPYTHON(dtype) nogil
429
+ bint PyDataType_ISFLEXIBLE(dtype) nogil
430
+ bint PyDataType_ISUSERDEF(dtype) nogil
431
+ bint PyDataType_ISEXTENDED(dtype) nogil
432
+ bint PyDataType_ISOBJECT(dtype) nogil
433
+ bint PyDataType_HASFIELDS(dtype) nogil
434
+ bint PyDataType_HASSUBARRAY(dtype) nogil
435
+
436
+ bint PyArray_ISBOOL(ndarray) nogil
437
+ bint PyArray_ISUNSIGNED(ndarray) nogil
438
+ bint PyArray_ISSIGNED(ndarray) nogil
439
+ bint PyArray_ISINTEGER(ndarray) nogil
440
+ bint PyArray_ISFLOAT(ndarray) nogil
441
+ bint PyArray_ISNUMBER(ndarray) nogil
442
+ bint PyArray_ISSTRING(ndarray) nogil
443
+ bint PyArray_ISCOMPLEX(ndarray) nogil
444
+ bint PyArray_ISPYTHON(ndarray) nogil
445
+ bint PyArray_ISFLEXIBLE(ndarray) nogil
446
+ bint PyArray_ISUSERDEF(ndarray) nogil
447
+ bint PyArray_ISEXTENDED(ndarray) nogil
448
+ bint PyArray_ISOBJECT(ndarray) nogil
449
+ bint PyArray_HASFIELDS(ndarray) nogil
450
+
451
+ bint PyArray_ISVARIABLE(ndarray) nogil
452
+
453
+ bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil
454
+ bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder
455
+ bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder
456
+ bint PyArray_ISNOTSWAPPED(ndarray) nogil
457
+ bint PyArray_ISBYTESWAPPED(ndarray) nogil
458
+
459
+ bint PyArray_FLAGSWAP(ndarray, int) nogil
460
+
461
+ bint PyArray_ISCARRAY(ndarray) nogil
462
+ bint PyArray_ISCARRAY_RO(ndarray) nogil
463
+ bint PyArray_ISFARRAY(ndarray) nogil
464
+ bint PyArray_ISFARRAY_RO(ndarray) nogil
465
+ bint PyArray_ISBEHAVED(ndarray) nogil
466
+ bint PyArray_ISBEHAVED_RO(ndarray) nogil
467
+
468
+
469
+ bint PyDataType_ISNOTSWAPPED(dtype) nogil
470
+ bint PyDataType_ISBYTESWAPPED(dtype) nogil
471
+
472
+ bint PyArray_DescrCheck(object)
473
+
474
+ bint PyArray_Check(object)
475
+ bint PyArray_CheckExact(object)
476
+
477
+ # Cannot be supported due to out arg:
478
+ # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&)
479
+ # bint PyArray_HasArrayInterface(op, out)
480
+
481
+
482
+ bint PyArray_IsZeroDim(object)
483
+ # Cannot be supported due to ## ## in macro:
484
+ # bint PyArray_IsScalar(object, verbatim work)
485
+ bint PyArray_CheckScalar(object)
486
+ bint PyArray_IsPythonNumber(object)
487
+ bint PyArray_IsPythonScalar(object)
488
+ bint PyArray_IsAnyScalar(object)
489
+ bint PyArray_CheckAnyScalar(object)
490
+
491
+ ndarray PyArray_GETCONTIGUOUS(ndarray)
492
+ bint PyArray_SAMESHAPE(ndarray, ndarray) nogil
493
+ npy_intp PyArray_SIZE(ndarray) nogil
494
+ npy_intp PyArray_NBYTES(ndarray) nogil
495
+
496
+ object PyArray_FROM_O(object)
497
+ object PyArray_FROM_OF(object m, int flags)
498
+ object PyArray_FROM_OT(object m, int type)
499
+ object PyArray_FROM_OTF(object m, int type, int flags)
500
+ object PyArray_FROMANY(object m, int type, int min, int max, int flags)
501
+ object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran)
502
+ object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran)
503
+ void PyArray_FILLWBYTE(object, int val)
504
+ npy_intp PyArray_REFCOUNT(object)
505
+ object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
506
+ unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
507
+ bint PyArray_EquivByteorders(int b1, int b2) nogil
508
+ object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
509
+ object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data)
510
+ #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
511
+ object PyArray_ToScalar(void* data, ndarray arr)
512
+
513
+ void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil
514
+ void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil
515
+ void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil
516
+ void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil
517
+
518
+ # Cannot be supported due to out arg
519
+ # void PyArray_DESCR_REPLACE(descr)
520
+
521
+
522
+ object PyArray_Copy(ndarray)
523
+ object PyArray_FromObject(object op, int type, int min_depth, int max_depth)
524
+ object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth)
525
+ object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth)
526
+
527
+ object PyArray_Cast(ndarray mp, int type_num)
528
+ object PyArray_Take(ndarray ap, object items, int axis)
529
+ object PyArray_Put(ndarray ap, object items, object values)
530
+
531
+ void PyArray_ITER_RESET(flatiter it) nogil
532
+ void PyArray_ITER_NEXT(flatiter it) nogil
533
+ void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil
534
+ void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil
535
+ void* PyArray_ITER_DATA(flatiter it) nogil
536
+ bint PyArray_ITER_NOTDONE(flatiter it) nogil
537
+
538
+ void PyArray_MultiIter_RESET(broadcast multi) nogil
539
+ void PyArray_MultiIter_NEXT(broadcast multi) nogil
540
+ void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil
541
+ void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil
542
+ void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil
543
+ void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil
544
+ bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil
545
+
546
+ # Functions from __multiarray_api.h
547
+
548
+ # Functions taking dtype and returning object/ndarray are disabled
549
+ # for now as they steal dtype references. I'm conservative and disable
550
+ # more than is probably needed until it can be checked further.
551
+ int PyArray_SetNumericOps (object) except -1
552
+ object PyArray_GetNumericOps ()
553
+ int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF...
554
+ int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF...
555
+ void PyArray_SetStringFunction (object, int)
556
+ dtype PyArray_DescrFromType (int)
557
+ object PyArray_TypeObjectFromType (int)
558
+ char * PyArray_Zero (ndarray)
559
+ char * PyArray_One (ndarray)
560
+ #object PyArray_CastToType (ndarray, dtype, int)
561
+ int PyArray_CastTo (ndarray, ndarray) except -1
562
+ int PyArray_CastAnyTo (ndarray, ndarray) except -1
563
+ int PyArray_CanCastSafely (int, int) # writes errors
564
+ npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors
565
+ int PyArray_ObjectType (object, int) except 0
566
+ dtype PyArray_DescrFromObject (object, dtype)
567
+ #ndarray* PyArray_ConvertToCommonType (object, int *)
568
+ dtype PyArray_DescrFromScalar (object)
569
+ dtype PyArray_DescrFromTypeObject (object)
570
+ npy_intp PyArray_Size (object)
571
+ #object PyArray_Scalar (void *, dtype, object)
572
+ #object PyArray_FromScalar (object, dtype)
573
+ void PyArray_ScalarAsCtype (object, void *)
574
+ #int PyArray_CastScalarToCtype (object, void *, dtype)
575
+ #int PyArray_CastScalarDirect (object, dtype, void *, int)
576
+ object PyArray_ScalarFromObject (object)
577
+ #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int)
578
+ object PyArray_FromDims (int, int *, int)
579
+ #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *)
580
+ #object PyArray_FromAny (object, dtype, int, int, int, object)
581
+ object PyArray_EnsureArray (object)
582
+ object PyArray_EnsureAnyArray (object)
583
+ #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *)
584
+ #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *)
585
+ #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp)
586
+ #object PyArray_FromIter (object, dtype, npy_intp)
587
+ object PyArray_Return (ndarray)
588
+ #object PyArray_GetField (ndarray, dtype, int)
589
+ #int PyArray_SetField (ndarray, dtype, int, object) except -1
590
+ object PyArray_Byteswap (ndarray, npy_bool)
591
+ object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER)
592
+ int PyArray_MoveInto (ndarray, ndarray) except -1
593
+ int PyArray_CopyInto (ndarray, ndarray) except -1
594
+ int PyArray_CopyAnyInto (ndarray, ndarray) except -1
595
+ int PyArray_CopyObject (ndarray, object) except -1
596
+ object PyArray_NewCopy (ndarray, NPY_ORDER)
597
+ object PyArray_ToList (ndarray)
598
+ object PyArray_ToString (ndarray, NPY_ORDER)
599
+ int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1
600
+ int PyArray_Dump (object, object, int) except -1
601
+ object PyArray_Dumps (object, int)
602
+ int PyArray_ValidType (int) # Cannot error
603
+ void PyArray_UpdateFlags (ndarray, int)
604
+ object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object)
605
+ #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object)
606
+ #dtype PyArray_DescrNew (dtype)
607
+ dtype PyArray_DescrNewFromType (int)
608
+ double PyArray_GetPriority (object, double) # clears errors as of 1.25
609
+ object PyArray_IterNew (object)
610
+ object PyArray_MultiIterNew (int, ...)
611
+
612
+ int PyArray_PyIntAsInt (object) except? -1
613
+ npy_intp PyArray_PyIntAsIntp (object)
614
+ int PyArray_Broadcast (broadcast) except -1
615
+ void PyArray_FillObjectArray (ndarray, object) except *
616
+ int PyArray_FillWithScalar (ndarray, object) except -1
617
+ npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)
618
+ dtype PyArray_DescrNewByteorder (dtype, char)
619
+ object PyArray_IterAllButAxis (object, int *)
620
+ #object PyArray_CheckFromAny (object, dtype, int, int, int, object)
621
+ #object PyArray_FromArray (ndarray, dtype, int)
622
+ object PyArray_FromInterface (object)
623
+ object PyArray_FromStructInterface (object)
624
+ #object PyArray_FromArrayAttr (object, dtype, object)
625
+ #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*)
626
+ int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND)
627
+ object PyArray_NewFlagsObject (object)
628
+ npy_bool PyArray_CanCastScalar (type, type)
629
+ #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t)
630
+ int PyArray_RemoveSmallest (broadcast) except -1
631
+ int PyArray_ElementStrides (object)
632
+ void PyArray_Item_INCREF (char *, dtype) except *
633
+ void PyArray_Item_XDECREF (char *, dtype) except *
634
+ object PyArray_FieldNames (object)
635
+ object PyArray_Transpose (ndarray, PyArray_Dims *)
636
+ object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE)
637
+ object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE)
638
+ object PyArray_PutMask (ndarray, object, object)
639
+ object PyArray_Repeat (ndarray, object, int)
640
+ object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
641
+ int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1
642
+ object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
643
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *)
644
+ object PyArray_ArgMax (ndarray, int, ndarray)
645
+ object PyArray_ArgMin (ndarray, int, ndarray)
646
+ object PyArray_Reshape (ndarray, object)
647
+ object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER)
648
+ object PyArray_Squeeze (ndarray)
649
+ #object PyArray_View (ndarray, dtype, type)
650
+ object PyArray_SwapAxes (ndarray, int, int)
651
+ object PyArray_Max (ndarray, int, ndarray)
652
+ object PyArray_Min (ndarray, int, ndarray)
653
+ object PyArray_Ptp (ndarray, int, ndarray)
654
+ object PyArray_Mean (ndarray, int, int, ndarray)
655
+ object PyArray_Trace (ndarray, int, int, int, int, ndarray)
656
+ object PyArray_Diagonal (ndarray, int, int, int)
657
+ object PyArray_Clip (ndarray, object, object, ndarray)
658
+ object PyArray_Conjugate (ndarray, ndarray)
659
+ object PyArray_Nonzero (ndarray)
660
+ object PyArray_Std (ndarray, int, int, ndarray, int)
661
+ object PyArray_Sum (ndarray, int, int, ndarray)
662
+ object PyArray_CumSum (ndarray, int, int, ndarray)
663
+ object PyArray_Prod (ndarray, int, int, ndarray)
664
+ object PyArray_CumProd (ndarray, int, int, ndarray)
665
+ object PyArray_All (ndarray, int, ndarray)
666
+ object PyArray_Any (ndarray, int, ndarray)
667
+ object PyArray_Compress (ndarray, object, int, ndarray)
668
+ object PyArray_Flatten (ndarray, NPY_ORDER)
669
+ object PyArray_Ravel (ndarray, NPY_ORDER)
670
+ npy_intp PyArray_MultiplyList (npy_intp *, int)
671
+ int PyArray_MultiplyIntList (int *, int)
672
+ void * PyArray_GetPtr (ndarray, npy_intp*)
673
+ int PyArray_CompareLists (npy_intp *, npy_intp *, int)
674
+ #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype)
675
+ #int PyArray_As1D (object*, char **, int *, int)
676
+ #int PyArray_As2D (object*, char ***, int *, int *, int)
677
+ int PyArray_Free (object, void *)
678
+ #int PyArray_Converter (object, object*)
679
+ int PyArray_IntpFromSequence (object, npy_intp *, int) except -1
680
+ object PyArray_Concatenate (object, int)
681
+ object PyArray_InnerProduct (object, object)
682
+ object PyArray_MatrixProduct (object, object)
683
+ object PyArray_CopyAndTranspose (object)
684
+ object PyArray_Correlate (object, object, int)
685
+ int PyArray_TypestrConvert (int, int)
686
+ #int PyArray_DescrConverter (object, dtype*) except 0
687
+ #int PyArray_DescrConverter2 (object, dtype*) except 0
688
+ int PyArray_IntpConverter (object, PyArray_Dims *) except 0
689
+ #int PyArray_BufferConverter (object, chunk) except 0
690
+ int PyArray_AxisConverter (object, int *) except 0
691
+ int PyArray_BoolConverter (object, npy_bool *) except 0
692
+ int PyArray_ByteorderConverter (object, char *) except 0
693
+ int PyArray_OrderConverter (object, NPY_ORDER *) except 0
694
+ unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors
695
+ #object PyArray_Zeros (int, npy_intp *, dtype, int)
696
+ #object PyArray_Empty (int, npy_intp *, dtype, int)
697
+ object PyArray_Where (object, object, object)
698
+ object PyArray_Arange (double, double, double, int)
699
+ #object PyArray_ArangeObj (object, object, object, dtype)
700
+ int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0
701
+ object PyArray_LexSort (object, int)
702
+ object PyArray_Round (ndarray, int, ndarray)
703
+ unsigned char PyArray_EquivTypenums (int, int)
704
+ int PyArray_RegisterDataType (dtype) except -1
705
+ int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1
706
+ int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1
707
+ #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
708
+ object PyArray_IntTupleFromIntp (int, npy_intp *)
709
+ int PyArray_TypeNumFromName (char *)
710
+ int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0
711
+ #int PyArray_OutputConverter (object, ndarray*) except 0
712
+ object PyArray_BroadcastToShape (object, npy_intp *, int)
713
+ void _PyArray_SigintHandler (int)
714
+ void* _PyArray_GetSigintBuf ()
715
+ #int PyArray_DescrAlignConverter (object, dtype*) except 0
716
+ #int PyArray_DescrAlignConverter2 (object, dtype*) except 0
717
+ int PyArray_SearchsideConverter (object, void *) except 0
718
+ object PyArray_CheckAxis (ndarray, int *, int)
719
+ npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
720
+ int PyArray_CompareString (char *, char *, size_t)
721
+ int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead.
722
+
723
+
724
+ # Typedefs that match the runtime dtype objects in
+ # the numpy module.
+
+ # The ones that are commented out need an IFDEF function
+ # in Cython to enable them only on the right systems.
+
+ ctypedef npy_int8 int8_t
+ ctypedef npy_int16 int16_t
+ ctypedef npy_int32 int32_t
+ ctypedef npy_int64 int64_t
+ #ctypedef npy_int96 int96_t
+ #ctypedef npy_int128 int128_t
+
+ ctypedef npy_uint8 uint8_t
+ ctypedef npy_uint16 uint16_t
+ ctypedef npy_uint32 uint32_t
+ ctypedef npy_uint64 uint64_t
+ #ctypedef npy_uint96 uint96_t
+ #ctypedef npy_uint128 uint128_t
+
+ ctypedef npy_float32 float32_t
+ ctypedef npy_float64 float64_t
+ #ctypedef npy_float80 float80_t
+ #ctypedef npy_float128 float128_t
+
+ ctypedef float complex complex64_t
+ ctypedef double complex complex128_t
+
+ # The int types are mapped a bit surprisingly --
+ # numpy.int corresponds to 'l' and numpy.long to 'q'
+ ctypedef npy_long int_t
+ ctypedef npy_longlong longlong_t
+
+ ctypedef npy_ulong uint_t
+ ctypedef npy_ulonglong ulonglong_t
+
+ ctypedef npy_intp intp_t
+ ctypedef npy_uintp uintp_t
+
+ ctypedef npy_double float_t
+ ctypedef npy_double double_t
+ ctypedef npy_longdouble longdouble_t
+
+ ctypedef npy_cfloat cfloat_t
+ ctypedef npy_cdouble cdouble_t
+ ctypedef npy_clongdouble clongdouble_t
+
+ ctypedef npy_cdouble complex_t
+
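These typedefs are what typed memoryviews and buffer declarations usually name, e.g. `cnp.float64_t` or `cnp.intp_t`. A small illustrative sketch (the `row_sums` function is hypothetical, not part of the file):

import numpy as np
cimport numpy as cnp

cnp.import_array()

def row_sums(cnp.float64_t[:, :] x):
    # Sum each row of a float64 2-D array using the typedefs above.
    cdef Py_ssize_t i, j
    cdef cnp.float64_t s
    out = np.zeros(x.shape[0], dtype=np.float64)
    cdef cnp.float64_t[:] out_view = out
    for i in range(x.shape[0]):
        s = 0.0
        for j in range(x.shape[1]):
            s += x[i, j]
        out_view[i] = s
    return out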
+ cdef inline object PyArray_MultiIterNew1(a):
+     return PyArray_MultiIterNew(1, <void*>a)
+
+ cdef inline object PyArray_MultiIterNew2(a, b):
+     return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+ cdef inline object PyArray_MultiIterNew3(a, b, c):
+     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+
+ cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+
+ cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+
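These helpers wrap `PyArray_MultiIterNew`, which builds the same broadcast object that `np.broadcast` exposes at the Python level. A hedged sketch (the function name is illustrative):

cimport numpy as cnp

cnp.import_array()

def broadcast_shape2(object a, object b):
    # Equivalent in spirit to np.broadcast(a, b).shape
    bcast = cnp.PyArray_MultiIterNew2(a, b)
    return bcast.shape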
+ cdef inline tuple PyDataType_SHAPE(dtype d):
+     if PyDataType_HASSUBARRAY(d):
+         return <tuple>d.subarray.shape
+     else:
+         return ()
+
+
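`PyDataType_SHAPE` returns `()` for plain dtypes and the subarray shape for subarray dtypes. A short sketch (the wrapper name is hypothetical):

cimport numpy as cnp

cnp.import_array()

def dtype_subshape(cnp.dtype dt):
    # e.g. np.dtype((np.float64, (2, 3))) -> (2, 3); np.dtype(np.float64) -> ()
    return cnp.PyDataType_SHAPE(dt)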
+ cdef extern from "numpy/ndarrayobject.h":
+     PyTypeObject PyTimedeltaArrType_Type
+     PyTypeObject PyDatetimeArrType_Type
+     ctypedef int64_t npy_timedelta
+     ctypedef int64_t npy_datetime
+
+ cdef extern from "numpy/ndarraytypes.h":
+     ctypedef struct PyArray_DatetimeMetaData:
+         NPY_DATETIMEUNIT base
+         int64_t num
+
+ cdef extern from "numpy/arrayscalars.h":
+
+     # abstract types
+     ctypedef class numpy.generic [object PyObject]:
+         pass
+     ctypedef class numpy.number [object PyObject]:
+         pass
+     ctypedef class numpy.integer [object PyObject]:
+         pass
+     ctypedef class numpy.signedinteger [object PyObject]:
+         pass
+     ctypedef class numpy.unsignedinteger [object PyObject]:
+         pass
+     ctypedef class numpy.inexact [object PyObject]:
+         pass
+     ctypedef class numpy.floating [object PyObject]:
+         pass
+     ctypedef class numpy.complexfloating [object PyObject]:
+         pass
+     ctypedef class numpy.flexible [object PyObject]:
+         pass
+     ctypedef class numpy.character [object PyObject]:
+         pass
+
+     ctypedef struct PyDatetimeScalarObject:
+         # PyObject_HEAD
+         npy_datetime obval
+         PyArray_DatetimeMetaData obmeta
+
+     ctypedef struct PyTimedeltaScalarObject:
+         # PyObject_HEAD
+         npy_timedelta obval
+         PyArray_DatetimeMetaData obmeta
+
+     ctypedef enum NPY_DATETIMEUNIT:
+         NPY_FR_Y
+         NPY_FR_M
+         NPY_FR_W
+         NPY_FR_D
+         NPY_FR_B
+         NPY_FR_h
+         NPY_FR_m
+         NPY_FR_s
+         NPY_FR_ms
+         NPY_FR_us
+         NPY_FR_ns
+         NPY_FR_ps
+         NPY_FR_fs
+         NPY_FR_as
+         NPY_FR_GENERIC
+
+
+ #
+ # ufunc API
+ #
+
+ cdef extern from "numpy/ufuncobject.h":
+
+     ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
+
+     ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]:
+         cdef:
+             int nin, nout, nargs
+             int identity
+             PyUFuncGenericFunction *functions
+             void **data
+             int ntypes
+             int check_return
+             char *name
+             char *types
+             char *doc
+             void *ptr
+             PyObject *obj
+             PyObject *userloops
+
+     cdef enum:
+         PyUFunc_Zero
+         PyUFunc_One
+         PyUFunc_None
+         UFUNC_ERR_IGNORE
+         UFUNC_ERR_WARN
+         UFUNC_ERR_RAISE
+         UFUNC_ERR_CALL
+         UFUNC_ERR_PRINT
+         UFUNC_ERR_LOG
+         UFUNC_MASK_DIVIDEBYZERO
+         UFUNC_MASK_OVERFLOW
+         UFUNC_MASK_UNDERFLOW
+         UFUNC_MASK_INVALID
+         UFUNC_SHIFT_DIVIDEBYZERO
+         UFUNC_SHIFT_OVERFLOW
+         UFUNC_SHIFT_UNDERFLOW
+         UFUNC_SHIFT_INVALID
+         UFUNC_FPE_DIVIDEBYZERO
+         UFUNC_FPE_OVERFLOW
+         UFUNC_FPE_UNDERFLOW
+         UFUNC_FPE_INVALID
+         UFUNC_ERR_DEFAULT
+         UFUNC_ERR_DEFAULT2
+
+     object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+           void **, char *, int, int, int, int, char *, char *, int)
+     int PyUFunc_RegisterLoopForType(ufunc, int,
+                                     PyUFuncGenericFunction, int *, void *) except -1
+     void PyUFunc_f_f_As_d_d \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_d_d \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_f_f \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_g_g \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_F_F_As_D_D \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_F_F \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_D_D \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_G_G \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_O_O \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_ff_f_As_dd_d \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_ff_f \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_dd_d \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_gg_g \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_FF_F_As_DD_D \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_DD_D \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_FF_F \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_GG_G \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_OO_O \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_O_O_method \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_OO_O_method \
+          (char **, npy_intp *, npy_intp *, void *)
+     void PyUFunc_On_Om \
+          (char **, npy_intp *, npy_intp *, void *)
+     int PyUFunc_GetPyValues \
+         (char *, int *, int *, PyObject **)
+     int PyUFunc_checkfperr \
+         (int, PyObject *, int *)
+     void PyUFunc_clearfperr()
+     int PyUFunc_getfperr()
+     int PyUFunc_handlefperr \
+         (int, PyObject *, int, int *) except -1
+     int PyUFunc_ReplaceLoopBySignature \
+         (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+     object PyUFunc_FromFuncAndDataAndSignature \
+              (PyUFuncGenericFunction *, void **, char *, int, int, int,
+               int, char *, char *, int, char *)
+
+     int _import_umath() except -1
+
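The block above is what a Cython module uses to register its own ufunc loops. A hedged sketch of a 1-in/1-out float64 loop built with `PyUFunc_FromFuncAndData`; the loop body and the names `double_it`, `loops`, `user_data`, and the doc text are illustrative, not part of the file:

cimport numpy as cnp

cnp.import_array()
cnp.import_umath()

cdef void double_it(char **args, cnp.npy_intp *dims, cnp.npy_intp *steps, void *data):
    # Inner loop: out[i] = 2 * in[i], stepping by the byte strides NumPy passes in.
    cdef cnp.npy_intp i, n = dims[0]
    cdef char *in_ptr = args[0]
    cdef char *out_ptr = args[1]
    for i in range(n):
        (<double *> out_ptr)[0] = 2.0 * (<double *> in_ptr)[0]
        in_ptr += steps[0]
        out_ptr += steps[1]

cdef cnp.PyUFuncGenericFunction loops[1]
cdef char types[2]
cdef void *user_data[1]

loops[0] = double_it
types[0] = cnp.NPY_DOUBLE      # input type
types[1] = cnp.NPY_DOUBLE      # output type
user_data[0] = NULL

double_ufunc = cnp.PyUFunc_FromFuncAndData(
    loops, user_data, types, 1, 1, 1, cnp.PyUFunc_None,
    b"double_it", b"Doubles its input element-wise.", 0)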
+ cdef inline void set_array_base(ndarray arr, object base):
+     Py_INCREF(base) # important to do this before stealing the reference below!
+     PyArray_SetBaseObject(arr, base)
+
+ cdef inline object get_array_base(ndarray arr):
+     base = PyArray_BASE(arr)
+     if base is NULL:
+         return None
+     return <object>base
+
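A minimal sketch of the intended usage: wrap C-allocated memory in an ndarray and let `set_array_base` tie the owner's lifetime to the array. It assumes `PyArray_SimpleNewFromData` and `NPY_DOUBLE` are declared earlier in this `.pxd` (they are part of the standard NumPy Cython declarations); `_DoubleBuffer` and `doubles_from_c` are hypothetical names:

cimport numpy as cnp
from libc.stdlib cimport calloc, free

cnp.import_array()

cdef class _DoubleBuffer:
    # Owns a heap allocation; freed when the last array referencing it goes away.
    cdef double *data
    def __cinit__(self, Py_ssize_t n):
        self.data = <double *> calloc(n, sizeof(double))
        if self.data == NULL:
            raise MemoryError()
    def __dealloc__(self):
        free(self.data)

def doubles_from_c(Py_ssize_t n):
    cdef _DoubleBuffer buf = _DoubleBuffer(n)
    cdef cnp.npy_intp dims[1]
    dims[0] = n
    arr = cnp.PyArray_SimpleNewFromData(1, dims, cnp.NPY_DOUBLE, <void *> buf.data)
    # set_array_base() INCREFs buf before PyArray_SetBaseObject steals the
    # reference, so buf stays alive exactly as long as arr does.
    cnp.set_array_base(arr, buf)
    return arr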
+ # Versions of the import_* functions which are more suitable for
+ # Cython code.
+ cdef inline int import_array() except -1:
+     try:
+         __pyx_import_array()
+     except Exception:
+         raise ImportError("numpy.core.multiarray failed to import")
+
+ cdef inline int import_umath() except -1:
+     try:
+         _import_umath()
+     except Exception:
+         raise ImportError("numpy.core.umath failed to import")
+
+ cdef inline int import_ufunc() except -1:
+     try:
+         _import_umath()
+     except Exception:
+         raise ImportError("numpy.core.umath failed to import")
+
+
+ cdef inline bint is_timedelta64_object(object obj):
+     """
+     Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+     Parameters
+     ----------
+     obj : object
+
+     Returns
+     -------
+     bool
+     """
+     return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+ cdef inline bint is_datetime64_object(object obj):
+     """
+     Cython equivalent of `isinstance(obj, np.datetime64)`
+
+     Parameters
+     ----------
+     obj : object
+
+     Returns
+     -------
+     bool
+     """
+     return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+ cdef inline npy_datetime get_datetime64_value(object obj) nogil:
+     """
+     returns the int64 value underlying a scalar numpy datetime64 object
+
+     Note that to interpret this as a datetime, the corresponding unit is
+     also needed. That can be found using `get_datetime64_unit`.
+     """
+     return (<PyDatetimeScalarObject*>obj).obval
+
+
+ cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
+     """
+     returns the int64 value underlying a scalar numpy timedelta64 object
+     """
+     return (<PyTimedeltaScalarObject*>obj).obval
+
+
+ cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:
+     """
+     returns the unit part of the dtype for a numpy datetime64 object.
+     """
+     return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base
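To show how the datetime helpers above combine, a short hedged sketch (the `describe_datetime64` wrapper is hypothetical) that returns the raw int64 payload together with its unit code:

cimport numpy as cnp

cnp.import_array()

def describe_datetime64(object obj):
    # For np.datetime64("2024-01-01") this returns the day count since the
    # Unix epoch and the integer code of NPY_FR_D.
    if not cnp.is_datetime64_object(obj):
        raise TypeError("expected a numpy.datetime64 scalar")
    cdef cnp.npy_datetime value = cnp.get_datetime64_value(obj)
    cdef cnp.NPY_DATETIMEUNIT unit = cnp.get_datetime64_unit(obj)
    return value, <int> unit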