applied-ai-018 committed
Commit 475e4f6 · verified · 1 parent: 1d06125

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/bnb.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/environment.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/imports.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/memory.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/modeling.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/offload.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/operations.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/rich.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/torch_xla.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/versions.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/accelerate/utils/dataclasses.py +1758 -0
  16. llmeval-env/lib/python3.10/site-packages/accelerate/utils/environment.py +274 -0
  17. llmeval-env/lib/python3.10/site-packages/accelerate/utils/imports.py +403 -0
  18. llmeval-env/lib/python3.10/site-packages/accelerate/utils/launch.py +626 -0
  19. llmeval-env/lib/python3.10/site-packages/accelerate/utils/megatron_lm.py +1435 -0
  20. llmeval-env/lib/python3.10/site-packages/accelerate/utils/modeling.py +1802 -0
  21. llmeval-env/lib/python3.10/site-packages/accelerate/utils/offload.py +213 -0
  22. llmeval-env/lib/python3.10/site-packages/accelerate/utils/operations.py +848 -0
  23. llmeval-env/lib/python3.10/site-packages/accelerate/utils/random.py +124 -0
  24. llmeval-env/lib/python3.10/site-packages/accelerate/utils/rich.py +24 -0
  25. llmeval-env/lib/python3.10/site-packages/mbstrdecoder-1.1.3.dist-info/INSTALLER +1 -0
  26. llmeval-env/lib/python3.10/site-packages/mbstrdecoder-1.1.3.dist-info/LICENSE +21 -0
  27. llmeval-env/lib/python3.10/site-packages/mbstrdecoder-1.1.3.dist-info/METADATA +117 -0
  28. llmeval-env/lib/python3.10/site-packages/mbstrdecoder-1.1.3.dist-info/top_level.txt +1 -0
  29. llmeval-env/lib/python3.10/site-packages/networkx/__init__.py +49 -0
  30. llmeval-env/lib/python3.10/site-packages/networkx/conftest.py +289 -0
  31. llmeval-env/lib/python3.10/site-packages/networkx/convert.py +494 -0
  32. llmeval-env/lib/python3.10/site-packages/networkx/convert_matrix.py +1202 -0
  33. llmeval-env/lib/python3.10/site-packages/networkx/exception.py +125 -0
  34. llmeval-env/lib/python3.10/site-packages/networkx/generators/__init__.py +33 -0
  35. llmeval-env/lib/python3.10/site-packages/networkx/generators/atlas.py +179 -0
  36. llmeval-env/lib/python3.10/site-packages/networkx/generators/classic.py +1054 -0
  37. llmeval-env/lib/python3.10/site-packages/networkx/generators/cographs.py +67 -0
  38. llmeval-env/lib/python3.10/site-packages/networkx/generators/community.py +1069 -0
  39. llmeval-env/lib/python3.10/site-packages/networkx/generators/degree_seq.py +868 -0
  40. llmeval-env/lib/python3.10/site-packages/networkx/generators/directed.py +501 -0
  41. llmeval-env/lib/python3.10/site-packages/networkx/generators/duplication.py +163 -0
  42. llmeval-env/lib/python3.10/site-packages/networkx/generators/ego.py +65 -0
  43. llmeval-env/lib/python3.10/site-packages/networkx/generators/expanders.py +475 -0
  44. llmeval-env/lib/python3.10/site-packages/networkx/generators/geometric.py +1047 -0
  45. llmeval-env/lib/python3.10/site-packages/networkx/generators/harary_graph.py +199 -0
  46. llmeval-env/lib/python3.10/site-packages/networkx/generators/internet_as_graphs.py +441 -0
  47. llmeval-env/lib/python3.10/site-packages/networkx/generators/intersection.py +124 -0
  48. llmeval-env/lib/python3.10/site-packages/networkx/generators/interval_graph.py +69 -0
  49. llmeval-env/lib/python3.10/site-packages/networkx/generators/joint_degree_seq.py +664 -0
  50. llmeval-env/lib/python3.10/site-packages/networkx/generators/lattice.py +367 -0
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/bnb.cpython-310.pyc ADDED
Binary file (13.8 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/dataclasses.cpython-310.pyc ADDED
Binary file (58.7 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/environment.cpython-310.pyc ADDED
Binary file (10 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/fsdp_utils.cpython-310.pyc ADDED
Binary file (5.77 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/imports.cpython-310.pyc ADDED
Binary file (10.8 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/megatron_lm.cpython-310.pyc ADDED
Binary file (38.3 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/memory.cpython-310.pyc ADDED
Binary file (4.41 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/modeling.cpython-310.pyc ADDED
Binary file (53.1 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/offload.cpython-310.pyc ADDED
Binary file (6.97 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/operations.cpython-310.pyc ADDED
Binary file (28.4 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/rich.cpython-310.pyc ADDED
Binary file (417 Bytes).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/torch_xla.cpython-310.pyc ADDED
Binary file (1.36 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/transformer_engine.cpython-310.pyc ADDED
Binary file (2.1 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/__pycache__/versions.cpython-310.pyc ADDED
Binary file (1.69 kB).
llmeval-env/lib/python3.10/site-packages/accelerate/utils/dataclasses.py ADDED
@@ -0,0 +1,1758 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ General namespace and dataclass related classes
+ """
+
+ import argparse
+ import copy
+ import enum
+ import functools
+ import os
+ import typing
+ import warnings
+ from contextlib import contextmanager
+ from dataclasses import dataclass, field
+ from datetime import timedelta
+ from typing import Any, Callable, Dict, Iterable, List, Literal, Optional, Tuple, get_args
+
+ import torch
+
+ from .constants import FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE
+ from .environment import str_to_bool
+ from .imports import is_cuda_available, is_npu_available, is_xpu_available
+ from .versions import compare_versions
+
+
+ class KwargsHandler:
+     """
+     Internal mixin that implements a `to_kwargs()` method for a dataclass.
+     """
+
+     def to_dict(self):
+         return copy.deepcopy(self.__dict__)
+
+     def to_kwargs(self):
+         """
+         Returns a dictionary containing the attributes with values different from the default of this class.
+         """
+         # import clear_environment here to avoid circular import problem
+         from .other import clear_environment
+
+         with clear_environment():
+             default_dict = self.__class__().to_dict()
+         this_dict = self.to_dict()
+         return {k: v for k, v in this_dict.items() if default_dict[k] != v}
+
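The `to_kwargs()` method above compares a freshly constructed instance (which carries the defaults) against the current instance and returns only the overridden fields. A minimal standalone sketch of the same pattern, with a hypothetical `MyKwargs` class rather than the library mixin itself (the real mixin also clears the environment first so env-dependent defaults don't leak in):

```python
import copy
from dataclasses import dataclass


class KwargsLike:
    # Sketch of the mixin above: report only fields that differ from the defaults.
    def to_dict(self):
        return copy.deepcopy(self.__dict__)

    def to_kwargs(self):
        default_dict = self.__class__().to_dict()  # fresh instance == defaults
        this_dict = self.to_dict()
        return {k: v for k, v in this_dict.items() if default_dict[k] != v}


@dataclass
class MyKwargs(KwargsLike):
    dim: int = 0
    find_unused_parameters: bool = False


print(MyKwargs(find_unused_parameters=True).to_kwargs())
# -> {'find_unused_parameters': True}
```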
+
+ @dataclass
+ class AutocastKwargs(KwargsHandler):
+     """
+     Use this object in your [`Accelerator`] to customize how `torch.autocast` behaves. Please refer to the
+     documentation of this [context manager](https://pytorch.org/docs/stable/amp.html#torch.autocast) for more
+     information on each argument.
+
+     Example:
+
+     ```python
+     from accelerate import Accelerator
+     from accelerate.utils import AutocastKwargs
+
+     kwargs = AutocastKwargs(cache_enabled=True)
+     accelerator = Accelerator(kwargs_handlers=[kwargs])
+     ```
+     """
+
+     enabled: bool = True
+     cache_enabled: bool = None
+
+
+ @dataclass
+ class DistributedDataParallelKwargs(KwargsHandler):
+     """
+     Use this object in your [`Accelerator`] to customize how your model is wrapped in a
+     `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this
+     [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more
+     information on each argument.
+
+     <Tip warning={true}>
+
+     `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.
+
+     `static_graph` is only available in PyTorch 1.11.0 and later versions.
+
+     </Tip>
+
+     Example:
+
+     ```python
+     from accelerate import Accelerator
+     from accelerate.utils import DistributedDataParallelKwargs
+
+     kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
+     accelerator = Accelerator(kwargs_handlers=[kwargs])
+     ```
+     """
+
+     dim: int = 0
+     broadcast_buffers: bool = True
+     bucket_cap_mb: int = 25
+     find_unused_parameters: bool = False
+     check_reduction: bool = False
+     gradient_as_bucket_view: bool = False
+     static_graph: bool = False
+
+
+ @dataclass
+ class GradScalerKwargs(KwargsHandler):
+     """
+     Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the
+     `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this
+     [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.
+
+     <Tip warning={true}>
+
+     `GradScaler` is only available in PyTorch 1.5.0 and later versions.
+
+     </Tip>
+
+     Example:
+
+     ```python
+     from accelerate import Accelerator
+     from accelerate.utils import GradScalerKwargs
+
+     kwargs = GradScalerKwargs(backoff_factor=0.25)
+     accelerator = Accelerator(kwargs_handlers=[kwargs])
+     ```
+     """
+
+     init_scale: float = 65536.0
+     growth_factor: float = 2.0
+     backoff_factor: float = 0.5
+     growth_interval: int = 2000
+     enabled: bool = True
+
+
+ @dataclass
+ class InitProcessGroupKwargs(KwargsHandler):
+     """
+     Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. Please refer
+     to the documentation of this
+     [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more
+     information on each argument.
+
+     ```python
+     from datetime import timedelta
+     from accelerate import Accelerator
+     from accelerate.utils import InitProcessGroupKwargs
+
+     kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))
+     accelerator = Accelerator(kwargs_handlers=[kwargs])
+     ```
+     """
+
+     backend: Optional[str] = "nccl"
+     init_method: Optional[str] = None
+     timeout: timedelta = timedelta(seconds=1800)
+
+
+ # Literals
+ Backend = Literal["MSAMP", "TE"]
+ OptLevel = Literal["O1", "O2"]
+ FP8Format = Literal["E4M3", "HYBRID"]
+ AmaxComputeAlgorithm = Literal["max", "most_recent"]
+
+
+ @dataclass
+ class FP8RecipeKwargs(KwargsHandler):
+     """
+     Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision
+     training with `transformer-engine` or `ms-amp`.
+
+     <Tip>
+
+     For more information on `transformer-engine` args, please refer to the API
+     [documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html).
+
+     For more information on the `ms-amp` args, please refer to the Optimization Level
+     [documentation](https://azure.github.io/MS-AMP/docs/user-tutorial/optimization-level).
+
+     </Tip>
+
+     ```python
+     from accelerate import Accelerator
+     from accelerate.utils import FP8RecipeKwargs
+
+     kwargs = FP8RecipeKwargs(backend="te", fp8_format="HYBRID")
+     accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs])
+     ```
+
+     To use MS-AMP as an engine, pass `backend="msamp"` and the `optimization_level`:
+
+     ```python
+     kwargs = FP8RecipeKwargs(backend="msamp", optimization_level="O2")
+     ```
+
+     Args:
+         backend (`str`, *optional*, defaults to "msamp"):
+             Which FP8 engine to use. Must be one of `"msamp"` (MS-AMP) or `"te"` (TransformerEngine).
+         margin (`int`, *optional*, defaults to 0):
+             The margin to use for the gradient scaling.
+         interval (`int`, *optional*, defaults to 1):
+             The interval to use for how often the scaling factor is recomputed.
+         fp8_format (`str`, *optional*, defaults to "E4M3"):
+             The format to use for the FP8 recipe. Must be one of `E4M3` or `HYBRID`.
+         amax_history_len (`int`, *optional*, defaults to 1024):
+             The length of the history to use for the scaling factor computation.
+         amax_compute_algo (`str`, *optional*, defaults to "most_recent"):
+             The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`.
+         override_linear_precision (`tuple` of three `bool`, *optional*, defaults to `(False, False, False)`):
+             Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision.
+         optimization_level (`str`), one of `O1`, `O2` (defaults to `O2`):
+             What level of 8-bit collective communication should be used with MS-AMP. In general:
+             * O1: Weight gradients and `all_reduce` communications are done in fp8, reducing GPU
+               memory usage and communication bandwidth
+             * O2: First-order optimizer states are in 8-bit, and second order states are in FP16.
+               Only available when using Adam or AdamW. This maintains accuracy and can potentially save the
+               highest memory.
+             * O3: Specifically for DeepSpeed, implements capabilities so weights and master weights of models
+               are stored in FP8. If `fp8` is selected and deepspeed is enabled, will be used by default. (Not
+               available currently).
+     """
+
+     backend: Backend = "MSAMP"
+     opt_level: OptLevel = "O2"
+     margin: int = 0
+     interval: int = 1
+     fp8_format: FP8Format = "E4M3"
+     amax_history_len: int = 1
+     amax_compute_algo: AmaxComputeAlgorithm = "most_recent"
+     override_linear_precision: Tuple[bool, bool, bool] = (False, False, False)
+
+     def __post_init__(self):
+         if self.backend.upper() not in get_args(Backend):
+             raise ValueError("`backend` must be 'MSAMP' or 'TE' (TransformerEngine).")
+
+         self.backend = self.backend.upper()
+         # Check TE args
+         if self.backend == "TE":
+             self.fp8_format = self.fp8_format.upper()
+             if self.fp8_format not in get_args(FP8Format):
+                 raise ValueError(f"`fp8_format` must be one of {' or '.join(get_args(FP8Format))}.")
+             if self.amax_compute_algo not in get_args(AmaxComputeAlgorithm):
+                 raise ValueError(f"`amax_compute_algo` must be one of {' or '.join(get_args(AmaxComputeAlgorithm))}")
+         elif self.backend == "MSAMP":
+             if self.opt_level not in get_args(OptLevel):
+                 raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}")
+
+
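A small sketch of how `__post_init__` normalizes and validates these fields, assuming an `accelerate` release matching this file is installed:

```python
from accelerate.utils import FP8RecipeKwargs

# Backends and TE formats are upper-cased in __post_init__, so "te"/"hybrid" work too.
te_kwargs = FP8RecipeKwargs(backend="te", fp8_format="hybrid", amax_compute_algo="max")
print(te_kwargs.backend, te_kwargs.fp8_format)  # TE HYBRID

# Anything outside get_args(Backend) is rejected.
try:
    FP8RecipeKwargs(backend="apex")
except ValueError as err:
    print(err)  # `backend` must be 'MSAMP' or 'TE' (TransformerEngine).
```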
+ class EnumWithContains(enum.EnumMeta):
+     "A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
+
+     def __contains__(cls, item):
+         try:
+             cls(item)
+         except ValueError:
+             return False
+         return True
+
+
+ class BaseEnum(enum.Enum, metaclass=EnumWithContains):
+     "An enum class that can get the value of an item with `str(Enum.key)`"
+
+     def __str__(self):
+         return self.value
+
+     @classmethod
+     def list(cls):
+         "Method to list all the possible items in `cls`"
+         return list(map(str, cls))
+
+
+ class DeprecatedFieldDescriptor:
+     """
+     Descriptor for deprecated fields in an enum class.
+
+     Args:
+         field_name (`str`):
+             The name of the deprecated field.
+         replaced_with (`str`):
+             The name of the field that replaces the deprecated one.
+     """
+
+     def __init__(self, field_name, replaced_with):
+         self.field_name = field_name
+         self.replaced_with = replaced_with
+
+     def __get__(self, instance, owner):
+         warnings.warn(
+             f"The `{self.field_name}` of `{owner}` is deprecated and will be removed in v1.0.0. "
+             f"Please use the `{self.replaced_with}` instead.",
+             FutureWarning,
+         )
+         return getattr(owner, self.replaced_with)
+
+
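The descriptor only fires on attribute access, which is how `DistributedType.TPU` below can warn and forward to `XLA`. A standalone sketch of the pattern, with hypothetical `Config`/`DeprecatedAlias` names:

```python
import warnings


class DeprecatedAlias:
    # Same idea as DeprecatedFieldDescriptor: warn, then forward to the new field.
    def __init__(self, field_name, replaced_with):
        self.field_name = field_name
        self.replaced_with = replaced_with

    def __get__(self, instance, owner):
        warnings.warn(f"`{self.field_name}` is deprecated, use `{self.replaced_with}`.", FutureWarning)
        return getattr(owner, self.replaced_with)


class Config:
    XLA = "XLA"
    TPU = DeprecatedAlias("TPU", "XLA")


print(Config.TPU)  # emits a FutureWarning, then prints 'XLA'
```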
+ class DistributedType(str, enum.Enum):
+     """
+     Represents a type of distributed environment.
+
+     Values:
+
+         - **NO** -- Not a distributed environment, just a single process.
+         - **MULTI_CPU** -- Distributed on multiple CPU nodes.
+         - **MULTI_GPU** -- Distributed on multiple GPUs.
+         - **MULTI_MLU** -- Distributed on multiple MLUs.
+         - **MULTI_NPU** -- Distributed on multiple NPUs.
+         - **MULTI_XPU** -- Distributed on multiple XPUs.
+         - **DEEPSPEED** -- Using DeepSpeed.
+         - **XLA** -- Using TorchXLA.
+         - **TPU** -- This field will be deprecated in v0.27.0. Use XLA instead.
+     """
+
+     # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box.
+     NO = "NO"
+     MULTI_CPU = "MULTI_CPU"
+     MULTI_GPU = "MULTI_GPU"
+     MULTI_NPU = "MULTI_NPU"
+     MULTI_MLU = "MULTI_MLU"
+     MULTI_XPU = "MULTI_XPU"
+     DEEPSPEED = "DEEPSPEED"
+     FSDP = "FSDP"
+     XLA = "XLA"
+     MEGATRON_LM = "MEGATRON_LM"
+     TPU = DeprecatedFieldDescriptor("TPU", "XLA")
+
+
+ class SageMakerDistributedType(str, enum.Enum):
+     """
+     Represents a type of distributed environment.
+
+     Values:
+
+         - **NO** -- Not a distributed environment, just a single process.
+         - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.
+         - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.
+     """
+
+     # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
+     NO = "NO"
+     DATA_PARALLEL = "DATA_PARALLEL"
+     MODEL_PARALLEL = "MODEL_PARALLEL"
+
+
+ class ComputeEnvironment(str, enum.Enum):
+     """
+     Represents a type of the compute environment.
+
+     Values:
+
+         - **LOCAL_MACHINE** -- private/custom cluster hardware.
+         - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.
+     """
+
+     # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.
+     LOCAL_MACHINE = "LOCAL_MACHINE"
+     AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
+
+
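A quick demonstration of the two behaviors these enums rely on, assuming an `accelerate` release matching this file is installed:

```python
import json
import warnings

from accelerate.utils.dataclasses import DistributedType

# Subclassing `str` makes the members JSON-serializable with no custom encoder:
print(json.dumps({"distributed_type": DistributedType.MULTI_GPU}))
# {"distributed_type": "MULTI_GPU"}

# The deprecated TPU field goes through the descriptor and resolves to XLA:
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(DistributedType.TPU is DistributedType.XLA)  # True
print(caught[0].category.__name__)  # FutureWarning
```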
+ class DynamoBackend(str, BaseEnum):
+     """
+     Represents a dynamo backend (see https://pytorch.org/docs/stable/torch.compiler.html).
+
+     Values:
+
+         - **NO** -- Do not use torch dynamo.
+         - **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo
+           issues.
+         - **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's
+           extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups.
+         - **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton
+           kernels. [Read
+           more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)
+         - **AOT_TS_NVFUSER** -- nvFuser with AotAutograd/TorchScript. [Read
+           more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
+         - **NVPRIMS_NVFUSER** -- nvFuser with PrimTorch. [Read
+           more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593)
+         - **CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read more](https://github.com/pytorch/torchdynamo/pull/757)
+         - **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read
+           more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html)
+         - **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read
+           more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst)
+         - **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/)
+         - **TENSORRT** -- Uses ONNXRT to run TensorRT for inference optimizations. [Read
+           more](https://github.com/onnx/onnx-tensorrt)
+         - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
+           more](https://github.com/intel/intel-extension-for-pytorch).
+         - **TVM** -- Uses Apache TVM for inference optimizations. [Read more](https://tvm.apache.org/)
+
+     """
+
+     # Subclassing str as well as Enum allows the `DynamoBackend` to be JSON-serializable out of the box.
+     NO = "NO"
+     EAGER = "EAGER"
+     AOT_EAGER = "AOT_EAGER"
+     INDUCTOR = "INDUCTOR"
+     AOT_TS_NVFUSER = "AOT_TS_NVFUSER"
+     NVPRIMS_NVFUSER = "NVPRIMS_NVFUSER"
+     CUDAGRAPHS = "CUDAGRAPHS"
+     OFI = "OFI"
+     FX2TRT = "FX2TRT"
+     ONNXRT = "ONNXRT"
+     TENSORRT = "TENSORRT"
+     IPEX = "IPEX"
+     TVM = "TVM"
+
+
+ class LoggerType(BaseEnum):
+     """Represents a type of supported experiment tracker
+
+     Values:
+
+         - **ALL** -- all available trackers in the environment that are supported
+         - **TENSORBOARD** -- TensorBoard as an experiment tracker
+         - **WANDB** -- wandb as an experiment tracker
+         - **COMETML** -- comet_ml as an experiment tracker
+         - **DVCLIVE** -- dvclive as an experiment tracker
+     """
+
+     ALL = "all"
+     AIM = "aim"
+     TENSORBOARD = "tensorboard"
+     WANDB = "wandb"
+     COMETML = "comet_ml"
+     MLFLOW = "mlflow"
+     CLEARML = "clearml"
+     DVCLIVE = "dvclive"
+
+
+ class PrecisionType(BaseEnum):
+     """Represents a type of precision used on floating point values
+
+     Values:
+
+         - **NO** -- using full precision (FP32)
+         - **FP16** -- using half precision
+         - **BF16** -- using brain floating point precision
+     """
+
+     NO = "no"
+     FP8 = "fp8"
+     FP16 = "fp16"
+     BF16 = "bf16"
+
+
+ class RNGType(BaseEnum):
+     TORCH = "torch"
+     CUDA = "cuda"
+     MLU = "mlu"
+     NPU = "npu"
+     XLA = "xla"
+     XPU = "xpu"
+     GENERATOR = "generator"
+
+
+ class CustomDtype(enum.Enum):
+     r"""
+     An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`.
+     """
+
+     FP8 = "fp8"
+     INT4 = "int4"
+     INT2 = "int2"
+
+
+ # data classes
+
+
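The `in` check and the `.list()` helper on these enums come from `EnumWithContains`/`BaseEnum` defined earlier. A short demonstration, assuming an `accelerate` release matching this file is installed:

```python
from accelerate.utils.dataclasses import DynamoBackend, PrecisionType

print("fp16" in PrecisionType)      # True  -- EnumWithContains.__contains__
print("int8" in PrecisionType)      # False
print(PrecisionType.list())         # ['no', 'fp8', 'fp16', 'bf16']
print(str(DynamoBackend.INDUCTOR))  # 'INDUCTOR' -- BaseEnum.__str__ returns the value
```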
+ @dataclass
+ class TensorInformation:
+     shape: torch.Size
+     dtype: torch.dtype
+
+
+ @dataclass
+ class DataLoaderConfiguration:
+     """
+     Configuration for dataloader-related items when calling `accelerator.prepare`.
+     """
+
+     split_batches: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If"
+             " `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a"
+             " round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set"
+             " in your script multiplied by the number of processes."
+         },
+     )
+     dispatch_batches: bool = field(
+         default=None,
+         metadata={
+             "help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process"
+             " and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose"
+             " underlying dataset is an `IterableDataset`, `False` otherwise."
+         },
+     )
+     even_batches: bool = field(
+         default=True,
+         metadata={
+             "help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the"
+             " dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among"
+             " all workers."
+         },
+     )
+     use_seedable_sampler: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether or not to use a fully seedable random sampler ([`data_loader.SeedableRandomSampler`])."
+             " Ensures training results are fully reproducible using a different sampling technique. "
+             "While seed-to-seed results may differ, on average the differences are negligible when using"
+             " multiple different seeds to compare. It should also be run with [`~utils.set_seed`] for the best results."
+         },
+     )
+     non_blocking: bool = field(
+         default=False,
+         metadata={
+             "help": "If set to `True`, the dataloader prepared by the Accelerator will utilize non-blocking host-to-device"
+             " transfers, allowing for better overlap between dataloader communication and computation. Recommended that the"
+             " prepared dataloader has `pin_memory` set to `True` to work properly."
+         },
+     )
+
+
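A minimal usage sketch, assuming an `accelerate` release where `Accelerator` accepts a `dataloader_config` argument (the releases that ship this dataclass do):

```python
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

dl_config = DataLoaderConfiguration(split_batches=True, even_batches=True)
accelerator = Accelerator(dataloader_config=dl_config)
```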
+ @dataclass
+ class ProjectConfiguration:
+     """
+     Configuration for the Accelerator object based on inner-project needs.
+     """
+
+     project_dir: str = field(default=None, metadata={"help": "A path to a directory for storing data."})
+     logging_dir: str = field(
+         default=None,
+         metadata={
+             "help": "A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`."
+         },
+     )
+     automatic_checkpoint_naming: bool = field(
+         default=False,
+         metadata={"help": "Whether saved states should be automatically iteratively named."},
+     )
+
+     total_limit: int = field(
+         default=None,
+         metadata={"help": "The maximum number of total saved states to keep."},
+     )
+
+     iteration: int = field(
+         default=0,
+         metadata={"help": "The current save iteration."},
+     )
+
+     save_on_each_node: bool = field(
+         default=False,
+         metadata={
+             "help": (
+                 "When doing multi-node distributed training, whether to save models and checkpoints on each node, or"
+                 " only on the main one"
+             )
+         },
+     )
+
+     def set_directories(self, project_dir: str = None):
+         "Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
+         self.project_dir = project_dir
+         if self.logging_dir is None:
+             self.logging_dir = project_dir
+
+     def __post_init__(self):
+         self.set_directories(self.project_dir)
+
+
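A small sketch of the fallback behavior, assuming `accelerate` is installed; the hypothetical `runs/exp1` path is just for illustration:

```python
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# logging_dir falls back to project_dir via set_directories() in __post_init__.
config = ProjectConfiguration(project_dir="runs/exp1", automatic_checkpoint_naming=True)
print(config.logging_dir)  # runs/exp1

accelerator = Accelerator(project_config=config)
```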
+ @dataclass
+ class GradientAccumulationPlugin(KwargsHandler):
+     """
+     A plugin to configure gradient accumulation behavior. You can only pass one of `gradient_accumulation_plugin` or
+     `gradient_accumulation_steps` to [`Accelerator`]. Passing both raises an error.
+
+     Parameters:
+         num_steps (`int`):
+             The number of steps to accumulate gradients for.
+         adjust_scheduler (`bool`, *optional*, defaults to `True`):
+             Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be
+             `True` if the used scheduler was not adjusted for gradient accumulation.
+         sync_with_dataloader (`bool`, *optional*, defaults to `True`):
+             Whether to synchronize setting the gradients when at the end of the dataloader.
+         sync_each_batch (`bool`, *optional*):
+             Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory
+             requirements when using gradient accumulation with distributed training, at expense of speed.
+
+     Example:
+
+     ```python
+     from accelerate.utils import GradientAccumulationPlugin
+
+     gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2)
+     accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin)
+     ```
+     """
+
+     num_steps: int = field(default=None, metadata={"help": "The number of steps to accumulate gradients for."})
+     adjust_scheduler: bool = field(
+         default=True,
+         metadata={
+             "help": "Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation."
+         },
+     )
+     sync_with_dataloader: bool = field(
+         default=True,
+         metadata={
+             "help": "Whether to synchronize setting the gradients when at the end of the dataloader. Should only be set to `False` if you know what you're doing."
+         },
+     )
+     sync_each_batch: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory requirements when using gradient accumulation with distributed training, at expense of speed."
+         },
+     )
+
+
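A self-contained, CPU-runnable sketch of how the plugin is consumed in a training loop, assuming `accelerate` and `torch` are installed (the tiny linear model and random data are illustrative only):

```python
import torch
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

plugin = GradientAccumulationPlugin(num_steps=4)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

for step in range(8):
    x, y = torch.randn(2, 8), torch.randn(2, 1)
    with accelerator.accumulate(model):  # gradients sync/step only every 4 batches
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```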
+ @dataclass
+ class TorchDynamoPlugin(KwargsHandler):
+     """
+     This plugin is used to compile a model with PyTorch 2.0
+     """
+
+     backend: DynamoBackend = field(
+         default=None,
+         metadata={"help": f"Possible options are {[b.value.lower() for b in DynamoBackend]}"},
+     )
+     mode: str = field(
+         default=None, metadata={"help": "Possible options are 'default', 'reduce-overhead' or 'max-autotune'"}
+     )
+     fullgraph: bool = field(default=None, metadata={"help": "Whether it is ok to break model into several subgraphs"})
+     dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"})
+     options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."})
+     disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"})
+
+     def __post_init__(self):
+         prefix = "ACCELERATE_DYNAMO_"
+         if self.backend is None:
+             self.backend = os.environ.get(prefix + "BACKEND", "no")
+         self.backend = DynamoBackend(self.backend.upper())
+         if self.mode is None:
+             self.mode = os.environ.get(prefix + "MODE", "default")
+         if self.fullgraph is None:
+             self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
+         if self.dynamic is None:
+             self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
+
+     def to_dict(self):
+         dynamo_config = copy.deepcopy(self.__dict__)
+         dynamo_config["backend"] = dynamo_config["backend"].value.lower()
+         return dynamo_config
+
+
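A sketch of the environment-variable resolution in `__post_init__` above, assuming an `accelerate` release matching this file is installed:

```python
import os

from accelerate.utils.dataclasses import TorchDynamoPlugin

# Unset fields are resolved from ACCELERATE_DYNAMO_* variables in __post_init__.
os.environ["ACCELERATE_DYNAMO_BACKEND"] = "inductor"
os.environ["ACCELERATE_DYNAMO_MODE"] = "reduce-overhead"

plugin = TorchDynamoPlugin()
print(plugin.to_dict())
# {'backend': 'inductor', 'mode': 'reduce-overhead', 'fullgraph': False,
#  'dynamic': False, 'options': None, 'disable': False}
```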
+ @dataclass
+ class DeepSpeedPlugin:
+     """
+     This plugin is used to integrate DeepSpeed.
+     """
+
+     hf_ds_config: Any = field(
+         default=None,
+         metadata={
+             "help": "path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`."
+         },
+     )
+     gradient_accumulation_steps: int = field(
+         default=None,
+         metadata={
+             "help": "Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly."
+         },
+     )
+     gradient_clipping: float = field(default=None, metadata={"help": "Enable gradient clipping with value"})
+     zero_stage: int = field(
+         default=None,
+         metadata={"help": "Possible options are 0,1,2,3; Default will be taken from environment variable"},
+     )
+     is_train_batch_min: bool = field(
+         default=True,
+         metadata={"help": "If both train & eval dataloaders are specified, this will decide the train_batch_size"},
+     )
+     offload_optimizer_device: str = field(
+         default=None,
+         metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3."},
+     )
+     offload_param_device: str = field(
+         default=None,
+         metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3."},
+     )
+     offload_optimizer_nvme_path: str = field(
+         default=None,
+         metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
+     )
+     offload_param_nvme_path: str = field(
+         default=None,
+         metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
+     )
+     zero3_init_flag: bool = field(
+         default=None,
+         metadata={
+             "help": "Flag to indicate whether to enable `deepspeed.zero.Init` for constructing massive models. "
+             "Only applicable with ZeRO Stage-3."
+         },
+     )
+     zero3_save_16bit_model: bool = field(
+         default=None,
+         metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."},
+     )
+     transformer_moe_cls_names: str = field(
+         default=None,
+         metadata={
+             "help": "comma-separated list of transformers MoE layer class names (case-sensitive), e.g : "
+             " `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..."
+         },
+     )
+
+     def __post_init__(self):
+         from .deepspeed import HfDeepSpeedConfig
+
+         if self.gradient_accumulation_steps is None:
+             gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
+             self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas
+
+         if self.gradient_clipping is None:
+             gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "auto")
+             self.gradient_clipping = gradient_clipping if gradient_clipping == "auto" else float(gradient_clipping)
+
+         if self.zero_stage is None:
+             self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2))
+
+         if self.offload_optimizer_device is None:
+             self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")
+
+         if self.offload_param_device is None:
+             self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")
+
+         if self.offload_optimizer_nvme_path is None:
+             self.offload_optimizer_nvme_path = os.environ.get(
+                 "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none"
+             )
+
+         if self.offload_param_nvme_path is None:
+             self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none")
+
+         if self.zero3_save_16bit_model is None:
+             self.zero3_save_16bit_model = (
+                 os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true"
+             )
+
+         if self.hf_ds_config is None:
+             self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none")
+         if (
+             isinstance(self.hf_ds_config, dict)
+             or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none")
+             or isinstance(self.hf_ds_config, HfDeepSpeedConfig)
+         ):
+             if not isinstance(self.hf_ds_config, HfDeepSpeedConfig):
+                 self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config)
+             if "gradient_accumulation_steps" not in self.hf_ds_config.config:
+                 self.hf_ds_config.config["gradient_accumulation_steps"] = 1
+             if "zero_optimization" not in self.hf_ds_config.config:
+                 raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.")
+
+             self._deepspeed_config_checks()
+             plugin_to_config_mapping = {
+                 "gradient_accumulation_steps": "gradient_accumulation_steps",
+                 "gradient_clipping": "gradient_clipping",
+                 "zero_stage": "zero_optimization.stage",
+                 "offload_optimizer_device": "zero_optimization.offload_optimizer.device",
+                 "offload_param_device": "zero_optimization.offload_param.device",
+                 "offload_param_nvme_path": "zero_optimization.offload_param.nvme_path",
+                 "offload_optimizer_nvme_path": "zero_optimization.offload_optimizer.nvme_path",
+                 "zero3_save_16bit_model": "zero_optimization.stage3_gather_16bit_weights_on_model_save",
+             }
+             kwargs = {v: getattr(self, k) for k, v in plugin_to_config_mapping.items() if getattr(self, k) is not None}
+             for key in kwargs.keys():
+                 self.fill_match(key, **kwargs, must_match=False)
+             self.hf_ds_config.set_stage_and_offload()
+
+             # filling the missing values in the class attributes from the DeepSpeed config
+             # when using the DeepSpeed config file.
+             for key, value in plugin_to_config_mapping.items():
+                 config_value = self.hf_ds_config.get_value(value)
+                 if config_value is not None and config_value != "auto":
+                     setattr(self, key, config_value)
+         else:
+             config = {
+                 "train_batch_size": "auto",
+                 "train_micro_batch_size_per_gpu": "auto",
+                 "gradient_accumulation_steps": self.gradient_accumulation_steps,
+                 "zero_optimization": {
+                     "stage": self.zero_stage,
+                     "offload_optimizer": {
+                         "device": self.offload_optimizer_device,
+                         "nvme_path": self.offload_optimizer_nvme_path
+                         if self.offload_optimizer_device == "nvme"
+                         else None,
+                     },
+                     "offload_param": {
+                         "device": self.offload_param_device,
+                         "nvme_path": self.offload_param_nvme_path if self.offload_param_device == "nvme" else None,
+                     },
+                     "stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model,
+                 },
+             }
+             if self.gradient_clipping:
+                 config["gradient_clipping"] = self.gradient_clipping
+             self.hf_ds_config = HfDeepSpeedConfig(config)
+
+         self.deepspeed_config = self.hf_ds_config.config
+         self.deepspeed_config["steps_per_print"] = float("inf")  # this will stop deepspeed from logging @ stdout
+         if self.zero3_init_flag is None:
+             self.zero3_init_flag = (
+                 str_to_bool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1
+             )
+         if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
+             warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
+             self.zero3_init_flag = False
+
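A sketch of the config synthesis in `__post_init__` above when no config file is given, assuming `accelerate` is installed (DeepSpeed itself should only be required once training actually starts):

```python
from accelerate.utils import DeepSpeedPlugin

# Without a config file, __post_init__ synthesizes a minimal DeepSpeed config dict.
plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=4, gradient_clipping=1.0)
print(plugin.deepspeed_config["zero_optimization"]["stage"])   # 2
print(plugin.deepspeed_config["gradient_accumulation_steps"])  # 4
print(plugin.deepspeed_config["gradient_clipping"])            # 1.0
```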
+     def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):
+         mismatches = [] if mismatches is None else mismatches
+         config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)
+         if config is None:
+             return
+
+         if config.get(ds_key) == "auto":
+             if ds_key_long in kwargs:
+                 config[ds_key] = kwargs[ds_key_long]
+                 return
+             else:
+                 raise ValueError(
+                     f"`{ds_key_long}` not found in kwargs. "
+                     f"Please specify `{ds_key_long}` without `auto` (set to correct value) in the DeepSpeed config file or "
+                     "pass it in kwargs."
+                 )
+
+         if not must_match:
+             return
+
+         ds_val = config.get(ds_key)
+         if ds_val is not None and ds_key_long in kwargs:
+             if ds_val != kwargs[ds_key_long]:
+                 mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}")
+
+     def is_auto(self, ds_key_long):
+         val = self.hf_ds_config.get_value(ds_key_long)
+         if val is None:
+             return False
+         else:
+             return val == "auto"
+
+     def get_value(self, ds_key_long, default=None):
+         return self.hf_ds_config.get_value(ds_key_long, default)
+
+     def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs):
+         """Process the DeepSpeed config with the values from the kwargs."""
+         mismatches = [] if mismatches is None else mismatches
+         if config is None:
+             config = self.deepspeed_config
+         for key, value in config.items():
+             if isinstance(value, dict):
+                 self.deepspeed_config_process(
+                     prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs
+                 )
+             else:
+                 self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs)
+         if len(mismatches) > 0 and prefix == "":
+             mismatches_msg = "\n".join(mismatches)
+             raise ValueError(
+                 "Please correct the following DeepSpeed config values that mismatch kwargs"
+                 f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
+             )
+
+     def set_mixed_precision(self, mixed_precision):
+         ds_config = self.deepspeed_config
+         kwargs = {
+             "fp16.enabled": mixed_precision == "fp16",
+             "bf16.enabled": mixed_precision == "bf16",
+         }
+         if mixed_precision == "fp16":
+             if "fp16" not in ds_config:
+                 ds_config["fp16"] = {"enabled": True, "auto_cast": True}
+         elif mixed_precision == "bf16":
+             if "bf16" not in ds_config:
+                 ds_config["bf16"] = {"enabled": True}
+
+         if mixed_precision != "no":
+             diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16"
+             if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true":
+                 raise ValueError(
+                     f"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file."
+                 )
+         for dtype in ["fp16", "bf16"]:
+             if dtype not in ds_config:
+                 ds_config[dtype] = {"enabled": False}
+         self.fill_match("fp16.enabled", must_match=False, **kwargs)
+         self.fill_match("bf16.enabled", must_match=False, **kwargs)
+
+     def set_deepspeed_weakref(self):
+         from .imports import is_transformers_available
+
+         if self.zero3_init_flag:
+             if not is_transformers_available():
+                 raise Exception(
+                     "When `zero3_init_flag` is set, it requires Transformers to be installed. "
+                     "Please run `pip install transformers`."
+                 )
+             ds_config = copy.deepcopy(self.deepspeed_config)
+             if "gradient_accumulation_steps" not in ds_config or ds_config["gradient_accumulation_steps"] == "auto":
+                 ds_config["gradient_accumulation_steps"] = 1
+             if (
+                 "train_micro_batch_size_per_gpu" not in ds_config
+                 or ds_config["train_micro_batch_size_per_gpu"] == "auto"
+             ):
+                 ds_config["train_micro_batch_size_per_gpu"] = 1
+             if ds_config.get("train_batch_size", None) == "auto":
+                 del ds_config["train_batch_size"]
+
+             if compare_versions("transformers", "<", "4.33"):
+                 from transformers.deepspeed import HfDeepSpeedConfig
+             else:
+                 from transformers.integrations import HfDeepSpeedConfig
+
+             self.dschf = HfDeepSpeedConfig(ds_config)  # keep this object alive # noqa
+
+     def is_zero3_init_enabled(self):
+         return self.zero3_init_flag
+
+     @contextmanager
+     def zero3_init_context_manager(self, enable=False):
+         old = self.zero3_init_flag
+         if old == enable:
+             yield
+         else:
+             self.zero3_init_flag = enable
+             self.dschf = None
+             self.set_deepspeed_weakref()
+             yield
+             self.zero3_init_flag = old
+             self.dschf = None
+             self.set_deepspeed_weakref()
+
+     def _deepspeed_config_checks(self):
+         env_variable_names_to_ignore = [
+             "ACCELERATE_GRADIENT_ACCUMULATION_STEPS",
+             "ACCELERATE_GRADIENT_CLIPPING",
+             "ACCELERATE_DEEPSPEED_ZERO_STAGE",
+             "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE",
+             "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE",
+             "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH",
+             "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH",
+             "ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL",
+             "ACCELERATE_MIXED_PRECISION",
+         ]
+         env_variable_names_to_ignore = [
+             name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore
+         ]
+
+         deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",")
+
+         if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):
+             raise ValueError(
+                 f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n"
+                 "Please specify them appropriately in the DeepSpeed config file.\n"
+                 "If you are using an accelerate config file, remove other config variables mentioned in the above specified list.\n"
+                 "The easiest method is to create a new config following the questionnaire via `accelerate config`.\n"
+                 "It will only ask for the necessary config variables when using `deepspeed_config_file`."
+             )
+
+     def set_moe_leaf_modules(self, model):
+         if self.transformer_moe_cls_names is None:
+             self.transformer_moe_cls_names = os.environ.get("ACCELERATE_DEEPSPEED_MOE_LAYER_CLS_NAMES", None)
+         if self.transformer_moe_cls_names is not None:
+             if compare_versions("deepspeed", "<", "0.14.0"):
+                 raise ImportError("DeepSpeed version must be >= 0.14.0 to use MOE support. Please update DeepSpeed.")
+             from deepspeed.utils import set_z3_leaf_modules
+
+             class_names = self.transformer_moe_cls_names.split(",")
+             transformer_moe_cls = []
+             for layer_class in class_names:
+                 transformer_cls = get_module_class_from_name(model, layer_class)
+                 if transformer_cls is None:
+                     raise Exception(
+                         f"Could not find a transformer layer class called '{layer_class}' to wrap in the model."
+                     )
+                 else:
+                     transformer_moe_cls.append(transformer_cls)
+             set_z3_leaf_modules(model, transformer_moe_cls)  # z3_leaf
+
+
+ @dataclass
+ class FullyShardedDataParallelPlugin:
+     """
+     This plugin is used to enable fully sharded data parallelism.
+     """
+
+     sharding_strategy: "typing.Any" = field(
+         default=None,
+         metadata={
+             "help": "FSDP Sharding Strategy of type `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`"
+         },
+     )
+     backward_prefetch: "typing.Any" = field(
+         default=None,
+         metadata={
+             "help": "FSDP Backward Prefetch of type `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`"
+         },
+     )
+     mixed_precision_policy: "typing.Any" = field(
+         default=None,
+         metadata={
+             "help": "A config to enable mixed precision training with FullyShardedDataParallel. "
+             "The 3 flags that are set are `param_dtype`, `reduce_dtype`, `buffer_dtype`. "
+             "Each flag expects `torch.dtype` as the value. "
+             "It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.MixedPrecision`."
+         },
+     )
+     auto_wrap_policy: Optional[Callable] = field(
+         default=None,
+         metadata={"help": "A callable specifying a policy to recursively wrap layers with FSDP"},
+     )
+     cpu_offload: "typing.Any" = field(
+         default=None,
+         metadata={
+             "help": "Decides whether to offload parameters and gradients to CPU. "
+             "It is of type `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload`."
+         },
+     )
+     ignored_modules: Optional[Iterable[torch.nn.Module]] = field(
+         default=None,
+         metadata={"help": "A list of modules to ignore for FSDP."},
+     )
+     state_dict_type: "typing.Any" = field(
+         default=None,
+         metadata={
+             "help": "FSDP State Dict Type of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictType`"
+         },
+     )
+     state_dict_config: "typing.Any" = field(
+         default=None,
+         metadata={
+             "help": "FSDP State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.StateDictConfig`"
+         },
+     )
+     optim_state_dict_config: "typing.Any" = field(
+         default=None,
+         metadata={
+             "help": "FSDP Optimizer State Dict Config of type `torch.distributed.fsdp.fully_sharded_data_parallel.OptimStateDictConfig`"
+         },
+     )
+     limit_all_gathers: bool = field(
+         default=True,
+         metadata={
+             "help": "If False, then FSDP allows the CPU thread to schedule all-gathers "
+             "without any extra synchronization. If True, then FSDP explicitly synchronizes the CPU thread to prevent "
+             "too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. "
+             "Enabling this can help lower the number of CUDA malloc retries."
+         },
+     )
+     use_orig_params: bool = field(
+         default=True,
+         metadata={
+             "help": "If `True`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. "
+             "Useful in cases such as parameter-efficient fine-tuning. "
+             "Please refer to this [blog](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). "
+             "This also enables multiple optimizer param groups. This should be `True` when creating an optimizer object before preparing/wrapping the model with FSDP."
+         },
+     )
+     param_init_fn: Optional[Callable[[torch.nn.Module], None]] = field(
+         default=None,
+         metadata={
+             "help": "A Callable[torch.nn.Module] -> None that specifies how modules "
+             "that are currently on the meta device should be initialized onto an actual device."
+         },
+     )
+     sync_module_states: bool = field(
+         default=True,
+         metadata={
+             "help": "If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0 "
+             "to ensure they are the same across all ranks after initialization"
+         },
+     )
+     forward_prefetch: bool = field(
+         default=False,
+         metadata={
+             "help": "If True, then FSDP explicitly prefetches the next upcoming "
+             "all-gather while executing in the forward pass. Only use with static graphs."
+         },
+     )
+     activation_checkpointing: bool = field(
+         default=False,
+         metadata={
+             "help": "If True, activation checkpointing is a technique to reduce memory usage by clearing activations of "
+             "certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time "
+             "for reduced memory usage."
+         },
+     )
+
1114
+ def __post_init__(self):
1115
+ from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy
1116
+
1117
+ prefix = "FSDP_"
1118
+ if self.sharding_strategy is None:
1119
+ sharding_strategy = os.environ.get(prefix + "SHARDING_STRATEGY", "FULL_SHARD")
1120
+ sharding_strategy = (
1121
+ FSDP_SHARDING_STRATEGY.index(sharding_strategy) + 1
1122
+ if not sharding_strategy.isdigit()
1123
+ else int(sharding_strategy)
1124
+ )
1125
+ self.sharding_strategy = ShardingStrategy(sharding_strategy)
1126
+
1127
+ if self.cpu_offload is None:
1128
+ if str_to_bool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
1129
+ self.cpu_offload = CPUOffload(offload_params=True)
1130
+ else:
1131
+ self.cpu_offload = CPUOffload(offload_params=False)
1132
+
1133
+ if self.backward_prefetch is None:
1134
+ prefetch_policy = os.environ.get(prefix + "BACKWARD_PREFETCH", "NO_PREFETCH")
1135
+ if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:
1136
+ self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)
1137
+
1138
+ if self.state_dict_type is None:
1139
+ state_dict_type_policy = os.environ.get(prefix + "STATE_DICT_TYPE", "FULL_STATE_DICT")
1140
+ self.set_state_dict_type(state_dict_type_policy)
1141
+ self.use_orig_params = str_to_bool(os.environ.get(prefix + "USE_ORIG_PARAMS", "False")) == 1
1142
+ self.sync_module_states = str_to_bool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
1143
+ self.forward_prefetch = str_to_bool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
1144
+ self.activation_checkpointing = str_to_bool(os.environ.get(prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1
1145
+
1146
+ if str_to_bool(os.environ.get("FSDP_CPU_RAM_EFFICIENT_LOADING", "False")) == 1 and not self.sync_module_states:
1147
+ warnings.warn(
1148
+ "sync_module_states cannot be False since efficient cpu ram loading enabled. "
1149
+ "Setting sync_module_states to True."
1150
+ )
1151
+ self.sync_module_states = True
1152
+
1153
+ if self.sync_module_states:
1154
+ if is_npu_available():
1155
+ device = torch.npu.current_device()
1156
+ elif is_cuda_available():
1157
+ device = torch.cuda.current_device()
1158
+ elif is_xpu_available():
1159
+ device = torch.xpu.current_device()
1160
+ else:
1161
+ raise RuntimeError(
1162
+ "There are currently no available devices found, must be one of 'XPU', 'CUDA', or 'NPU'."
1163
+ )
1164
+ self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False)
1165
+
1166
+ def set_auto_wrap_policy(self, model):
1167
+ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
1168
+
1169
+ default_transformer_cls_names_to_wrap = (
1170
+ ",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else ""
1171
+ )
1172
+ if self.auto_wrap_policy is None:
1173
+ auto_wrap_policy = os.environ.get("FSDP_AUTO_WRAP_POLICY", "NO_WRAP")
1174
+ if auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[0]:
1175
+ transformer_cls_names_to_wrap = os.environ.get(
1176
+ "FSDP_TRANSFORMER_CLS_TO_WRAP", default_transformer_cls_names_to_wrap
1177
+ ).split(",")
1178
+ transformer_cls_to_wrap = set()
1179
+ for layer_class in transformer_cls_names_to_wrap:
1180
+ transformer_cls = get_module_class_from_name(model, layer_class)
1181
+ if transformer_cls is None:
1182
+ raise Exception("Could not find the transformer layer class to wrap in the model.")
1183
+ else:
1184
+ transformer_cls_to_wrap.add(transformer_cls)
1185
+
1186
+ self.auto_wrap_policy = functools.partial(
1187
+ transformer_auto_wrap_policy,
1188
+ # Transformer layer class to wrap
1189
+ transformer_layer_cls=transformer_cls_to_wrap,
1190
+ )
1191
+ elif auto_wrap_policy == FSDP_AUTO_WRAP_POLICY[1]:
1192
+ min_num_params = int(os.environ.get("FSDP_MIN_NUM_PARAMS", 0))
1193
+ if min_num_params > 0:
1194
+ self.auto_wrap_policy = functools.partial(
1195
+ size_based_auto_wrap_policy, min_num_params=min_num_params
1196
+ )
1197
+
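+ # The policy above is driven by environment variables; a minimal sketch of a
+ # typical configuration (the class name and values are illustrative, not prescriptive):
+ #
+ #   FSDP_AUTO_WRAP_POLICY=TRANSFORMER_BASED_WRAP
+ #   FSDP_TRANSFORMER_CLS_TO_WRAP=GPT2Block
+ #   # or, size-based:
+ #   FSDP_AUTO_WRAP_POLICY=SIZE_BASED_WRAP
+ #   FSDP_MIN_NUM_PARAMS=100000000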
1198
+ def set_mixed_precision(self, mixed_precision, buffer_autocast=False, override=False):
1199
+ if isinstance(mixed_precision, str):
1200
+ if mixed_precision == "fp16":
1201
+ dtype = torch.float16
1202
+ elif mixed_precision == "bf16":
1203
+ dtype = torch.bfloat16
1204
+ elif mixed_precision == "fp32":
1205
+ dtype = torch.float32
1206
+ else:
1207
+ raise ValueError(f"Unknown mixed precision value: {mixed_precision}")
1208
+ else:
1209
+ dtype = mixed_precision
1210
+
1211
+ buffer_dtype = torch.float32 if buffer_autocast else dtype
1212
+ from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
1213
+
1214
+ if self.mixed_precision_policy is None or override:
1215
+ self.mixed_precision_policy = MixedPrecision(
1216
+ param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=buffer_dtype
1217
+ )
1218
+
1219
+ def set_state_dict_type(self, state_dict_type_policy):
1220
+ from torch.distributed.fsdp.fully_sharded_data_parallel import (
1221
+ FullOptimStateDictConfig,
1222
+ FullStateDictConfig,
1223
+ StateDictType,
1224
+ )
1225
+
1226
+ self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)
1227
+
1228
+ if self.state_dict_type == StateDictType.FULL_STATE_DICT:
1229
+ if self.state_dict_config is None:
1230
+ self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
1231
+ if self.optim_state_dict_config is None:
1232
+ self.optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True)
1233
+
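+ # A minimal usage sketch for this plugin (assuming `accelerate` is installed;
+ # `model` and `optimizer` are placeholders for your own objects):
+ #
+ #   from accelerate import Accelerator
+ #   from accelerate.utils import FullyShardedDataParallelPlugin
+ #
+ #   fsdp_plugin = FullyShardedDataParallelPlugin(use_orig_params=True)
+ #   accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
+ #   model, optimizer = accelerator.prepare(model, optimizer)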
1234
+
1235
+ @dataclass
1236
+ class MegatronLMPlugin:
1237
+ """
1238
+ Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective
1239
+ activation recomputation and optimized fused kernels.
1240
+ """
1241
+
1242
+ tp_degree: int = field(default=None, metadata={"help": "tensor parallelism degree."})
1243
+ pp_degree: int = field(default=None, metadata={"help": "pipeline parallelism degree."})
1244
+ num_micro_batches: int = field(default=None, metadata={"help": "number of micro-batches."})
1245
+ gradient_clipping: float = field(
1246
+ default=None, metadata={"help": "gradient clipping value based on global L2 Norm (0 to disable)"}
1247
+ )
1248
+ sequence_parallelism: bool = field(
1249
+ default=None,
1250
+ metadata={"help": "enable sequence parallelism"},
1251
+ )
1252
+ recompute_activations: bool = field(
1253
+ default=None,
1254
+ metadata={"help": "enable selective activation recomputation"},
1255
+ )
1256
+ use_distributed_optimizer: bool = field(
1257
+ default=None,
1258
+ metadata={"help": "enable distributed optimizer"},
1259
+ )
1260
+ pipeline_model_parallel_split_rank: int = field(
1261
+ default=None, metadata={"help": "Rank where encoder and decoder should be split."}
1262
+ )
1263
+ num_layers_per_virtual_pipeline_stage: int = field(
1264
+ default=None, metadata={"help": "Number of layers per virtual pipeline stage."}
1265
+ )
1266
+ is_train_batch_min: bool = field(
1267
+ default=True,
1268
+ metadata={"help": "If both train & eval dataloaders are specified, this will decide the micro_batch_size"},
1269
+ )
1270
+ train_iters: int = field(
1271
+ default=None,
1272
+ metadata={
1273
+ "help": "Total number of iterations to train over all training runs. "
1274
+ "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`"
1275
+ },
1276
+ )
1277
+ train_samples: int = field(
1278
+ default=None,
1279
+ metadata={
1280
+ "help": "Total number of samples to train over all training runs. "
1281
+ "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`"
1282
+ },
1283
+ )
1284
+ weight_decay_incr_style: str = field(
1285
+ default="constant",
1286
+ metadata={"help": 'Weight decay increment function. choices=["constant", "linear", "cosine"]. '},
1287
+ )
1288
+ start_weight_decay: float = field(
1289
+ default=None,
1290
+ metadata={"help": "Initial weight decay coefficient for L2 regularization."},
1291
+ )
1292
+ end_weight_decay: float = field(
1293
+ default=None,
1294
+ metadata={"help": "End of run weight decay coefficient for L2 regularization."},
1295
+ )
1296
+ lr_decay_style: str = field(
1297
+ default="linear",
1298
+ metadata={"help": "Learning rate decay function. choices=['constant', 'linear', 'cosine']."},
1299
+ )
1300
+ lr_decay_iters: int = field(
1301
+ default=None,
1302
+ metadata={"help": "Number of iterations for learning rate decay. If None defaults to `train_iters`."},
1303
+ )
1304
+ lr_decay_samples: int = field(
1305
+ default=None,
1306
+ metadata={"help": "Number of samples for learning rate decay. If None defaults to `train_samples`."},
1307
+ )
1308
+ lr_warmup_iters: int = field(
1309
+ default=None,
1310
+ metadata={"help": "number of iterations to linearly warmup learning rate over."},
1311
+ )
1312
+ lr_warmup_samples: int = field(
1313
+ default=None,
1314
+ metadata={"help": "number of samples to linearly warmup learning rate over."},
1315
+ )
1316
+ lr_warmup_fraction: float = field(
1317
+ default=None,
1318
+ metadata={"help": "fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over."},
1319
+ )
1320
+ min_lr: float = field(
1321
+ default=0,
1322
+ metadata={"help": "Minumum value for learning rate. The scheduler clip values below this threshold."},
1323
+ )
1324
+ consumed_samples: List[int] = field(
1325
+ default=None,
1326
+ metadata={
1327
+ "help": "Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call."
1328
+ },
1329
+ )
1330
+ no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to disable weight decay."})
1331
+ scale_lr_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to scale learning rate."})
1332
+ lr_mult: float = field(default=1.0, metadata={"help": "Learning rate multiplier."})
1333
+ megatron_dataset_flag: bool = field(
1334
+ default=False,
1335
+ metadata={"help": "Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format."},
1336
+ )
1337
+ seq_length: int = field(
1338
+ default=None,
1339
+ metadata={"help": "Maximum sequence length to process."},
1340
+ )
1341
+ encoder_seq_length: int = field(
1342
+ default=None,
1343
+ metadata={"help": "Maximum sequence length to process for the encoder."},
1344
+ )
1345
+ decoder_seq_length: int = field(
1346
+ default=None,
1347
+ metadata={"help": "Maximum sequence length to process for the decoder."},
1348
+ )
1349
+ tensorboard_dir: str = field(
1350
+ default=None,
1351
+ metadata={"help": "Path to save tensorboard logs."},
1352
+ )
1353
+ set_all_logging_options: bool = field(
1354
+ default=False,
1355
+ metadata={"help": "Whether to set all logging options."},
1356
+ )
1357
+ eval_iters: int = field(
1358
+ default=100, metadata={"help": "Number of iterations to run for evaluation validation/test for."}
1359
+ )
1360
+ eval_interval: int = field(
1361
+ default=1000, metadata={"help": "Interval between running evaluation on validation set."}
1362
+ )
1363
+ return_logits: bool = field(
1364
+ default=False,
1365
+ metadata={"help": "Whether to return logits from the model."},
1366
+ )
1367
+
1368
+ # custom train step args
1369
+ custom_train_step_class: Optional[Any] = field(
1370
+ default=None,
1371
+ metadata={"help": "Custom train step class."},
1372
+ )
1373
+ custom_train_step_kwargs: Optional[Dict[str, Any]] = field(
1374
+ default=None,
1375
+ metadata={"help": "Custom train step kwargs."},
1376
+ )
1377
+
1378
+ # custom model args
1379
+ custom_model_provider_function: Optional[Callable] = field(
1380
+ default=None,
1381
+ metadata={"help": "Custom model provider function."},
1382
+ )
1383
+ custom_prepare_model_function: Optional[Callable] = field(
1384
+ default=None,
1385
+ metadata={"help": "Custom prepare model function."},
1386
+ )
1387
+
1388
+ # remaining args such as enabling Alibi/ROPE positional embeddings,
1389
+ # wandb logging, Multi-Query Attention, etc.
1390
+ other_megatron_args: Optional[Dict[str, Any]] = field(
1391
+ default=None,
1392
+ metadata={"help": "Other Megatron-LM arguments. Please refer Megatron-LM"},
1393
+ )
1394
+
1395
+ def __post_init__(self):
1396
+ prefix = "MEGATRON_LM_"
1397
+ if self.tp_degree is None:
1398
+ self.tp_degree = int(os.environ.get(prefix + "TP_DEGREE", 1))
1399
+ if self.pp_degree is None:
1400
+ self.pp_degree = int(os.environ.get(prefix + "PP_DEGREE", 1))
1401
+ if self.num_micro_batches is None:
1402
+ self.num_micro_batches = int(os.environ.get(prefix + "NUM_MICRO_BATCHES", 1))
1403
+ if self.gradient_clipping is None:
1404
+ self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0))
1405
+ if self.recompute_activations is None:
1406
+ self.recompute_activations = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATIONS", "False")) == 1
1407
+ if self.use_distributed_optimizer is None:
1408
+ self.use_distributed_optimizer = (
1409
+ str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1
1410
+ )
1411
+ if self.sequence_parallelism is None:
1412
+ self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1
1413
+
1414
+ if self.pp_degree > 1 or self.use_distributed_optimizer:
1415
+ self.DDP_impl = "local"
1416
+ else:
1417
+ self.DDP_impl = "torch"
1418
+
1419
+ if self.consumed_samples is not None:
1420
+ if len(self.consumed_samples) == 1:
1421
+ self.consumed_samples.extend([0, 0])
1422
+ elif len(self.consumed_samples) == 2:
1423
+ self.consumed_samples.append(0)
1424
+
1425
+ self.megatron_lm_default_args = {
1426
+ "tensor_model_parallel_size": self.tp_degree,
1427
+ "pipeline_model_parallel_size": self.pp_degree,
1428
+ "pipeline_model_parallel_split_rank": self.pipeline_model_parallel_split_rank,
1429
+ "num_layers_per_virtual_pipeline_stage": self.num_layers_per_virtual_pipeline_stage,
1430
+ "DDP_impl": self.DDP_impl,
1431
+ "use_distributed_optimizer": self.use_distributed_optimizer,
1432
+ "sequence_parallel": self.sequence_parallelism,
1433
+ "clip_grad": self.gradient_clipping,
1434
+ "num_micro_batches": self.num_micro_batches,
1435
+ "consumed_samples": self.consumed_samples,
1436
+ "no_wd_decay_cond": self.no_wd_decay_cond,
1437
+ "scale_lr_cond": self.scale_lr_cond,
1438
+ "lr_mult": self.lr_mult,
1439
+ "megatron_dataset_flag": self.megatron_dataset_flag,
1440
+ "eval_iters": self.eval_iters,
1441
+ "eval_interval": self.eval_interval,
1442
+ }
1443
+ if self.recompute_activations:
1444
+ self.megatron_lm_default_args["recompute_granularity"] = "selective"
1445
+ if self.tensorboard_dir is not None:
1446
+ self.megatron_lm_default_args["tensorboard_dir"] = self.tensorboard_dir
1447
+ if self.set_all_logging_options:
1448
+ self.set_tensorboard_logging_options()
1449
+ if self.other_megatron_args is not None:
1450
+ self.megatron_lm_default_args.update(self.other_megatron_args)
1451
+
1452
+ def set_network_size_args(self, model, batch_data=None):
1453
+ # Check if the model is either BERT, GPT or T5 else raise error
1454
+ # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'
1455
+ if "megatron-bert" in model.config.model_type.lower():
1456
+ model_type_name = "bert"
1457
+ num_layers = model.config.num_hidden_layers
1458
+ hidden_size = model.config.hidden_size
1459
+ num_attention_heads = model.config.num_attention_heads
1460
+ max_position_embeddings = model.config.max_position_embeddings
1461
+ num_labels = model.config.num_labels
1462
+ orig_vocab_size = model.config.vocab_size
1463
+ if "maskedlm" in model.__class__.__name__.lower():
1464
+ pretraining_flag = True
1465
+ if self.seq_length is not None:
1466
+ if self.encoder_seq_length is not None:
1467
+ warnings.warn("Both `seq_length` and `encoder_seq_length` are set. Using `encoder_seq_length`.")
1468
+ self.seq_length = self.encoder_seq_length
1469
+ elif self.encoder_seq_length is not None:
1470
+ self.seq_length = self.encoder_seq_length
1471
+ elif batch_data is not None:
1472
+ self.seq_length = batch_data["input_ids"].shape[1]
1473
+ else:
1474
+ self.seq_length = max_position_embeddings
1475
+ self.megatron_lm_default_args["seq_length"] = self.seq_length
1476
+ elif "gpt2" in model.config.model_type.lower():
1477
+ model_type_name = "gpt"
1478
+ num_layers = model.config.n_layer
1479
+ hidden_size = model.config.n_embd
1480
+ num_attention_heads = model.config.n_head
1481
+ max_position_embeddings = model.config.n_positions
1482
+ orig_vocab_size = model.config.vocab_size
1483
+ pretraining_flag = True
1484
+ if self.seq_length is not None:
1485
+ if self.decoder_seq_length is not None:
1486
+ warnings.warn("Both `seq_length` and `decoder_seq_length` are set. Using `decoder_seq_length`.")
1487
+ self.seq_length = self.decoder_seq_length
1488
+ elif self.decoder_seq_length is not None:
1489
+ self.seq_length = self.decoder_seq_length
1490
+ elif batch_data is not None:
1491
+ self.seq_length = batch_data["input_ids"].shape[1]
1492
+ else:
1493
+ self.seq_length = max_position_embeddings
1494
+ self.megatron_lm_default_args["seq_length"] = self.seq_length
1495
+ self.megatron_lm_default_args["return_logits"] = self.return_logits
1496
+ self.megatron_lm_default_args["tokenizer_type"] = "GPT2BPETokenizer"
1497
+ elif "t5" in model.config.model_type.lower():
1498
+ model_type_name = "t5"
1499
+ num_layers = model.config.num_layers
1500
+ hidden_size = model.config.d_model
1501
+ num_attention_heads = model.config.num_heads
1502
+ max_position_embeddings = model.config.n_positions if hasattr(model.config, "n_positions") else 1024
1503
+ orig_vocab_size = model.config.vocab_size
1504
+ pretraining_flag = True
1505
+ if self.encoder_seq_length is None:
1506
+ if batch_data is not None:
1507
+ self.encoder_seq_length = batch_data["input_ids"].shape[1]
1508
+ else:
1509
+ self.encoder_seq_length = max_position_embeddings
1510
+ if self.decoder_seq_length is None:
1511
+ if batch_data is not None:
1512
+ self.decoder_seq_length = batch_data["labels"].shape[1]
1513
+ else:
1514
+ self.decoder_seq_length = max_position_embeddings
1515
+
1516
+ self.megatron_lm_default_args["encoder_seq_length"] = self.encoder_seq_length
1517
+ self.megatron_lm_default_args["decoder_seq_length"] = self.decoder_seq_length
1518
+ else:
1519
+ raise ValueError(
1520
+ "🤗 Accelerate Megatron-LM integration supports only BERT, GPT and T5 model. "
1521
+ "Please check the model you are using is one of those."
1522
+ )
1523
+
1524
+ self.megatron_lm_default_args["model_type_name"] = model_type_name
1525
+ self.megatron_lm_default_args["num_layers"] = num_layers
1526
+ self.megatron_lm_default_args["hidden_size"] = hidden_size
1527
+ self.megatron_lm_default_args["num_attention_heads"] = num_attention_heads
1528
+ self.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings
1529
+ self.megatron_lm_default_args["pretraining_flag"] = pretraining_flag
1530
+ self.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size
1531
+ self.megatron_lm_default_args["model_return_dict"] = model.config.return_dict
1532
+ if model_type_name == "bert":
1533
+ self.megatron_lm_default_args["num_labels"] = num_labels
1534
+
1535
+ def set_mixed_precision(self, mixed_precision):
1536
+ if mixed_precision == "fp16":
1537
+ self.megatron_lm_default_args["fp16"] = True
1538
+ elif mixed_precision == "bf16":
1539
+ self.megatron_lm_default_args["bf16"] = True
1540
+ self.DDP_impl = "local"
1541
+ self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl
1542
+
1543
+ def set_training_args(self, micro_batch_size, dp_degree):
1544
+ self.data_parallel_size = dp_degree
1545
+ self.micro_batch_size = micro_batch_size
1546
+ self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches
1547
+ self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size
1548
+ self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size
1549
+ self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size
1550
+
1551
+ def set_optimizer_type(self, optimizer):
1552
+ optimizer_name = optimizer.__class__.__name__.lower()
1553
+ if "adam" in optimizer_name:
1554
+ self.megatron_lm_default_args["optimizer"] = "adam"
1555
+ self.megatron_lm_default_args["adam_beta1"] = optimizer.defaults["betas"][0]
1556
+ self.megatron_lm_default_args["adam_beta2"] = optimizer.defaults["betas"][1]
1557
+ self.megatron_lm_default_args["adam_eps"] = optimizer.defaults["eps"]
1558
+ elif "sgd" in optimizer_name:
1559
+ self.megatron_lm_default_args["optimizer"] = "sgd"
1560
+ self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"]
1561
+ else:
1562
+ raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM")
1563
+
1564
+ self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"]
1565
+ self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"]
1566
+
1567
+ def set_scheduler_args(self, scheduler):
1568
+ if self.train_iters is None:
1569
+ self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"]
1570
+ if self.train_samples is not None:
1571
+ self.train_samples = None
1572
+ warnings.warn(
1573
+ "Ignoring `train_samples` as `train_iters` based on scheduler is being used for training."
1574
+ )
1575
+ if self.lr_warmup_iters is None:
1576
+ self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args["data_parallel_size"]
1577
+ if self.lr_warmup_samples is not None:
1578
+ warnings.warn(
1579
+ "Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training."
1580
+ )
1581
+ self.lr_warmup_samples = 0
1582
+
1583
+ self.megatron_lm_default_args["train_iters"] = self.train_iters
1584
+ self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters
1585
+ self.megatron_lm_default_args["train_samples"] = self.train_samples
1586
+ self.megatron_lm_default_args["lr_warmup_samples"] = self.lr_warmup_samples
1587
+ self.megatron_lm_default_args["lr_decay_iters"] = self.lr_decay_iters
1588
+ self.megatron_lm_default_args["lr_decay_samples"] = self.lr_decay_samples
1589
+ self.megatron_lm_default_args["lr_warmup_fraction"] = self.lr_warmup_fraction
1590
+ self.megatron_lm_default_args["lr_decay_style"] = self.lr_decay_style
1591
+ self.megatron_lm_default_args["weight_decay_incr_style"] = self.weight_decay_incr_style
1592
+ self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay
1593
+ self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay
1594
+ self.megatron_lm_default_args["min_lr"] = self.min_lr
1595
+
1596
+ def set_tensorboard_logging_options(self):
1597
+ from megatron.arguments import _add_logging_args
1598
+
1599
+ parser = argparse.ArgumentParser()
1600
+ parser = _add_logging_args(parser)
1601
+ logging_args = parser.parse_known_args()
1602
+ self.dataset_args = vars(logging_args[0])
1603
+ for key, value in self.dataset_args.items():
1604
+ if key.startswith("log_"):
1605
+ self.megatron_lm_default_args[key] = True
1606
+ elif key.startswith("no_log_"):
1607
+ self.megatron_lm_default_args[key.replace("no_", "")] = True
1608
+
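+ # A minimal usage sketch (assuming Megatron-LM and `accelerate` are installed and
+ # `ACCELERATE_USE_MEGATRON_LM=true` is set; the degrees below are illustrative):
+ #
+ #   from accelerate import Accelerator
+ #   from accelerate.utils import MegatronLMPlugin
+ #
+ #   megatron_plugin = MegatronLMPlugin(tp_degree=2, pp_degree=2, num_micro_batches=4)
+ #   accelerator = Accelerator(megatron_lm_plugin=megatron_plugin)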
1609
+
1610
+ @dataclass
1611
+ class BnbQuantizationConfig:
1612
+ """
1613
+ A plugin to enable BitsAndBytes 4bit and 8bit quantization
1614
+ """
1615
+
1616
+ load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."})
1617
+
1618
+ llm_int8_threshold: float = field(
1619
+ default=6.0, metadata={"help": "value of the outliner threshold. only relevant when load_in_8bit=True"}
1620
+ )
1621
+
1622
+ load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."})
1623
+
1624
+ bnb_4bit_quant_type: str = field(
1625
+ default="fp4",
1626
+ metadata={
1627
+ "help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','np4'}."
1628
+ },
1629
+ )
1630
+
1631
+ bnb_4bit_use_double_quant: bool = field(
1632
+ default=False,
1633
+ metadata={
1634
+ "help": "enable nested quantization where the quantization constants from the first quantization are quantized again."
1635
+ },
1636
+ )
1637
+
1638
+ bnb_4bit_compute_dtype: str = field(
1639
+ default="fp16",
1640
+ metadata={
1641
+ "help": "This sets the computational type which might be different than the input time. For example, inputs might be "
1642
+ "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}."
1643
+ },
1644
+ )
1645
+
1646
+ torch_dtype: torch.dtype = field(
1647
+ default=None,
1648
+ metadata={
1649
+ "help": "this sets the dtype of the remaining non quantized layers. `bitsandbytes` library suggests to set the value"
1650
+ "to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model "
1651
+ },
1652
+ )
1653
+
1654
+ skip_modules: List[str] = field(
1655
+ default=None,
1656
+ metadata={
1657
+ "help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`."
1658
+ },
1659
+ )
1660
+
1661
+ keep_in_fp32_modules: List[str] = field(
1662
+ default=None,
1663
+ metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."},
1664
+ )
1665
+
1666
+ def __post_init__(self):
1667
+ """
1668
+ Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
1669
+ """
1670
+ if not isinstance(self.load_in_8bit, bool):
1671
+ raise ValueError("load_in_8bit must be a boolean")
1672
+
1673
+ if not isinstance(self.load_in_4bit, bool):
1674
+ raise ValueError("load_in_4bit must be a boolean")
1675
+
1676
+ if self.load_in_4bit and self.load_in_8bit:
1677
+ raise ValueError("load_in_4bit and load_in_8 can't be both True")
1678
+
1679
+ if not self.load_in_4bit and not self.load_in_8bit:
1680
+ raise ValueError("load_in_4bit and load_in_8 can't be both False")
1681
+
1682
+ if not isinstance(self.llm_int8_threshold, (int, float)):
1683
+ raise ValueError("llm_int8_threshold must be a float or an int")
1684
+
1685
+ if not isinstance(self.bnb_4bit_quant_type, str):
1686
+ raise ValueError("bnb_4bit_quant_type must be a string")
1687
+ elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]:
1688
+ raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}")
1689
+
1690
+ if not isinstance(self.bnb_4bit_use_double_quant, bool):
1691
+ raise ValueError("bnb_4bit_use_double_quant must be a boolean")
1692
+
1693
+ if isinstance(self.bnb_4bit_compute_dtype, str):
1694
+ if self.bnb_4bit_compute_dtype == "fp32":
1695
+ self.bnb_4bit_compute_dtype = torch.float32
1696
+ elif self.bnb_4bit_compute_dtype == "fp16":
1697
+ self.bnb_4bit_compute_dtype = torch.float16
1698
+ elif self.bnb_4bit_compute_dtype == "bf16":
1699
+ self.bnb_4bit_compute_dtype = torch.bfloat16
1700
+ else:
1701
+ raise ValueError(
1702
+ f"bnb_4bit_compute_dtype must be in ['fp32','fp16','bf16'] but found {self.bnb_4bit_compute_dtype}"
1703
+ )
1704
+ elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
1705
+ raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
1706
+
1707
+ if self.skip_modules is not None and not isinstance(self.skip_modules, list):
1708
+ raise ValueError("skip_modules must be a list of strings")
1709
+
1710
+ if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list):
1711
+ raise ValueError("keep_in_fp_32_modules must be a list of strings")
1712
+
1713
+ if self.load_in_4bit:
1714
+ self.target_dtype = CustomDtype.INT4
1715
+
1716
+ if self.load_in_8bit:
1717
+ self.target_dtype = torch.int8
1718
+
1719
+ if self.load_in_4bit and self.llm_int8_threshold != 6.0:
1720
+ warnings.warn("llm_int8_threshold can only be used for model loaded in 8bit")
1721
+
1722
+ if isinstance(self.torch_dtype, str):
1723
+ if self.torch_dtype == "fp32":
1724
+ self.torch_dtype = torch.float32
1725
+ elif self.torch_dtype == "fp16":
1726
+ self.torch_dtype = torch.float16
1727
+ elif self.torch_dtype == "bf16":
1728
+ self.torch_dtype = torch.bfloat16
1729
+ else:
1730
+ raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}")
1731
+ if self.load_in_8bit and self.torch_dtype is None:
1732
+ self.torch_dtype = torch.float16
1733
+
1734
+ if self.load_in_4bit and self.torch_dtype is None:
1735
+ self.torch_dtype = self.bnb_4bit_compute_dtype
1736
+
1737
+ if not isinstance(self.torch_dtype, torch.dtype):
1738
+ raise ValueError("torch_dtype must be a torch.dtype")
1739
+
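+ # A minimal usage sketch (assuming `bitsandbytes` and `accelerate` are installed;
+ # `model` is a placeholder for an already-instantiated model):
+ #
+ #   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
+ #
+ #   bnb_config = BnbQuantizationConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
+ #   model = load_and_quantize_model(model, bnb_quantization_config=bnb_config)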
1740
+
1741
+ def get_module_class_from_name(module, name):
1742
+ """
1743
+ Gets a class from a module by its name.
1744
+
1745
+ Args:
1746
+ module (`torch.nn.Module`): The module to get the class from.
1747
+ name (`str`): The name of the class.
1748
+ """
1749
+ modules_children = list(module.children())
1750
+ if module.__class__.__name__ == name:
1751
+ return module.__class__
1752
+ elif len(modules_children) == 0:
1753
+ return
1754
+ else:
1755
+ for child_module in modules_children:
1756
+ module_class = get_module_class_from_name(child_module, name)
1757
+ if module_class is not None:
1758
+ return module_class
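+
+ # For example (class name is illustrative), `get_module_class_from_name(model, "GPT2Block")`
+ # recursively walks `model.children()` and returns the `GPT2Block` class object if any
+ # submodule is an instance of it, or `None` otherwise.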
llmeval-env/lib/python3.10/site-packages/accelerate/utils/environment.py ADDED
@@ -0,0 +1,274 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ import math
17
+ import os
18
+ import platform
19
+ import subprocess
20
+ import sys
21
+ from dataclasses import dataclass, field
22
+ from functools import lru_cache
23
+ from shutil import which
24
+ from typing import List, Optional
25
+
26
+ import torch
27
+ from packaging.version import parse
28
+
29
+
30
+ logger = logging.getLogger(__name__)
31
+
32
+
33
+ def convert_dict_to_env_variables(current_env: dict):
34
+ """
35
+ Verifies that all keys and values in `current_env` do not contain illegal characters and are non-empty, and returns a list of
36
+ strings as the result.
37
+
38
+ Example:
39
+ ```python
40
+ >>> from accelerate.utils.environment import convert_dict_to_env_variables
41
+
42
+ >>> env = {"ACCELERATE_DEBUG_MODE": "1", "BAD_ENV_NAME": "<mything", "OTHER_ENV": "2"}
43
+ >>> valid_env_items = convert_dict_to_env_variables(env)
44
+ >>> print(valid_env_items)
45
+ ["ACCELERATE_DEBUG_MODE=1\n", "OTHER_ENV=2\n"]
46
+ ```
47
+ """
48
+ forbidden_chars = [";", "\n", "<", ">", " "]
49
+ valid_env_items = []
50
+ for key, value in current_env.items():
51
+ if all(char not in (key + value) for char in forbidden_chars) and len(key) >= 1 and len(value) >= 1:
52
+ valid_env_items.append(f"{key}={value}\n")
53
+ else:
54
+ logger.warning(f"WARNING: Skipping {key}={value} as it contains forbidden characters or missing values.")
55
+ return valid_env_items
56
+
57
+
58
+ def str_to_bool(value) -> int:
59
+ """
60
+ Converts a string representation of truth to `True` (1) or `False` (0).
61
+
62
+ True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False value are `n`, `no`, `f`, `false`, `off`, and `0`;
63
+ """
64
+ value = value.lower()
65
+ if value in ("y", "yes", "t", "true", "on", "1"):
66
+ return 1
67
+ elif value in ("n", "no", "f", "false", "off", "0"):
68
+ return 0
69
+ else:
70
+ raise ValueError(f"invalid truth value {value}")
71
+
72
+
73
+ def get_int_from_env(env_keys, default):
74
+ """Returns the first positive env value found in the `env_keys` list or the default."""
75
+ for e in env_keys:
76
+ val = int(os.environ.get(e, -1))
77
+ if val >= 0:
78
+ return val
79
+ return default
80
+
81
+
82
+ def parse_flag_from_env(key, default=False):
83
+ """Returns truthy value for `key` from the env if available else the default."""
84
+ value = os.environ.get(key, str(default))
85
+ return str_to_bool(value) == 1 # As its name indicates `str_to_bool` actually returns an int...
86
+
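+ # For example, with `ACCELERATE_DEBUG_MODE=yes` in the environment,
+ # `parse_flag_from_env("ACCELERATE_DEBUG_MODE")` returns True, because
+ # `str_to_bool("yes")` evaluates to 1.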
87
+
88
+ def parse_choice_from_env(key, default="no"):
89
+ value = os.environ.get(key, str(default))
90
+ return value
91
+
92
+
93
+ def are_libraries_initialized(*library_names: str) -> List[str]:
94
+ """
95
+ Checks if any of `library_names` are imported in the environment. Will return any names that are.
96
+ """
97
+ return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]
98
+
99
+
100
+ def _nvidia_smi():
101
+ """
102
+ Returns the right nvidia-smi command based on the system.
103
+ """
104
+ if platform.system() == "Windows":
105
+ # If platform is Windows and nvidia-smi can't be found in path
106
+ # try from the system drive with the default installation path
107
+ command = which("nvidia-smi")
108
+ if command is None:
109
+ command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"]
110
+ else:
111
+ command = "nvidia-smi"
112
+ return command
113
+
114
+
115
+ def get_gpu_info():
116
+ """
117
+ Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA.
118
+
119
+ Largely based on the `gputil` library.
120
+ """
121
+ # Returns as list of `n` GPUs and their names
122
+ output = subprocess.check_output(
123
+ [_nvidia_smi(), "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True
124
+ )
125
+ output = output.strip()
126
+ gpus = output.split(os.linesep)
127
+ # Get names from output
128
+ gpu_count = len(gpus)
129
+ gpu_names = [gpu.split(",")[1].strip() for gpu in gpus]
130
+ return gpu_names, gpu_count
131
+
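+ # Illustrative return value on a two-GPU node (names are examples only):
+ #   (["NVIDIA A100-SXM4-80GB", "NVIDIA A100-SXM4-80GB"], 2)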
132
+
133
+ def get_driver_version():
134
+ """
135
+ Returns the driver version
136
+
137
+ In the case of multiple GPUs, will return the first.
138
+ """
139
+ output = subprocess.check_output(
140
+ [_nvidia_smi(), "--query-gpu=driver_version", "--format=csv,noheader"], universal_newlines=True
141
+ )
142
+ output = output.strip()
143
+ return output.split(os.linesep)[0]
144
+
145
+
146
+ def check_cuda_p2p_ib_support():
147
+ """
148
+ Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after
149
+ the 3090.
150
+
151
+ Notably uses `nvidia-smi` instead of torch to not initialize CUDA.
152
+ """
153
+ try:
154
+ device_names, device_count = get_gpu_info()
155
+ # As new consumer GPUs get released, add them to `unsupported_devices`
156
+ unsupported_devices = {"RTX 40"}
157
+ if device_count > 1:
158
+ if any(
159
+ unsupported_device in device_name
160
+ for device_name in device_names
161
+ for unsupported_device in unsupported_devices
162
+ ):
163
+ # Check if they have the right driver version
164
+ acceptable_driver_version = "550.40.07"
165
+ current_driver_version = get_driver_version()
166
+ if parse(current_driver_version) < parse(acceptable_driver_version):
167
+ return False
168
+ return True
169
+ except Exception:
170
+ pass
171
+ return True
172
+
173
+
174
+ def check_fp8_capability():
175
+ """
176
+ Checks if all the current GPUs available support FP8.
177
+
178
+ Notably must initialize `torch.cuda` to check.
179
+ """
180
+ cuda_device_capacity = torch.cuda.get_device_capability()
181
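+ # Compute capability (8, 9) is Ada Lovelace; Hopper (9, 0) and newer also satisfy this check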
+ return cuda_device_capacity >= (8, 9)
182
+
183
+
184
+ @dataclass
185
+ class CPUInformation:
186
+ """
187
+ Stores information about the CPU in a distributed environment. It contains the following attributes:
188
+ - rank: The rank of the current process.
189
+ - world_size: The total number of processes in the world.
190
+ - local_rank: The rank of the current process on the local node.
191
+ - local_world_size: The total number of processes on the local node.
192
+ """
193
+
194
+ rank: int = field(default=0, metadata={"help": "The rank of the current process."})
195
+ world_size: int = field(default=1, metadata={"help": "The total number of processes in the world."})
196
+ local_rank: int = field(default=0, metadata={"help": "The rank of the current process on the local node."})
197
+ local_world_size: int = field(default=1, metadata={"help": "The total number of processes on the local node."})
198
+
199
+
200
+ def get_cpu_distributed_information() -> CPUInformation:
201
+ """
202
+ Returns various information about the environment in relation to CPU distributed training as a `CPUInformation`
203
+ dataclass.
204
+ """
205
+ information = {}
206
+ information["rank"] = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0)
207
+ information["world_size"] = get_int_from_env(
208
+ ["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1
209
+ )
210
+ information["local_rank"] = get_int_from_env(
211
+ ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0
212
+ )
213
+ information["local_world_size"] = get_int_from_env(
214
+ ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"],
215
+ 1,
216
+ )
217
+ return CPUInformation(**information)
218
+
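+ # For example, under `mpirun -n 8` with 4 processes per node, rank 5 would typically
+ # see: CPUInformation(rank=5, world_size=8, local_rank=1, local_world_size=4)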
219
+
220
+ def override_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None:
221
+ """
222
+ Overrides whatever NUMA affinity is set for the current process. This is very taxing and requires recalculating the
223
+ affinity to set; ideally you should use `utils.environment.set_numa_affinity` instead.
224
+
225
+ Args:
226
+ local_process_index (int):
227
+ The index of the current process on the current server.
228
+ verbose (bool, *optional*):
229
+ Whether to log out the assignment of each CPU. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True.
230
+ """
231
+ if verbose is None:
232
+ verbose = parse_flag_from_env("ACCELERATE_DEBUG_MODE", False)
233
+ if torch.cuda.is_available():
234
+ from accelerate.utils import is_pynvml_available
235
+
236
+ if not is_pynvml_available():
237
+ raise ImportError(
238
+ "To set CPU affinity on CUDA GPUs the `pynvml` package must be available. (`pip install pynvml`)"
239
+ )
240
+ import pynvml as nvml
241
+
242
+ # The below code is based on https://github.com/NVIDIA/DeepLearningExamples/blob/master/TensorFlow2/LanguageModeling/BERT/gpu_affinity.py
243
+ nvml.nvmlInit()
244
+ num_elements = math.ceil(os.cpu_count() / 64)
245
+ handle = nvml.nvmlDeviceGetHandleByIndex(local_process_index)
246
+ affinity_string = ""
247
+ for j in nvml.nvmlDeviceGetCpuAffinity(handle, num_elements):
248
+ # assume nvml returns list of 64 bit ints
249
+ affinity_string = f"{j:064b}{affinity_string}"
250
+ affinity_list = [int(x) for x in affinity_string]
251
+ affinity_list.reverse() # so core 0 is the 0th element
252
+ affinity_to_set = [i for i, e in enumerate(affinity_list) if e != 0]
253
+ os.sched_setaffinity(0, affinity_to_set)
254
+ if verbose:
255
+ cpu_cores = os.sched_getaffinity(0)
256
+ logger.info(f"Assigning {len(cpu_cores)} cpu cores to process {local_process_index}: {cpu_cores}")
257
+
258
+
259
+ @lru_cache
260
+ def set_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None:
261
+ """
262
+ Assigns the current process to a specific NUMA node. Ideally most efficient when having at least 2 cpus per node.
263
+
264
+ This result is cached between calls. If you want to override it, please use
265
+ `accelerate.utils.environment.override_numa_affinity`.
266
+
267
+ Args:
268
+ local_process_index (int):
269
+ The index of the current process on the current server.
270
+ verbose (bool, *optional*):
271
+ Whether to print the new cpu cores assignment for each process. If `ACCELERATE_DEBUG_MODE` is enabled, will
272
+ default to True.
273
+ """
274
+ override_numa_affinity(local_process_index=local_process_index, verbose=verbose)
llmeval-env/lib/python3.10/site-packages/accelerate/utils/imports.py ADDED
@@ -0,0 +1,403 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib
16
+ import importlib.metadata
17
+ import os
18
+ import warnings
19
+ from functools import lru_cache
20
+
21
+ import torch
22
+ from packaging import version
23
+ from packaging.version import parse
24
+
25
+ from .environment import parse_flag_from_env, str_to_bool
26
+ from .versions import compare_versions, is_torch_version
27
+
28
+
29
+ # Try to run Torch native job in an environment with TorchXLA installed by setting this value to 0.
30
+ USE_TORCH_XLA = parse_flag_from_env("USE_TORCH_XLA", default=True)
31
+
32
+ _torch_xla_available = False
33
+ if USE_TORCH_XLA:
34
+ try:
35
+ import torch_xla.core.xla_model as xm # noqa: F401
36
+ import torch_xla.runtime
37
+
38
+ _torch_xla_available = True
39
+ except ImportError:
40
+ pass
41
+
42
+ # Keep it for is_tpu_available. It will be removed along with is_tpu_available.
43
+ _tpu_available = _torch_xla_available
44
+
45
+ # Cache this result as it's a C FFI call which can be pretty time-consuming
46
+ _torch_distributed_available = torch.distributed.is_available()
47
+
48
+
49
+ def _is_package_available(pkg_name, metadata_name=None):
50
+ # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version
51
+ package_exists = importlib.util.find_spec(pkg_name) is not None
52
+ if package_exists:
53
+ try:
54
+ # Some libraries have different names in the metadata
55
+ _ = importlib.metadata.metadata(pkg_name if metadata_name is None else metadata_name)
56
+ return True
57
+ except importlib.metadata.PackageNotFoundError:
58
+ return False
+ # The spec was not found at all, so the package is not installed
+ return False
59
+
60
+
61
+ def is_torch_distributed_available() -> bool:
62
+ return _torch_distributed_available
63
+
64
+
65
+ def is_ccl_available():
+ ccl_available = (
+ importlib.util.find_spec("torch_ccl") is not None
+ or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
+ )
+ if not ccl_available:
+ print(
+ "Intel(R) oneCCL Bindings for PyTorch* is required to run DDP on Intel(R) GPUs, but it is not"
+ " detected. If you see \"ValueError: Invalid backend: 'ccl'\" error, please install Intel(R) oneCCL"
+ " Bindings for PyTorch*."
+ )
+ return ccl_available
78
+
79
+
80
+ def get_ccl_version():
81
+ return importlib.metadata.version("oneccl_bind_pt")
82
+
83
+
84
+ def is_pynvml_available():
85
+ return _is_package_available("pynvml")
86
+
87
+
88
+ def is_pytest_available():
89
+ return _is_package_available("pytest")
90
+
91
+
92
+ def is_msamp_available():
93
+ return _is_package_available("msamp", "ms-amp")
94
+
95
+
96
+ def is_schedulefree_available():
97
+ return _is_package_available("schedulefree")
98
+
99
+
100
+ def is_transformer_engine_available():
101
+ return _is_package_available("transformer_engine")
102
+
103
+
104
+ def is_lomo_available():
105
+ return _is_package_available("lomo_optim")
106
+
107
+
108
+ def is_fp8_available():
109
+ return is_msamp_available() or is_transformer_engine_available()
110
+
111
+
112
+ def is_cuda_available():
113
+ """
114
+ Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda
115
+ uninitialized.
116
+ """
117
+ pytorch_nvml_based_cuda_check_previous_value = os.environ.get("PYTORCH_NVML_BASED_CUDA_CHECK")
118
+ try:
119
+ os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = str(1)
120
+ available = torch.cuda.is_available()
121
+ finally:
122
+ if pytorch_nvml_based_cuda_check_previous_value:
123
+ os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = pytorch_nvml_based_cuda_check_previous_value
124
+ else:
125
+ os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None)
126
+
127
+ return available
128
+
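+ # Setting PYTORCH_NVML_BASED_CUDA_CHECK=1 makes `torch.cuda.is_available()` use an
+ # NVML-based query instead of initializing a CUDA context, so subprocesses forked
+ # later can still initialize CUDA themselves.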
129
+
130
+ @lru_cache
131
+ def is_tpu_available(check_device=True):
132
+ "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
133
+ warnings.warn(
134
+ "`is_tpu_available` is deprecated and will be removed in v0.27.0. "
135
+ "Please use the `is_torch_xla_available` instead.",
136
+ FutureWarning,
137
+ )
138
+ # Due to bugs on the amp series GPUs, we disable torch-xla on them
139
+ if is_cuda_available():
140
+ return False
141
+ if check_device:
142
+ if _tpu_available:
143
+ try:
144
+ # Will raise a RuntimeError if no XLA configuration is found
145
+ _ = xm.xla_device()
146
+ return True
147
+ except RuntimeError:
148
+ return False
149
+ return _tpu_available
150
+
151
+
152
+ @lru_cache
153
+ def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False):
154
+ """
155
+ Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set
156
+ the USE_TORCH_XLA to false.
157
+ """
158
+ assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true."
159
+
160
+ if not _torch_xla_available:
161
+ return False
162
+ elif check_is_gpu:
163
+ return torch_xla.runtime.device_type() in ["GPU", "CUDA"]
164
+ elif check_is_tpu:
165
+ return torch_xla.runtime.device_type() == "TPU"
166
+
167
+ return True
168
+
169
+
170
+ def is_deepspeed_available():
171
+ if is_mlu_available():
172
+ return _is_package_available("deepspeed", metadata_name="deepspeed-mlu")
173
+ return _is_package_available("deepspeed")
174
+
175
+
176
+ def is_pippy_available():
177
+ package_exists = _is_package_available("pippy", "torchpippy")
178
+ if package_exists:
179
+ pippy_version = version.parse(importlib.metadata.version("torchpippy"))
180
+ return compare_versions(pippy_version, ">", "0.1.1")
181
+ return False
182
+
183
+
184
+ def is_bf16_available(ignore_tpu=False):
185
+ "Checks if bf16 is supported, optionally ignoring the TPU"
186
+ if is_torch_xla_available(check_is_tpu=True):
187
+ return not ignore_tpu
188
+ if is_cuda_available():
189
+ return torch.cuda.is_bf16_supported()
190
+ if is_mps_available():
191
+ return False
192
+ return True
193
+
194
+
195
+ def is_4bit_bnb_available():
196
+ package_exists = _is_package_available("bitsandbytes")
197
+ if package_exists:
198
+ bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
199
+ return compare_versions(bnb_version, ">=", "0.39.0")
200
+ return False
201
+
202
+
203
+ def is_8bit_bnb_available():
204
+ package_exists = _is_package_available("bitsandbytes")
205
+ if package_exists:
206
+ bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
207
+ return compare_versions(bnb_version, ">=", "0.37.2")
208
+ return False
209
+
210
+
211
+ def is_bnb_available():
212
+ return _is_package_available("bitsandbytes")
213
+
214
+
215
+ def is_torchvision_available():
216
+ return _is_package_available("torchvision")
217
+
218
+
219
+ def is_megatron_lm_available():
220
+ if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
221
+ package_exists = importlib.util.find_spec("megatron") is not None
222
+ if package_exists:
223
+ try:
224
+ megatron_version = parse(importlib.metadata.version("megatron-lm"))
225
+ return compare_versions(megatron_version, ">=", "2.2.0")
226
+ except Exception as e:
227
+ warnings.warn(f"Parse Megatron version failed. Exception:{e}")
228
+ return False
229
+
230
+
231
+ def is_transformers_available():
232
+ return _is_package_available("transformers")
233
+
234
+
235
+ def is_datasets_available():
236
+ return _is_package_available("datasets")
237
+
238
+
239
+ def is_peft_available():
240
+ return _is_package_available("peft")
241
+
242
+
243
+ def is_timm_available():
244
+ return _is_package_available("timm")
245
+
246
+
247
+ def is_aim_available():
248
+ package_exists = _is_package_available("aim")
249
+ if package_exists:
250
+ aim_version = version.parse(importlib.metadata.version("aim"))
251
+ return compare_versions(aim_version, "<", "4.0.0")
252
+ return False
253
+
254
+
255
+ def is_tensorboard_available():
256
+ return _is_package_available("tensorboard") or _is_package_available("tensorboardX")
257
+
258
+
259
+ def is_wandb_available():
260
+ return _is_package_available("wandb")
261
+
262
+
263
+ def is_comet_ml_available():
264
+ return _is_package_available("comet_ml")
265
+
266
+
267
+ def is_boto3_available():
268
+ return _is_package_available("boto3")
269
+
270
+
271
+ def is_rich_available():
272
+ if _is_package_available("rich"):
273
+ if "ACCELERATE_DISABLE_RICH" in os.environ:
274
+ warnings.warn(
275
+ "`ACCELERATE_DISABLE_RICH` is deprecated and will be removed in v0.22.0 and deactivated by default. Please use `ACCELERATE_ENABLE_RICH` if you wish to use `rich`."
276
+ )
277
+ return not parse_flag_from_env("ACCELERATE_DISABLE_RICH", False)
278
+ return parse_flag_from_env("ACCELERATE_ENABLE_RICH", False)
279
+ return False
280
+
281
+
282
+ def is_sagemaker_available():
283
+ return _is_package_available("sagemaker")
284
+
285
+
286
+ def is_tqdm_available():
287
+ return _is_package_available("tqdm")
288
+
289
+
290
+ def is_clearml_available():
291
+ return _is_package_available("clearml")
292
+
293
+
294
+ def is_pandas_available():
295
+ return _is_package_available("pandas")
296
+
297
+
298
+ def is_mlflow_available():
299
+ if _is_package_available("mlflow"):
300
+ return True
301
+
302
+ if importlib.util.find_spec("mlflow") is not None:
303
+ try:
304
+ _ = importlib.metadata.metadata("mlflow-skinny")
305
+ return True
306
+ except importlib.metadata.PackageNotFoundError:
307
+ return False
308
+ return False
309
+
310
+
311
+ def is_mps_available():
312
+ return is_torch_version(">=", "1.12") and torch.backends.mps.is_available() and torch.backends.mps.is_built()
313
+
314
+
315
+ def is_ipex_available():
316
+ def get_major_and_minor_from_version(full_version):
317
+ return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)
318
+
319
+ _torch_version = importlib.metadata.version("torch")
320
+ if importlib.util.find_spec("intel_extension_for_pytorch") is None:
321
+ return False
322
+ _ipex_version = "N/A"
323
+ try:
324
+ _ipex_version = importlib.metadata.version("intel_extension_for_pytorch")
325
+ except importlib.metadata.PackageNotFoundError:
326
+ return False
327
+ torch_major_and_minor = get_major_and_minor_from_version(_torch_version)
328
+ ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version)
329
+ if torch_major_and_minor != ipex_major_and_minor:
330
+ warnings.warn(
331
+ f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*,"
332
+ f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again."
333
+ )
334
+ return False
335
+ return True
336
+
337
+
338
+ @lru_cache
339
+ def is_mlu_available(check_device=False):
340
+ "Checks if `torch_mlu` is installed and potentially if a MLU is in the environment"
341
+ if importlib.util.find_spec("torch_mlu") is None:
342
+ return False
343
+
344
+ import torch
345
+ import torch_mlu # noqa: F401
346
+
347
+ if check_device:
348
+ try:
349
+ # Will raise a RuntimeError if no MLU is found
350
+ _ = torch.mlu.device_count()
351
+ return torch.mlu.is_available()
352
+ except RuntimeError:
353
+ return False
354
+ return hasattr(torch, "mlu") and torch.mlu.is_available()
355
+
356
+
357
+ @lru_cache
358
+ def is_npu_available(check_device=False):
359
+ "Checks if `torch_npu` is installed and potentially if a NPU is in the environment"
360
+ if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None:
361
+ return False
362
+
363
+ import torch
364
+ import torch_npu # noqa: F401
365
+
366
+ if check_device:
367
+ try:
368
+ # Will raise a RuntimeError if no NPU is found
369
+ _ = torch.npu.device_count()
370
+ return torch.npu.is_available()
371
+ except RuntimeError:
372
+ return False
373
+ return hasattr(torch, "npu") and torch.npu.is_available()
374
+
375
+
376
+ @lru_cache
377
+ def is_xpu_available(check_device=False):
378
+ "check if user disables it explicitly"
379
+ if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True):
380
+ return False
381
+ "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment"
382
+ if is_ipex_available():
383
+ import torch
384
+
385
+ if is_torch_version("<=", "1.12"):
386
+ return False
387
+ else:
388
+ return False
389
+
390
+ import intel_extension_for_pytorch # noqa: F401
391
+
392
+ if check_device:
393
+ try:
394
+ # Will raise a RuntimeError if no XPU is found
395
+ _ = torch.xpu.device_count()
396
+ return torch.xpu.is_available()
397
+ except RuntimeError:
398
+ return False
399
+ return hasattr(torch, "xpu") and torch.xpu.is_available()
400
+
401
+
402
+ def is_dvclive_available():
403
+ return _is_package_available("dvclive")
llmeval-env/lib/python3.10/site-packages/accelerate/utils/launch.py ADDED
@@ -0,0 +1,626 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ import os
+ import subprocess
+ import sys
+ import warnings
+ from ast import literal_eval
+ from shutil import which
+ from typing import Any, Dict, List, Tuple
+
+ import torch
+
+ from ..commands.config.config_args import SageMakerConfig
+ from ..utils import (
+     DynamoBackend,
+     PrecisionType,
+     is_ipex_available,
+     is_mlu_available,
+     is_npu_available,
+     is_torch_xla_available,
+     is_xpu_available,
+ )
+ from ..utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS
+ from ..utils.other import is_port_in_use, merge_dicts
+ from .dataclasses import DistributedType, SageMakerDistributedType
+
+
+ def _filter_args(args, parser, default_args=[]):
+     """
+     Filters out all `accelerate` specific args
+     """
+     new_args, _ = parser.parse_known_args(default_args)
+     for key, value in vars(args).items():
+         if key in vars(new_args).keys():
+             setattr(new_args, key, value)
+     return new_args
+
+
+ def _get_mpirun_args():
+     """
+     Determines the executable and argument names for mpirun, based on the type of install. The supported MPI programs
+     are: OpenMPI, Intel MPI, or MVAPICH.
+
+     Returns: Program name and arg names for hostfile, num processes, and processes per node
+     """
+     # Find the MPI program name
+     mpi_apps = [x for x in ["mpirun", "mpiexec"] if which(x)]
+
+     if len(mpi_apps) == 0:
+         raise OSError("mpirun or mpiexec were not found. Ensure that Intel MPI, Open MPI, or MVAPICH are installed.")
+
+     # Call the app with the --version flag to determine which MPI app is installed
+     mpi_app = mpi_apps[0]
+     mpirun_version = subprocess.check_output([mpi_app, "--version"])
+
+     if b"Open MPI" in mpirun_version:
+         return mpi_app, "--hostfile", "-n", "--npernode"
+     else:
+         # Intel MPI and MVAPICH both use the same arg names
+         return mpi_app, "-f", "-n", "-ppn"
+
+
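As a quick illustration, a minimal sketch of how the returned tuple is consumed to assemble an MPI command line; the hostfile path and process counts here are hypothetical:

# Minimal sketch (hypothetical values): assemble an mpirun command from the tuple above.
mpi_app, hostfile_arg, num_proc_arg, proc_per_node_arg = _get_mpirun_args()
cmd = [mpi_app, hostfile_arg, "/tmp/hostfile", proc_per_node_arg, "4", num_proc_arg, "8"]
# With Open MPI this yields: ['mpirun', '--hostfile', '/tmp/hostfile', '--npernode', '4', '-n', '8']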
+ def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]:
+     """
+     Prepares and returns the command list and an environment with the correct simple launcher environment variables.
+     """
+     cmd = []
+     if args.no_python and args.module:
+         raise ValueError("--module and --no_python cannot be used together")
+
+     if args.mpirun_hostfile is not None:
+         mpi_app_name, hostfile_arg, num_proc_arg, proc_per_node_arg = _get_mpirun_args()
+         mpirun_ccl = getattr(args, "mpirun_ccl", None)
+         num_machines = args.num_machines
+         num_processes = getattr(args, "num_processes", None)
+         nproc_per_node = str(num_processes // num_machines) if num_processes and num_machines else "1"
+         cmd += [mpi_app_name, hostfile_arg, args.mpirun_hostfile, proc_per_node_arg, nproc_per_node]
+         if num_processes:
+             cmd += [num_proc_arg, str(num_processes)]
+     if not args.no_python:
+         cmd.append(sys.executable)
+         if args.module:
+             cmd.append("-m")
+     cmd.append(args.training_script)
+     cmd.extend(args.training_script_args)
+
+     current_env = os.environ.copy()
+     current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu)
+     if args.debug:
+         current_env["ACCELERATE_DEBUG_MODE"] = "true"
+     if args.gpu_ids != "all" and args.gpu_ids is not None:
+         if is_xpu_available():
+             current_env["ZE_AFFINITY_MASK"] = args.gpu_ids
+         elif is_mlu_available():
+             current_env["MLU_VISIBLE_DEVICES"] = args.gpu_ids
+         elif is_npu_available():
+             current_env["ASCEND_RT_VISIBLE_DEVICES"] = args.gpu_ids
+         else:
+             current_env["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
+     if args.num_machines > 1:
+         current_env["MASTER_ADDR"] = args.main_process_ip
+         current_env["MASTER_PORT"] = str(args.main_process_port)
+
+         if args.mpirun_hostfile is not None:
+             # `mpirun_ccl` may be None; cast to str so the environment stays a str -> str mapping
+             current_env["CCL_WORKER_COUNT"] = str(mpirun_ccl)
+     elif args.num_processes > 1:
+         current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1"
+         current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500"
+
+     try:
+         mixed_precision = PrecisionType(args.mixed_precision.lower())
+     except ValueError:
+         raise ValueError(
+             f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
+         )
+
+     current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
+
+     try:
+         dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
+     except ValueError:
+         raise ValueError(
+             f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
+         )
+     current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value
+     current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
+     current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)
+     current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic)
+
+     current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process)
+     if is_ipex_available():
+         current_env["ACCELERATE_USE_IPEX"] = str(args.ipex).lower()
+         current_env["ACCELERATE_USE_XPU"] = str(args.use_xpu).lower()
+     if args.enable_cpu_affinity:
+         current_env["ACCELERATE_CPU_AFFINITY"] = "1"
+     return cmd, current_env
+
+
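A hedged usage sketch: build a namespace with the fields the function reads (values illustrative, mirroring what the accelerate CLI parser would produce) and launch the resulting command:

import subprocess
from types import SimpleNamespace

# Illustrative namespace; in practice `accelerate launch` builds this from its CLI parser.
args = SimpleNamespace(
    no_python=False, module=False, mpirun_hostfile=None,
    cpu=False, use_cpu=False, debug=False, gpu_ids="all",
    num_machines=1, num_processes=1,
    mixed_precision="no", dynamo_backend="no", dynamo_mode="default",
    dynamo_use_fullgraph=False, dynamo_use_dynamic=False,
    num_cpu_threads_per_process=1, ipex=False, use_xpu=False,
    enable_cpu_affinity=False,
    training_script="train.py", training_script_args=[],
)
cmd, env = prepare_simple_launcher_cmd_env(args)
subprocess.run(cmd, env=env)  # here cmd is simply [sys.executable, "train.py"]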
+ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
+     """
+     Prepares and returns an environment with the correct multi-GPU environment variables.
+     """
+     num_processes = args.num_processes
+     num_machines = args.num_machines
+     main_process_ip = args.main_process_ip
+     main_process_port = args.main_process_port
+     if num_machines > 1:
+         args.nproc_per_node = str(num_processes // num_machines)
+         args.nnodes = str(num_machines)
+         args.node_rank = int(args.machine_rank)
+         if getattr(args, "same_network", False):
+             args.master_addr = str(main_process_ip)
+             args.master_port = str(main_process_port)
+         else:
+             args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}"
+     else:
+         args.nproc_per_node = str(num_processes)
+         if main_process_port is not None:
+             args.master_port = str(main_process_port)
+
+     if main_process_port is None:
+         main_process_port = 29500
+
+     # Only need to check port availability on the main process, in case we have to start multiple launchers
+     # on the same machine for reasons such as splitting log files.
+     need_port_check = num_machines <= 1 or int(args.machine_rank) == 0
+     if need_port_check and is_port_in_use(main_process_port):
+         raise ConnectionError(
+             f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. "
+             "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)"
+             " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`."
+         )
+
+     if args.module and args.no_python:
+         raise ValueError("--module and --no_python cannot be used together")
+     elif args.module:
+         args.module = True
+     elif args.no_python:
+         args.no_python = True
+
+     current_env = os.environ.copy()
+     if args.debug:
+         current_env["ACCELERATE_DEBUG_MODE"] = "true"
+     gpu_ids = getattr(args, "gpu_ids", "all")
+     if gpu_ids != "all" and args.gpu_ids is not None:
+         if is_xpu_available():
+             current_env["ZE_AFFINITY_MASK"] = gpu_ids
+         elif is_mlu_available():
+             current_env["MLU_VISIBLE_DEVICES"] = gpu_ids
+         elif is_npu_available():
+             current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids
+         else:
+             current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids
+     mixed_precision = args.mixed_precision.lower()
+     try:
+         mixed_precision = PrecisionType(mixed_precision)
+     except ValueError:
+         raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.")
+
+     current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
+
+     try:
+         dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
+     except ValueError:
+         raise ValueError(
+             f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
+         )
+     current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value
+     current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
+     current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)
+     current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic)
+
+     if args.use_fsdp:
+         current_env["ACCELERATE_USE_FSDP"] = "true"
+         if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states:
+             raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`")
+
+         current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy)
+         current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower()
+         current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params)
+         if args.fsdp_auto_wrap_policy is not None:
+             current_env["FSDP_AUTO_WRAP_POLICY"] = str(args.fsdp_auto_wrap_policy)
+         if args.fsdp_transformer_layer_cls_to_wrap is not None:
+             current_env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = str(args.fsdp_transformer_layer_cls_to_wrap)
+         if args.fsdp_backward_prefetch_policy is not None:
+             warnings.warn(
+                 "`fsdp_backward_prefetch_policy` is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use"
+                 " `fsdp_backward_prefetch` instead",
+                 FutureWarning,
+             )
+             args.fsdp_backward_prefetch = args.fsdp_backward_prefetch_policy
+         if args.fsdp_backward_prefetch is not None:
+             current_env["FSDP_BACKWARD_PREFETCH"] = str(args.fsdp_backward_prefetch)
+         if args.fsdp_state_dict_type is not None:
+             current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type)
+         current_env["FSDP_FORWARD_PREFETCH"] = str(args.fsdp_forward_prefetch).lower()
+         current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower()
+         current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower()
+         current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower()
+
+     if args.use_megatron_lm:
+         prefix = "MEGATRON_LM_"
+         current_env["ACCELERATE_USE_MEGATRON_LM"] = "true"
+         current_env[prefix + "TP_DEGREE"] = str(args.megatron_lm_tp_degree)
+         current_env[prefix + "PP_DEGREE"] = str(args.megatron_lm_pp_degree)
+         current_env[prefix + "GRADIENT_CLIPPING"] = str(args.megatron_lm_gradient_clipping)
+         if args.megatron_lm_num_micro_batches is not None:
+             current_env[prefix + "NUM_MICRO_BATCHES"] = str(args.megatron_lm_num_micro_batches)
+         if args.megatron_lm_sequence_parallelism is not None:
+             current_env[prefix + "SEQUENCE_PARALLELISM"] = str(args.megatron_lm_sequence_parallelism)
+         if args.megatron_lm_recompute_activations is not None:
+             current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations)
+         if args.megatron_lm_use_distributed_optimizer is not None:
+             current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer)
+
+     current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process)
+     if args.enable_cpu_affinity:
+         current_env["ACCELERATE_CPU_AFFINITY"] = "1"
+     return current_env
+
+
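The returned mapping is merged into the environment the torchrun-style launcher sees; as a sketch, a hypothetical single-node run with num_processes=2 and mixed_precision="fp16" would yield:

# Hypothetical outcome (single node, 2 GPUs, fp16, dynamo disabled):
#   args.nproc_per_node                -> "2"
#   env["ACCELERATE_MIXED_PRECISION"]  == "fp16"
#   env["ACCELERATE_DYNAMO_BACKEND"]   == "NO"
#   env["OMP_NUM_THREADS"]             == str(args.num_cpu_threads_per_process)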
+ def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]:
+     """
+     Prepares and returns the command list and an environment with the correct DeepSpeed environment variables.
+     """
+     num_processes = args.num_processes
+     num_machines = args.num_machines
+     main_process_ip = args.main_process_ip
+     main_process_port = args.main_process_port
+     cmd = None
+
+     # make sure launcher is not None
+     if args.deepspeed_multinode_launcher is None:
+         # set to default pdsh
+         args.deepspeed_multinode_launcher = DEEPSPEED_MULTINODE_LAUNCHERS[0]
+
+     if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
+         cmd = ["deepspeed", "--no_local_rank"]
+         cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)])
+         if args.deepspeed_exclusion_filter is not None:
+             cmd.extend(
+                 [
+                     "--exclude",
+                     str(args.deepspeed_exclusion_filter),
+                 ]
+             )
+         elif args.deepspeed_inclusion_filter is not None:
+             cmd.extend(
+                 [
+                     "--include",
+                     str(args.deepspeed_inclusion_filter),
+                 ]
+             )
+         else:
+             cmd.extend(["--num_gpus", str(args.num_processes // args.num_machines)])
+         if main_process_ip:
+             cmd.extend(["--master_addr", str(main_process_ip)])
+         cmd.extend(["--master_port", str(main_process_port)])
+         if args.module and args.no_python:
+             raise ValueError("--module and --no_python cannot be used together")
+         elif args.module:
+             cmd.append("--module")
+         elif args.no_python:
+             cmd.append("--no_python")
+         cmd.append(args.training_script)
+         cmd.extend(args.training_script_args)
+     elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]:
+         args.nproc_per_node = str(num_processes // num_machines)
+         args.nnodes = str(num_machines)
+         args.node_rank = int(args.machine_rank)
+         if getattr(args, "same_network", False):
+             args.master_addr = str(main_process_ip)
+             args.master_port = str(main_process_port)
+         else:
+             args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}"
+     else:
+         args.nproc_per_node = str(num_processes)
+         if main_process_port is not None:
+             args.master_port = str(main_process_port)
+
+     if main_process_port is None:
+         main_process_port = 29500
+
+     # Only need to check port availability on the main process, in case we have to start multiple launchers
+     # on the same machine for reasons such as splitting log files.
+     need_port_check = num_machines <= 1 or int(args.machine_rank) == 0
+     if need_port_check and is_port_in_use(main_process_port):
+         raise ConnectionError(
+             f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. "
+             "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)"
+             " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`."
+         )
+
+     if args.module and args.no_python:
+         raise ValueError("--module and --no_python cannot be used together")
+     elif args.module:
+         args.module = True
+     elif args.no_python:
+         args.no_python = True
+
+     current_env = os.environ.copy()
+     if args.debug:
+         current_env["ACCELERATE_DEBUG_MODE"] = "true"
+     gpu_ids = getattr(args, "gpu_ids", "all")
+     if gpu_ids != "all" and args.gpu_ids is not None:
+         if is_xpu_available():
+             current_env["ZE_AFFINITY_MASK"] = gpu_ids
+         elif is_mlu_available():
+             current_env["MLU_VISIBLE_DEVICES"] = gpu_ids
+         elif is_npu_available():
+             current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids
+         else:
+             current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids
+     try:
+         mixed_precision = PrecisionType(args.mixed_precision.lower())
+     except ValueError:
+         raise ValueError(
+             f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
+         )
+
+     current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath("."))
+     current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
+     current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower()
+     current_env["ACCELERATE_USE_DEEPSPEED"] = "true"
+     if args.zero_stage is not None:
+         current_env["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage)
+     if args.gradient_accumulation_steps is not None:
+         current_env["ACCELERATE_GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps)
+     if args.gradient_clipping is not None:
+         current_env["ACCELERATE_GRADIENT_CLIPPING"] = str(args.gradient_clipping).lower()
+     if args.offload_optimizer_device is not None:
+         current_env["ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device).lower()
+     if args.offload_param_device is not None:
+         current_env["ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE"] = str(args.offload_param_device).lower()
+     if args.zero3_init_flag is not None:
+         current_env["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = str(args.zero3_init_flag).lower()
+     if args.zero3_save_16bit_model is not None:
+         current_env["ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL"] = str(args.zero3_save_16bit_model).lower()
+     if args.deepspeed_config_file is not None:
+         current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file)
+     if args.enable_cpu_affinity:
+         current_env["ACCELERATE_CPU_AFFINITY"] = "1"
+     if args.deepspeed_moe_layer_cls_names is not None:
+         current_env["ACCELERATE_DEEPSPEED_MOE_LAYER_CLS_NAMES"] = str(args.deepspeed_moe_layer_cls_names)
+     return cmd, current_env
+
+
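For orientation, a sketch of what a hypothetical two-node pdsh launch would produce; addresses and paths are illustrative:

# cmd ≈ ["deepspeed", "--no_local_rank",
#        "--hostfile", "/path/to/hostfile", "--launcher", "pdsh",
#        "--num_gpus", "8",
#        "--master_addr", "10.0.0.1", "--master_port", "29500",
#        "train.py", ...]
# env additionally carries ACCELERATE_USE_DEEPSPEED="true" and the
# ACCELERATE_DEEPSPEED_* / ACCELERATE_GRADIENT_* variables derived from the flags above.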
+ def prepare_tpu(
+     args: argparse.Namespace, current_env: Dict[str, str], pod: bool = False
+ ) -> Tuple[argparse.Namespace, Dict[str, str]]:
+     """
+     Prepares and returns an environment with the correct TPU environment variables.
+     """
+     if args.mixed_precision == "bf16" and is_torch_xla_available(check_is_tpu=True):
+         if args.downcast_bf16:
+             current_env["XLA_DOWNCAST_BF16"] = "1"
+         else:
+             current_env["XLA_USE_BF16"] = "1"
+     if args.debug:
+         current_env["ACCELERATE_DEBUG_MODE"] = "true"
+     if pod:
+         # Take explicit args and set them up for XLA
+         args.vm = args.tpu_vm
+         args.tpu = args.tpu_name
+     return args, current_env
+
+
+ def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:
+     if len(nargs) == 0:
+         return {}
+     # helper function to infer the type for argparse
+
+     def _infer_type(s):
+         try:
+             s = float(s)
+
+             if s // 1 == s:
+                 return int(s)
+             return s
+         except ValueError:
+             return s
+
+     parser = argparse.ArgumentParser()
+     _, unknown = parser.parse_known_args(nargs)
+     for index, argument in enumerate(unknown):
+         if argument.startswith(("-", "--")):
+             action = None
+             if index + 1 < len(unknown):  # checks if next index would be in list
+                 if unknown[index + 1].startswith(("-", "--")):  # checks if next element is a key
+                     # raise an error if element is store_true or store_false
+                     raise ValueError(
+                         "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types"
+                     )
+             else:  # raise an error if last element is store_true or store_false
+                 raise ValueError(
+                     "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types"
+                 )
+             # adds the argument to the parser (store_true/store_false actions are rejected above)
+             if action is None:
+                 parser.add_argument(argument, type=_infer_type)
+             else:
+                 parser.add_argument(argument, action=action)
+
+     return {
+         key: (literal_eval(value) if value in ("True", "False") else value)
+         for key, value in parser.parse_args(nargs).__dict__.items()
+     }
+
+
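Concretely, `_infer_type` narrows numeric strings and the final comprehension evaluates bare `True`/`False`; a small sketch:

# Sketch: converting script args into SageMaker hyperparameters.
hyperparameters = _convert_nargs_to_dict(["--epochs", "3", "--learning_rate", "5e-5", "--fp16", "True"])
# -> {"epochs": 3, "learning_rate": 5e-05, "fp16": True}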
+ def prepare_sagemager_args_inputs(
+     sagemaker_config: SageMakerConfig, args: argparse.Namespace
+ ) -> Tuple[argparse.Namespace, Dict[str, Any]]:
+     # configure environment
+     print("Configuring Amazon SageMaker environment")
+     os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region
+
+     # configure credentials
+     if sagemaker_config.profile is not None:
+         os.environ["AWS_PROFILE"] = sagemaker_config.profile
+     elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None:
+         os.environ["AWS_ACCESS_KEY_ID"] = args.aws_access_key_id
+         os.environ["AWS_SECRET_ACCESS_KEY"] = args.aws_secret_access_key
+     else:
+         raise OSError("You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile")
+
+     # extract needed arguments
+     source_dir = os.path.dirname(args.training_script)
+     if not source_dir:  # checks if string is empty
+         source_dir = "."
+     entry_point = os.path.basename(args.training_script)
+     if not entry_point.endswith(".py"):
+         raise ValueError(f'Your training script should be a python script and not "{entry_point}"')
+
+     print("Converting Arguments to Hyperparameters")
+     hyperparameters = _convert_nargs_to_dict(args.training_script_args)
+
+     try:
+         mixed_precision = PrecisionType(args.mixed_precision.lower())
+     except ValueError:
+         raise ValueError(
+             f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
+         )
+
+     try:
+         dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
+     except ValueError:
+         raise ValueError(
+             f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
+         )
+
+     # Environment variables to be set for use during training job
+     environment = {
+         "ACCELERATE_USE_SAGEMAKER": "true",
+         "ACCELERATE_MIXED_PRECISION": str(mixed_precision),
+         "ACCELERATE_DYNAMO_BACKEND": dynamo_backend.value,
+         "ACCELERATE_DYNAMO_MODE": args.dynamo_mode,
+         "ACCELERATE_DYNAMO_USE_FULLGRAPH": str(args.dynamo_use_fullgraph),
+         "ACCELERATE_DYNAMO_USE_DYNAMIC": str(args.dynamo_use_dynamic),
+         "ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE": sagemaker_config.distributed_type.value,
+     }
+     # configure distribution set up
+     distribution = None
+     if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL:
+         distribution = {"smdistributed": {"dataparallel": {"enabled": True}}}
+
+     # configure sagemaker inputs
+     sagemaker_inputs = None
+     if sagemaker_config.sagemaker_inputs_file is not None:
+         print(f"Loading SageMaker Inputs from {sagemaker_config.sagemaker_inputs_file} file")
+         sagemaker_inputs = {}
+         with open(sagemaker_config.sagemaker_inputs_file) as file:
+             for i, line in enumerate(file):
+                 if i == 0:
+                     continue
+                 l = line.split("\t")
+                 sagemaker_inputs[l[0]] = l[1].strip()
+         print(f"Loaded SageMaker Inputs: {sagemaker_inputs}")
+
+     # configure sagemaker metrics
+     sagemaker_metrics = None
+     if sagemaker_config.sagemaker_metrics_file is not None:
+         print(f"Loading SageMaker Metrics from {sagemaker_config.sagemaker_metrics_file} file")
+         sagemaker_metrics = []
+         with open(sagemaker_config.sagemaker_metrics_file) as file:
+             for i, line in enumerate(file):
+                 if i == 0:
+                     continue
+                 l = line.split("\t")
+                 metric_dict = {
+                     "Name": l[0],
+                     "Regex": l[1].strip(),
+                 }
+                 sagemaker_metrics.append(metric_dict)
+         print(f"Loaded SageMaker Metrics: {sagemaker_metrics}")
+
+     # configure session
+     print("Creating Estimator")
+     args = {
+         "image_uri": sagemaker_config.image_uri,
+         "entry_point": entry_point,
+         "source_dir": source_dir,
+         "role": sagemaker_config.iam_role_name,
+         "transformers_version": sagemaker_config.transformers_version,
+         "pytorch_version": sagemaker_config.pytorch_version,
+         "py_version": sagemaker_config.py_version,
+         "base_job_name": sagemaker_config.base_job_name,
+         "instance_count": sagemaker_config.num_machines,
+         "instance_type": sagemaker_config.ec2_instance_type,
+         "debugger_hook_config": False,
+         "distribution": distribution,
+         "hyperparameters": hyperparameters,
+         "environment": environment,
+         "metric_definitions": sagemaker_metrics,
+     }
+
+     if sagemaker_config.additional_args is not None:
+         args = merge_dicts(sagemaker_config.additional_args, args)
+     return args, sagemaker_inputs
+
+
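The returned dict is intended to be splatted into a SageMaker estimator, mirroring how the accelerate SageMaker launcher consumes it; a hedged sketch:

from sagemaker.huggingface import HuggingFace

estimator_args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)
huggingface_estimator = HuggingFace(**estimator_args)
huggingface_estimator.fit(inputs=sagemaker_inputs)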
+ def env_var_path_add(env_var_name, path_to_add):
+     """
+     Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the
+     caller to set it in os.environ.
+     """
+     paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0]
+     paths.append(str(path_to_add))
+     return ":".join(paths)
+
+
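A one-line usage sketch; the helper only builds the string, so the caller assigns it back:

import os

# Append a project directory to PYTHONPATH (path is hypothetical).
os.environ["PYTHONPATH"] = env_var_path_add("PYTHONPATH", "/opt/my_project")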
+ class PrepareForLaunch:
+     """
+     Prepare a function that will be launched in a distributed setup.
+
+     Args:
+         launcher (`Callable`):
+             The function to launch.
+         distributed_type ([`~state.DistributedType`]):
+             The distributed type to prepare for.
+         debug (`bool`, *optional*, defaults to `False`):
+             Whether or not this is a debug launch.
+     """
+
+     def __init__(self, launcher, distributed_type="NO", debug=False):
+         self.launcher = launcher
+         self.distributed_type = DistributedType(distributed_type)
+         self.debug = debug
+
+     def __call__(self, index, *args):
+         if self.debug:
+             world_size = int(os.environ.get("WORLD_SIZE"))
+             rdv_file = os.environ.get("ACCELERATE_DEBUG_RDV_FILE")
+             torch.distributed.init_process_group(
+                 "gloo",
+                 rank=index,
+                 store=torch.distributed.FileStore(rdv_file, world_size),
+                 world_size=world_size,
+             )
+         elif self.distributed_type in (
+             DistributedType.MULTI_GPU,
+             DistributedType.MULTI_MLU,
+             DistributedType.MULTI_NPU,
+             DistributedType.MULTI_XPU,
+             DistributedType.MULTI_CPU,
+         ):
+             # Prepare the environment for torch.distributed
+             os.environ["LOCAL_RANK"] = str(index)
+             nproc = int(os.environ.get("NPROC", 1))
+             node_rank = int(os.environ.get("NODE_RANK", 0))
+             os.environ["RANK"] = str(nproc * node_rank + index)
+
+         os.environ["FORK_LAUNCHED"] = str(1)
+         self.launcher(*args)
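A hedged sketch of the intended use, in the spirit of `notebook_launcher`: wrap a training function and fan it out with `torch.multiprocessing`; process count and environment values are hypothetical:

import os
import torch

def training_loop():
    # ... user training code ...
    pass

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
launcher = PrepareForLaunch(training_loop, distributed_type="MULTI_GPU")
torch.multiprocessing.start_processes(launcher, args=(), nprocs=2, start_method="fork")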
llmeval-env/lib/python3.10/site-packages/accelerate/utils/megatron_lm.py ADDED
@@ -0,0 +1,1435 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ import math
+ from abc import ABC
+ from functools import partial
+
+ import torch
+ import torch.nn.functional as F
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
+
+ from ..optimizer import AcceleratedOptimizer
+ from ..scheduler import AcceleratedScheduler
+ from .imports import is_megatron_lm_available, is_transformers_available
+ from .operations import recursively_apply, send_to_device
+
+
+ if is_transformers_available():
+     from transformers.modeling_outputs import (
+         CausalLMOutputWithCrossAttentions,
+         Seq2SeqLMOutput,
+         SequenceClassifierOutput,
+     )
+
+
+ if is_megatron_lm_available():
+     from megatron import (
+         get_args,
+         get_num_microbatches,
+         get_tensorboard_writer,
+         get_timers,
+         get_tokenizer,
+         mpu,
+         print_rank_0,
+         print_rank_last,
+     )
+     from megatron.arguments import _add_data_args, _add_validation_args, parse_args, validate_args
+     from megatron.checkpointing import load_args_from_checkpoint, load_checkpoint, save_checkpoint
+     from megatron.data.data_samplers import MegatronPretrainingRandomSampler, MegatronPretrainingSampler
+     from megatron.global_vars import set_global_variables
+     from megatron.initialize import (
+         _compile_dependencies,
+         _init_autoresume,
+         _set_random_seed,
+         set_jit_fusion_options,
+         write_args_to_tensorboard,
+     )
+     from megatron.model import BertModel, Float16Module, GPTModel, ModelType, T5Model
+     from megatron.model import DistributedDataParallel as LocalDDP
+     from megatron.model.classification import Classification
+     from megatron.optimizer import get_megatron_optimizer
+     from megatron.schedules import get_forward_backward_func
+     from megatron.text_generation.communication import broadcast_int_list, broadcast_tensor
+     from megatron.text_generation.generation import (
+         beam_search_and_return_on_first_stage,
+         generate_tokens_probs_and_return_on_first_stage,
+     )
+     from megatron.tokenizer.tokenizer import _vocab_size_with_padding
+     from megatron.training import get_model, get_optimizer_param_scheduler, training_log
+     from megatron.utils import (
+         average_losses_across_data_parallel_group,
+         calc_params_l2_norm,
+         get_ltor_masks_and_position_ids,
+         unwrap_model,
+     )
+
+
+ # model utilities
+ def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True):
+     """Build the model."""
+     args = get_args()
+     mode = "pre-training" if args.pretraining_flag else "fine-tuning"
+     if args.rank == 0:
+         print(f"Building {args.model_type_name} model in the {mode} mode.")
+         print(
+             "The Megatron LM model weights are initialized at random in `accelerator.prepare`. "
+             "Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup."
+         )
+     if args.model_type_name == "bert":
+         if args.pretraining_flag:
+             num_tokentypes = 2 if args.bert_binary_head else 0
+             model = BertModel(
+                 num_tokentypes=num_tokentypes,
+                 add_binary_head=args.bert_binary_head,
+                 parallel_output=True,
+                 pre_process=pre_process,
+                 post_process=post_process,
+             )
+         else:
+             model = Classification(
+                 num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process
+             )
+     elif args.model_type_name == "gpt":
+         model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process)
+     elif args.model_type_name == "t5":
+         model = T5Model(
+             num_tokentypes=0,
+             parallel_output=True,
+             pre_process=pre_process,
+             post_process=post_process,
+             add_encoder=add_encoder,
+             add_decoder=add_decoder,
+         )
+     else:
+         raise ValueError(f"Unsupported model type: {args.model_type_name}")
+     return model
+
+
+ def prepare_model(accelerator):
+     accelerator.print("Preparing model")
+     args = get_args()
+     if accelerator.state.megatron_lm_plugin.custom_prepare_model_function is not None:
+         if accelerator.state.megatron_lm_plugin.custom_model_provider_function is None:
+             raise ValueError(
+                 "You must provide a `custom_model_provider_function` when using a `custom_prepare_model_function`."
+             )
+         custom_model_provider_func = accelerator.state.megatron_lm_plugin.custom_model_provider_function
+         model = accelerator.state.megatron_lm_plugin.custom_prepare_model_function(custom_model_provider_func)
+     else:
+         if args.model_type_name in ("bert", "gpt"):
+             model_type = ModelType.encoder_or_decoder
+         elif args.model_type_name == "t5":
+             model_type = ModelType.encoder_and_decoder
+             if args.pipeline_model_parallel_split_rank is None and args.pipeline_model_parallel_size > 1:
+                 args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2
+         model = get_model(model_provider_func, model_type)
+     return model
+
+
+
+ # dataloader utilities
+ class MegatronLMDummyDataLoader:
+     """
+     Dummy dataloader that holds the Megatron-LM dataset arguments; it is primarily used so the conventional training
+     loop can be followed while the actual train/valid/test data iterators are built by Megatron-LM.
+
+     Args:
+         **dataset_kwargs: Megatron data arguments.
+     """
+
+     def __init__(self, **dataset_kwargs):
+         parser = argparse.ArgumentParser()
+         parser = _add_data_args(parser)
+         parser = _add_validation_args(parser)
+         data_args = parser.parse_known_args()
+         self.dataset_args = vars(data_args[0])
+         self.dataset_args.update(dataset_kwargs)
+         self.dataset_args["megatron_dataset_flag"] = True
+
+     def set_megatron_data_args(self):
+         args = get_args()
+         for key, value in self.dataset_args.items():
+             setattr(args, key, value)
+
+     def get_train_valid_test_datasets_provider(self):
+         def train_valid_test_datasets_provider(train_val_test_num_samples):
+             """Build train, valid, and test datasets."""
+             args = get_args()
+             dataset_args = {
+                 "data_prefix": args.data_path,
+                 "data_impl": args.data_impl,
+                 "splits_string": args.split,
+                 "train_valid_test_num_samples": train_val_test_num_samples,
+                 "skip_warmup": (not args.mmap_warmup),
+                 "seed": args.seed,
+             }
+             if args.model_type_name == "bert":
+                 dataset_args.update(
+                     {
+                         "max_seq_length": args.seq_length,
+                         "masked_lm_prob": args.mask_prob,
+                         "short_seq_prob": args.short_seq_prob,
+                         "binary_head": args.bert_binary_head,
+                     }
+                 )
+             elif args.model_type_name == "gpt":
+                 dataset_args.update(
+                     {
+                         "seq_length": args.seq_length,
+                     }
+                 )
+             elif args.model_type_name == "t5":
+                 dataset_args.update(
+                     {
+                         "max_seq_length": args.encoder_seq_length,
+                         "max_seq_length_dec": args.decoder_seq_length,
+                         "masked_lm_prob": args.mask_prob,
+                         "short_seq_prob": args.short_seq_prob,
+                         "dataset_type": "t5",
+                     }
+                 )
+             else:
+                 raise ValueError(f"Unsupported model type: {args.model_type_name}")
+             if args.model_type_name == "gpt":
+                 from megatron.data.gpt_dataset import build_train_valid_test_datasets
+             else:
+                 from megatron.data.dataset_utils import build_train_valid_test_datasets
+             train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args)
+             return train_ds, valid_ds, test_ds
+
+         return train_valid_test_datasets_provider
+
+     def build_pretraining_data_loader(self, dataset, consumed_samples):
+         if dataset is None:
+             return None
+         args = get_args()
+         micro_batch_size = args.micro_batch_size * args.num_micro_batches
+
+         # Megatron sampler
+         if args.dataloader_type == "single":
+             batch_sampler = MegatronPretrainingSampler(
+                 total_samples=len(dataset),
+                 consumed_samples=consumed_samples,
+                 micro_batch_size=micro_batch_size,
+                 data_parallel_rank=mpu.get_data_parallel_rank(),
+                 data_parallel_size=mpu.get_data_parallel_world_size(),
+             )
+         elif args.dataloader_type == "cyclic":
+             batch_sampler = MegatronPretrainingRandomSampler(
+                 dataset,
+                 total_samples=len(dataset),
+                 consumed_samples=consumed_samples,
+                 micro_batch_size=micro_batch_size,
+                 data_parallel_rank=mpu.get_data_parallel_rank(),
+                 data_parallel_size=mpu.get_data_parallel_world_size(),
+                 data_sharding=args.data_sharding,
+             )
+         else:
+             raise Exception(f"{args.dataloader_type} dataloader type is not supported.")
+
+         # Torch dataloader.
+         return torch.utils.data.DataLoader(
+             dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True
+         )
+
+     def build_train_valid_test_data_iterators(self):
+         def cyclic_iter(iter):
+             while True:
+                 yield from iter
+
+         args = get_args()
+
+         (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)
+
+         print_rank_0("> building train, validation, and test datasets ...")
+
+         # Backward compatibility, assume fixed batch size.
+         if args.iteration > 0 and args.consumed_train_samples == 0:
+             assert args.train_samples is None, "only backward compatibility support for iteration-based training"
+             args.consumed_train_samples = args.iteration * args.global_batch_size
+         if args.iteration > 0 and args.consumed_valid_samples == 0:
+             if args.train_samples is None:
+                 args.consumed_valid_samples = (
+                     (args.iteration // args.eval_interval) * args.eval_iters * args.global_batch_size
+                 )
+
+         # Data loader only on rank 0 of each model parallel group.
+         if mpu.get_tensor_model_parallel_rank() == 0:
+             # Number of train/valid/test samples.
+             if args.train_samples:
+                 train_samples = args.train_samples
+             else:
+                 train_samples = args.train_iters * args.global_batch_size
+             eval_iters = (args.train_iters // args.eval_interval + 1) * args.eval_iters
+             test_iters = args.eval_iters
+             train_val_test_num_samples = [
+                 train_samples,
+                 eval_iters * args.global_batch_size,
+                 test_iters * args.global_batch_size,
+             ]
+             print_rank_0(" > datasets target sizes (minimum size):")
+             print_rank_0(f"    train:      {train_val_test_num_samples[0]}")
+             print_rank_0(f"    validation: {train_val_test_num_samples[1]}")
+             print_rank_0(f"    test:       {train_val_test_num_samples[2]}")
+
+             # Build the datasets.
+             train_valid_test_datasets_provider = self.get_train_valid_test_datasets_provider()
+             train_ds, valid_ds, test_ds = train_valid_test_datasets_provider(train_val_test_num_samples)
+
+             # Build dataloaders.
+             train_dataloader = self.build_pretraining_data_loader(train_ds, args.consumed_train_samples)
+             valid_dataloader = self.build_pretraining_data_loader(valid_ds, args.consumed_valid_samples)
+             test_dataloader = self.build_pretraining_data_loader(test_ds, 0)
+
+             # Flags to know if we need to do training/validation/testing.
+             do_train = train_dataloader is not None and args.train_iters > 0
+             do_valid = valid_dataloader is not None and args.eval_iters > 0
+             do_test = test_dataloader is not None and args.eval_iters > 0
+             # Pack the do_train/do_valid/do_test flags so they can be broadcast to all ranks.
+             flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)])
+         else:
+             flags = torch.cuda.LongTensor([0, 0, 0])
+
+         # Broadcast the flags across the tensor model parallel group.
+         torch.distributed.broadcast(
+             flags, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()
+         )
+         args.do_train = flags[0].item()
+         args.do_valid = flags[1].item()
+         args.do_test = flags[2].item()
+
+         # Build iterators.
+         dl_type = args.dataloader_type
+         assert dl_type in ["single", "cyclic"]
+
+         if train_dataloader is not None:
+             train_data_iterator = (
+                 iter(train_dataloader) if dl_type == "single" else iter(cyclic_iter(train_dataloader))
+             )
+         else:
+             train_data_iterator = None
+
+         if valid_dataloader is not None:
+             valid_data_iterator = (
+                 iter(valid_dataloader) if dl_type == "single" else iter(cyclic_iter(valid_dataloader))
+             )
+         else:
+             valid_data_iterator = None
+
+         if test_dataloader is not None:
+             test_data_iterator = iter(test_dataloader) if dl_type == "single" else iter(cyclic_iter(test_dataloader))
+         else:
+             test_data_iterator = None
+
+         return train_data_iterator, valid_data_iterator, test_data_iterator
+
+
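A hedged sketch of how this dummy dataloader is used, following the pattern in the accelerate Megatron-LM docs; the dataset prefix is hypothetical and must point at data preprocessed with the Megatron-LM tools:

from accelerate import Accelerator
from accelerate.utils import MegatronLMDummyDataLoader

megatron_dataloader = MegatronLMDummyDataLoader(
    data_path=["my-gpt2_text_document"],  # hypothetical preprocessed dataset prefix
    splits_string="949,50,1",
    seq_length=1024,
    micro_batch_size=4,
)
accelerator = Accelerator()
train_dl, valid_dl, test_dl = accelerator.prepare(megatron_dataloader, megatron_dataloader, megatron_dataloader)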
+ def prepare_data_loader(accelerator, dataloader):
+     accelerator.print("Preparing dataloader")
+     args = get_args()
+     if not args.megatron_dataset_flag:
+         from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader
+
+         args = get_args()
+         micro_batch_size = args.micro_batch_size * args.num_micro_batches
+         kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS}
+         if kwargs["batch_size"] is None:
+             if isinstance(kwargs["sampler"], torch.utils.data.BatchSampler):
+                 kwargs["sampler"].batch_size = micro_batch_size
+             else:
+                 del kwargs["sampler"]
+                 del kwargs["shuffle"]
+                 del kwargs["batch_size"]
+                 kwargs["batch_sampler"].batch_size = micro_batch_size
+         else:
+             del kwargs["batch_sampler"]
+             kwargs["batch_size"] = micro_batch_size
+
+         dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs)
+         return prepare_data_loader(
+             dataloader,
+             accelerator.device,
+             num_processes=mpu.get_data_parallel_world_size(),
+             process_index=mpu.get_data_parallel_rank(),
+             split_batches=accelerator.split_batches,
+             put_on_device=True,
+             rng_types=accelerator.rng_types.copy(),
+             dispatch_batches=accelerator.dispatch_batches,
+         )
+     else:
+         if args.consumed_samples is not None:
+             (
+                 args.consumed_train_samples,
+                 args.consumed_valid_samples,
+                 args.consumed_test_samples,
+             ) = args.consumed_samples
+         else:
+             args.consumed_train_samples, args.consumed_valid_samples, args.consumed_test_samples = 0, 0, 0
+         (
+             train_data_iterator,
+             valid_data_iterator,
+             test_data_iterator,
+         ) = dataloader.build_train_valid_test_data_iterators()
+         return train_data_iterator, valid_data_iterator, test_data_iterator
+
+
+ # optimizer utilities
+ class MegatronLMOptimizerWrapper(AcceleratedOptimizer):
+     def __init__(self, optimizer):
+         super().__init__(optimizer, device_placement=False, scaler=None)
+
+     def zero_grad(self, set_to_none=None):
+         pass  # `model(**batch)` is doing that automatically. Therefore, its implementation is not needed.
+
+     def step(self):
+         pass  # `model(**batch)` is doing that automatically. Therefore, its implementation is not needed.
+
+     @property
+     def step_was_skipped(self):
+         """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
+         return self.optimizer.skipped_iter
+
+
+ def prepare_optimizer(accelerator, model):
+     accelerator.print("Preparing optimizer")
+     args = get_args()
+     optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult)
+     return optimizer
+
+
+ # scheduler utilities
+ class MegatronLMDummyScheduler:
+     """
+     Dummy scheduler that holds the scheduler configuration; it is primarily used so the conventional training loop
+     can be followed while the actual scheduler is created by Megatron-LM via `get_optimizer_param_scheduler`.
+
+     Args:
+         optimizer (`torch.optim.optimizer.Optimizer`):
+             The optimizer to wrap.
+         total_num_steps (int):
+             Total number of steps.
+         warmup_num_steps (int):
+             Number of steps for warmup.
+         **kwargs (additional keyword arguments, *optional*):
+             Other arguments.
+     """
+
+     def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
+         self.optimizer = optimizer
+         self.total_num_steps = total_num_steps
+         self.warmup_num_steps = warmup_num_steps
+         self.kwargs = kwargs
+
+
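A hedged usage sketch, matching how the scheduler arguments are forwarded to Megatron-LM; the step counts are illustrative:

lr_scheduler = MegatronLMDummyScheduler(
    optimizer=optimizer,   # the optimizer created earlier
    total_num_steps=1000,  # illustrative
    warmup_num_steps=100,  # illustrative
)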
+ class MegatronLMSchedulerWrapper(AcceleratedScheduler):
+     def __init__(self, scheduler, optimizers):
+         super().__init__(scheduler, optimizers)
+
+     def step(self, *args, **kwargs):
+         return  # `model(**batch)` is doing that automatically. Therefore, its implementation is not needed.
+
+
+ def prepare_scheduler(accelerator, optimizer, scheduler):
+     accelerator.print("Preparing scheduler")
+     scheduler = get_optimizer_param_scheduler(optimizer)
+     return scheduler
+
+
+ class AbstractTrainStep(ABC):
+     """Abstract class for batching, forward pass and loss handler."""
+
+     def __init__(self, name):
+         super().__init__()
+         self.name = name
+
+     def get_batch_func(self):
+         pass
+
+     def get_forward_step_func(self):
+         pass
+
+     def get_loss_func(self):
+         pass
+
+
+ class BertTrainStep(AbstractTrainStep):
+     """
+     Bert train step class.
+
+     Args:
+         args (`argparse.Namespace`): Megatron-LM arguments.
+     """
+
+     def __init__(self, args):
+         super().__init__("BertTrainStep")
+         self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
+         self.loss_func = self.get_loss_func(args.pretraining_flag, args.num_labels)
+         self.forward_step = self.get_forward_step_func(args.pretraining_flag, args.bert_binary_head)
+         if not args.model_return_dict:
+             self.model_output_class = None
+         else:
+             self.model_output_class = SequenceClassifierOutput
+
+     def get_batch_func(self, megatron_dataset_flag):
+         def get_batch_megatron(data_iterator):
+             """Build the batch."""
+
+             # Items and their type.
+             keys = ["text", "types", "labels", "is_random", "loss_mask", "padding_mask"]
+             datatype = torch.int64
+
+             # Broadcast data.
+             if data_iterator is not None:
+                 data = next(data_iterator)
+             else:
+                 data = None
+             data_b = mpu.broadcast_data(keys, data, datatype)
+
+             # Unpack.
+             tokens = data_b["text"].long()
+             types = data_b["types"].long()
+             sentence_order = data_b["is_random"].long()
+             loss_mask = data_b["loss_mask"].float()
+             lm_labels = data_b["labels"].long()
+             padding_mask = data_b["padding_mask"].long()
+
+             return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
+
+         def get_batch_transformer(data_iterator):
+             """Build the batch."""
+             data = next(data_iterator)
+             data = send_to_device(data, torch.cuda.current_device())
+
+             # Unpack.
+             tokens = data["input_ids"].long()
+             padding_mask = data["attention_mask"].long()
+             if "token_type_ids" in data:
+                 types = data["token_type_ids"].long()
+             else:
+                 types = None
+             if "labels" in data:
+                 lm_labels = data["labels"].long()
+                 loss_mask = (data["labels"] != -100).to(torch.float)
+             else:
+                 lm_labels = None
+                 loss_mask = None
+             if "next_sentence_label" in data:
+                 sentence_order = data["next_sentence_label"].long()
+             else:
+                 sentence_order = None
+
+             return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
+
+         if megatron_dataset_flag:
+             return get_batch_megatron
+         else:
+             return get_batch_transformer
+
+     def get_loss_func(self, pretraining_flag, num_labels):
+         def loss_func_pretrain(loss_mask, sentence_order, output_tensor):
+             lm_loss_, sop_logits = output_tensor
+
+             lm_loss_ = lm_loss_.float()
+             loss_mask = loss_mask.float()
+             lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
+
+             if sop_logits is not None:
+                 sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1)
+                 sop_loss = sop_loss.float()
+                 loss = lm_loss + sop_loss
+                 averaged_losses = average_losses_across_data_parallel_group([lm_loss, sop_loss])
+                 return loss, {"lm loss": averaged_losses[0], "sop loss": averaged_losses[1]}
+
+             else:
+                 loss = lm_loss
+                 averaged_losses = average_losses_across_data_parallel_group([lm_loss])
+                 return loss, {"lm loss": averaged_losses[0]}
+
+         def loss_func_finetune(labels, logits):
+             if num_labels == 1:
+                 # We are doing regression
+                 loss_fct = MSELoss()
+                 loss = loss_fct(logits.view(-1), labels.view(-1))
+             elif num_labels > 1 and (labels.dtype in (torch.long, torch.int)):
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(logits.view(-1, num_labels), labels.view(-1))
+             else:
+                 loss_fct = BCEWithLogitsLoss()
+                 loss = loss_fct(logits, labels)
+             averaged_losses = average_losses_across_data_parallel_group([loss])
+             return loss, {"loss": averaged_losses[0]}
+
+         if pretraining_flag:
+             return loss_func_pretrain
+         else:
+             return loss_func_finetune
+
+     def get_forward_step_func(self, pretraining_flag, bert_binary_head):
+         def forward_step(data_iterator, model):
+             """Forward step."""
+             tokens, types, sentence_order, loss_mask, labels, padding_mask = self.get_batch(data_iterator)
+             if not bert_binary_head:
+                 types = None
+             # Forward pass through the model.
+             if pretraining_flag:
+                 output_tensor = model(tokens, padding_mask, tokentype_ids=types, lm_labels=labels)
+                 return output_tensor, partial(self.loss_func, loss_mask, sentence_order)
+             else:
+                 logits = model(tokens, padding_mask, tokentype_ids=types)
+                 return logits, partial(self.loss_func, labels)
+
+         return forward_step
+
+
+ class GPTTrainStep(AbstractTrainStep):
+     """
+     GPT train step class.
+
+     Args:
+         args (`argparse.Namespace`): Megatron-LM arguments.
+     """
+
+     def __init__(self, args):
+         super().__init__("GPTTrainStep")
+         self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
+         self.loss_func = self.get_loss_func()
+         self.forward_step = self.get_forward_step_func()
+         self.eod_token = args.padded_vocab_size - 1
+         if args.vocab_file is not None:
+             tokenizer = get_tokenizer()
+             self.eod_token = tokenizer.eod
+         self.reset_position_ids = args.reset_position_ids
+         self.reset_attention_mask = args.reset_attention_mask
+         self.eod_mask_loss = args.eod_mask_loss
+         if not args.model_return_dict:
+             self.model_output_class = None
+         else:
+             self.model_output_class = CausalLMOutputWithCrossAttentions
+
+     def get_batch_func(self, megatron_dataset_flag):
+         def get_batch_megatron(data_iterator):
+             """Generate a batch"""
+             # Items and their type.
+             keys = ["text"]
+             datatype = torch.int64
+
+             # Broadcast data.
+             if data_iterator is not None:
+                 data = next(data_iterator)
+             else:
+                 data = None
+             data_b = mpu.broadcast_data(keys, data, datatype)
+
+             # Unpack.
+             tokens_ = data_b["text"].long()
+             labels = tokens_[:, 1:].contiguous()
+             tokens = tokens_[:, :-1].contiguous()
+
+             # Get the masks and position ids.
+             attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
+                 tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss
+             )
+
+             return tokens, labels, loss_mask, attention_mask, position_ids
+
+         def get_batch_transformer(data_iterator):
+             data = next(data_iterator)
+             data = {"input_ids": data["input_ids"]}
+             data = send_to_device(data, torch.cuda.current_device())
+
+             tokens_ = data["input_ids"].long()
+             padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token
+             tokens_ = torch.concat([tokens_, padding], dim=1)
+             labels = tokens_[:, 1:].contiguous()
+             tokens = tokens_[:, :-1].contiguous()
+             # Get the masks and position ids.
+             attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
+                 tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True
+             )
+             return tokens, labels, loss_mask, attention_mask, position_ids
+
+         if megatron_dataset_flag:
+             return get_batch_megatron
+         else:
+             return get_batch_transformer
+
+     def get_loss_func(self):
+         args = get_args()
+
+         def loss_func(loss_mask, output_tensor):
+             if args.return_logits:
+                 losses, logits = output_tensor
+             else:
+                 losses = output_tensor
+             losses = losses.float()
+             loss_mask = loss_mask.view(-1).float()
+             loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
+
+             # Reduce loss for logging.
+             averaged_loss = average_losses_across_data_parallel_group([loss])
+
+             output_dict = {"lm loss": averaged_loss[0]}
+             if args.return_logits:
+                 output_dict.update({"logits": logits})
+             return loss, output_dict
+
+         return loss_func
+
+     def get_forward_step_func(self):
+         def forward_step(data_iterator, model):
+             """Forward step."""
+             # Get the batch.
+             tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator)
+             output_tensor = model(tokens, position_ids, attention_mask, labels=labels)
+
+             return output_tensor, partial(self.loss_func, loss_mask)
+
+         return forward_step
+
+
702
+ class T5TrainStep(AbstractTrainStep):
703
+ """
704
+ T5 train step class.
705
+
706
+ Args:
707
+ args (`argparse.Namespace`): Megatron-LM arguments.
708
+ """
709
+
710
+ def __init__(self, args):
711
+ super().__init__("T5TrainStep")
712
+ self.get_batch = self.get_batch_func(args.megatron_dataset_flag)
713
+ self.loss_func = self.get_loss_func()
714
+ self.forward_step = self.get_forward_step_func()
715
+ if not args.model_return_dict:
716
+ self.model_output_class = None
717
+ else:
718
+ self.model_output_class = Seq2SeqLMOutput
719
+
720
+ @staticmethod
721
+ def attn_mask_postprocess(attention_mask):
722
+ # We create a 3D attention mask from a 2D tensor mask.
723
+ # [b, 1, s]
724
+ attention_mask_b1s = attention_mask.unsqueeze(1)
725
+ # [b, s, 1]
726
+ attention_mask_bs1 = attention_mask.unsqueeze(2)
727
+ # [b, s, s]
728
+ attention_mask_bss = attention_mask_b1s * attention_mask_bs1
729
+ # Convert attention mask to binary:
730
+ extended_attention_mask = attention_mask_bss < 0.5
731
+ return extended_attention_mask
732
+
733
+ @staticmethod
734
+ def get_decoder_mask(seq_length, device):
735
+ attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device))
736
+ attention_mask = attention_mask < 0.5
737
+ return attention_mask
738
+
739
+ @staticmethod
740
+ def get_enc_dec_mask(attention_mask, dec_seq_length, device):
741
+ batch_size, _ = attention_mask.shape
742
+ # We create a 3D attention mask from a 2D tensor mask.
743
+ # [b, 1, s]
744
+ attention_mask_b1s = attention_mask.unsqueeze(1)
745
+ # [b, s, 1]
746
+ attention_mask_bs1 = torch.ones((batch_size, dec_seq_length, 1), device=device)
747
+ attention_mask_bss = attention_mask_bs1 * attention_mask_b1s
748
+ extended_attention_mask = attention_mask_bss < 0.5
749
+ return extended_attention_mask
750
+
751
+ def get_batch_func(self, megatron_dataset_flag):
752
+ def get_batch_megatron(data_iterator):
753
+ """Build the batch."""
754
+
755
+ keys = ["text_enc", "text_dec", "labels", "loss_mask", "enc_mask", "dec_mask", "enc_dec_mask"]
756
+ datatype = torch.int64
757
+
758
+ # Broadcast data.
759
+ if data_iterator is not None:
760
+ data = next(data_iterator)
761
+ else:
762
+ data = None
763
+ data_b = mpu.broadcast_data(keys, data, datatype)
764
+
765
+ # Unpack.
766
+ tokens_enc = data_b["text_enc"].long()
767
+ tokens_dec = data_b["text_dec"].long()
768
+ labels = data_b["labels"].long()
769
+ loss_mask = data_b["loss_mask"].float()
770
+
771
+ enc_mask = data_b["enc_mask"] < 0.5
772
+ dec_mask = data_b["dec_mask"] < 0.5
773
+ enc_dec_mask = data_b["enc_dec_mask"] < 0.5
774
+
775
+ return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask
776
+
777
+ def get_batch_transformer(data_iterator):
778
+ """Build the batch."""
779
+ data = next(data_iterator)
780
+ data = send_to_device(data, torch.cuda.current_device())
781
+
782
+ tokens_enc = data["input_ids"].long()
783
+ labels = data["labels"].long()
784
+ loss_mask = (labels != -100).to(torch.float)
785
+ if "decoder_input_ids" in data:
786
+ tokens_dec = data["decoder_input_ids"].long()
787
+ else:
788
+ tokens_dec = labels.new_zeros(labels.shape, device=labels.device, dtype=torch.long)
789
+ tokens_dec[..., 1:] = labels[..., :-1].clone()
790
+ tokens_dec[..., 0] = 0
791
+ tokens_dec.masked_fill_(tokens_dec == -100, 0)
792
+ enc_mask = T5TrainStep.attn_mask_postprocess(data["attention_mask"].long())
793
+ dec_mask = T5TrainStep.get_decoder_mask(tokens_dec.shape[1], tokens_dec.device)
794
+ enc_dec_mask = T5TrainStep.get_enc_dec_mask(
795
+ data["attention_mask"].long(), tokens_dec.shape[1], tokens_dec.device
796
+ )
797
+
798
+ return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask
799
+
800
+ if megatron_dataset_flag:
801
+ return get_batch_megatron
802
+ else:
803
+ return get_batch_transformer
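+
+ # Illustrative: with `megatron_dataset_flag=False` the iterator is expected to yield standard
+ # `transformers`-style dicts, e.g. {"input_ids": ..., "attention_mask": ..., "labels": ...};
+ # when "decoder_input_ids" is absent, they are derived by shifting `labels` one position right.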
804
+
805
+ def get_loss_func(self):
806
+ def loss_func(loss_mask, output_tensor):
807
+ lm_loss_ = output_tensor.float()
808
+ lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
809
+
810
+ loss = lm_loss
811
+ averaged_losses = average_losses_across_data_parallel_group([lm_loss])
812
+
813
+ return loss, {"lm loss": averaged_losses[0]}
814
+
815
+ return loss_func
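+
+ # The masked loss above is a weighted mean, loss = sum_i(l_i * m_i) / sum_i(m_i), so positions
+ # with loss_mask == 0 contribute to neither the numerator nor the denominator.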
816
+
817
+ def get_forward_step_func(self):
818
+ def forward_step(data_iterator, model):
819
+ """Forward step."""
820
+ # Get the batch.
821
+ tokens_enc, tokens_dec, loss_mask, lm_labels, enc_mask, dec_mask, enc_dec_mask = self.get_batch(
822
+ data_iterator
823
+ )
824
+ # Forward model lm_labels
825
+ output_tensor = model(
826
+ tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels
827
+ )
828
+
829
+ return output_tensor, partial(self.loss_func, loss_mask)
830
+
831
+ return forward_step
832
+
833
+
834
+ # initialize Megatron-LM setup
835
+ def initialize(accelerator, extra_args_provider=None, args_defaults={}):
836
+ accelerator.print("Initializing Megatron-LM")
837
+ assert torch.cuda.is_available(), "Megatron requires CUDA."
838
+
839
+ # Parse arguments
840
+ args = parse_args(extra_args_provider, ignore_unknown_args=True)
841
+
842
+ # Set defaults
843
+ for key, value in args_defaults.items():
844
+ if getattr(args, key, None) is not None:
845
+ if args.rank == 0:
846
+ print(
847
+ f"WARNING: overriding default arguments for " f"{key}:{getattr(args, key)} with {key}:{value}",
848
+ flush=True,
849
+ )
850
+ setattr(args, key, value)
851
+
852
+ if args.use_checkpoint_args or args_defaults.get("use_checkpoint_args", False):
853
+ assert args.load is not None, "--use-checkpoint-args requires --load argument"
854
+ load_args_from_checkpoint(args)
855
+
856
+ validate_args(args)
857
+
858
+ # set global args, build tokenizer, and set adlr-autoresume,
859
+ # tensorboard-writer, and timers.
860
+ set_global_variables(args)
861
+
862
+ # torch.distributed initialization
863
+ def finish_mpu_init():
864
+ args = get_args()
865
+ # Pytorch distributed.
866
+ device_count = torch.cuda.device_count()
867
+ args.rank = torch.distributed.get_rank()
868
+ args.world_size = torch.distributed.get_world_size()
869
+ if device_count > 0:
870
+ device = args.rank % device_count
871
+ if args.local_rank is not None:
872
+ assert args.local_rank == device, "expected local-rank to be the same as rank % device-count."
873
+ else:
874
+ args.local_rank = device
875
+
876
+ # Set the tensor model-parallel, pipeline model-parallel, and
877
+ # data-parallel communicators.
878
+ if mpu.model_parallel_is_initialized():
879
+ print("model parallel is already initialized")
880
+ else:
881
+ mpu.initialize_model_parallel(
882
+ args.tensor_model_parallel_size,
883
+ args.pipeline_model_parallel_size,
884
+ args.virtual_pipeline_model_parallel_size,
885
+ args.pipeline_model_parallel_split_rank,
886
+ )
887
+
888
+ # Random seeds for reproducibility.
889
+ if args.rank == 0:
890
+ print(f"> setting random seeds to {args.seed} ...")
891
+ _set_random_seed(args.seed, args.data_parallel_random_init)
892
+
893
+ args = get_args()
894
+
895
+ # Megatron's MPU is the master. Complete initialization right away.
896
+ finish_mpu_init()
897
+
898
+ # Autoresume.
899
+ _init_autoresume()
900
+
901
+ # Compile dependencies.
902
+ _compile_dependencies()
903
+
904
+ # Set pytorch JIT layer fusion options and warmup JIT functions.
905
+ set_jit_fusion_options()
906
+ args = get_args()
907
+ args.padded_vocab_size = _vocab_size_with_padding(args.orig_vocab_size, args)
908
+ if args.model_type_name == "bert" and args.pretraining_flag and args.num_labels == 2:
909
+ args.bert_binary_head = True
910
+ else:
911
+ args.bert_binary_head = False
912
+ args.iteration = 0
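+
+ # Hedged usage sketch: `initialize` is normally invoked for you when `Accelerator.prepare` is
+ # used with a Megatron-LM plugin; calling it directly would look like the following, where the
+ # default shown is purely illustrative:
+ #   initialize(accelerator, args_defaults={"micro_batch_size": 1})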
913
+
914
+
915
+ class MegatronEngine(torch.nn.Module):
916
+ """
917
+ Megatron-LM model wrapper
918
+
919
+ Args:
920
+ accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use.
921
+ model: Megatron-LM model
922
+ optimizer: Megatron-LM optimizer
923
+ lr_scheduler: Megatron-LM lr scheduler
924
+ """
925
+
926
+ def __init__(self, accelerator, model, optimizer, scheduler):
927
+ super().__init__()
928
+ self.module = model
929
+ self.base_model = model[0]
930
+ self.optimizer = optimizer
931
+ self.scheduler = scheduler
932
+ args = get_args()
933
+ if accelerator.state.megatron_lm_plugin.custom_train_step_class is not None:
934
+ self.train_step_handler = accelerator.state.megatron_lm_plugin.custom_train_step_class(
935
+ args, **accelerator.state.megatron_lm_plugin.custom_train_step_kwargs
936
+ )
937
+ elif args.model_type_name == "bert":
938
+ self.train_step_handler = BertTrainStep(args)
939
+ elif args.model_type_name == "gpt":
940
+ self.train_step_handler = GPTTrainStep(args)
941
+ elif args.model_type_name == "t5":
942
+ self.train_step_handler = T5TrainStep(args)
943
+ else:
944
+ raise ValueError(f"Unsupported model type: {args.model_type_name}")
945
+ self.optimizer.skipped_iter = False
946
+
947
+ # Tracking loss.
948
+ self.total_loss_dict = {}
949
+ self.eval_total_loss_dict = {}
950
+ self.iteration = 0
951
+ self.report_memory_flag = True
952
+ if args.tensorboard_dir is not None:
953
+ write_args_to_tensorboard()
954
+
955
+ def train(self):
956
+ for model_module in self.module:
957
+ model_module.train()
958
+ self.log_eval_results()
959
+
960
+ def eval(self):
961
+ for model_module in self.module:
962
+ model_module.eval()
963
+
964
+ def train_step(self, **batch_data):
965
+ """
966
+ Training step for Megatron-LM
967
+
968
+ Args:
969
+ batch_data (:obj:`dict`): The batch data to train on.
970
+ """
971
+
972
+ args = get_args()
973
+ timers = get_timers()
974
+
975
+ if len(batch_data) > 0:
976
+ data_chunks = []
977
+ if args.num_micro_batches > 1:
978
+ for i in range(0, args.num_micro_batches):
979
+ data_chunks.append(
980
+ {
981
+ k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size]
982
+ for k, v in batch_data.items()
983
+ }
984
+ )
985
+ else:
986
+ data_chunks = [batch_data]
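+
+ # e.g. with num_micro_batches=2 and micro_batch_size=4, a batch of 8 samples is split into
+ # two dicts of 4 samples each, one per pipeline micro-batch (numbers are illustrative).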
987
+
988
+ if len(self.module) > 1:
989
+ batch_data_iterator = (
990
+ [iter(data_chunks) for _ in range(len(self.module))]
991
+ if len(batch_data) > 0
992
+ else [None] * len(self.module)
993
+ )
994
+ else:
995
+ batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None
996
+
997
+ # Set grad to zero.
998
+ if args.DDP_impl == "local" and args.use_contiguous_buffers_in_local_ddp:
999
+ for partition in self.module:
1000
+ partition.zero_grad_buffer()
1001
+ self.optimizer.zero_grad()
1002
+
1003
+ # Forward pass.
1004
+ forward_backward_func = get_forward_backward_func()
1005
+ losses_reduced = forward_backward_func(
1006
+ self.train_step_handler.forward_step,
1007
+ batch_data_iterator,
1008
+ self.module,
1009
+ self.optimizer,
1010
+ None,
1011
+ forward_only=False,
1012
+ )
1013
+
1014
+ # Empty unused memory.
1015
+ if args.empty_unused_memory_level >= 1:
1016
+ torch.cuda.empty_cache()
1017
+
1018
+ # Reduce gradients.
1019
+ timers("backward-reduce-model-grads").start()
1020
+ self.optimizer.reduce_model_grads(args, timers)
1021
+ timers("backward-reduce-model-grads").stop()
1022
+
1023
+ # Update parameters.
1024
+ timers("optimizer").start()
1025
+ update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step(args, timers)
1026
+ timers("optimizer").stop()
1027
+
1028
+ # Gather params.
1029
+ if update_successful:
1030
+ timers("backward-gather-model-params").start()
1031
+ self.optimizer.gather_model_params(args, timers)
1032
+ timers("backward-gather-model-params").stop()
1033
+
1034
+ # Update learning rate.
1035
+ if update_successful:
1036
+ if self.scheduler is not None:
1037
+ increment = get_num_microbatches() * args.micro_batch_size * args.data_parallel_size
1038
+ self.scheduler.step(increment=increment)
1039
+ skipped_iter = 0
1040
+ else:
1041
+ skipped_iter = 1
1042
+
1043
+ self.optimizer.skipped_iter = not update_successful
1044
+
1045
+ # Empty unused memory.
1046
+ if args.empty_unused_memory_level >= 2:
1047
+ torch.cuda.empty_cache()
1048
+
1049
+ args.consumed_train_samples += (
1050
+ mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()
1051
+ )
1052
+
1053
+ if mpu.is_pipeline_last_stage(ignore_virtual=True):
1054
+ # Average loss across microbatches.
1055
+ loss_reduced = {}
1056
+ for key in losses_reduced[0]:
1057
+ losses_reduced_for_key = [x[key] for x in losses_reduced]
1058
+ if len(losses_reduced_for_key[0].shape) == 0:
1059
+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)
1060
+ else:
1061
+ loss_reduced[key] = torch.concat(losses_reduced_for_key)
1062
+ return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad
1063
+ return {}, skipped_iter, grad_norm, num_zeros_in_grad
1064
+
1065
+ def eval_step(self, **batch_data):
1066
+ """
1067
+ Evaluation step for Megatron-LM
1068
+
1069
+ Args:
1070
+ batch_data (:obj:`dict`): The batch data to evaluate on.
1071
+ """
1072
+
1073
+ args = get_args()
1074
+ data_chunks = []
1075
+ if args.num_micro_batches > 1:
1076
+ for i in range(0, args.num_micro_batches):
1077
+ data_chunks.append(
1078
+ {k: v[i * args.micro_batch_size : (i + 1) * args.micro_batch_size] for k, v in batch_data.items()}
1079
+ )
1080
+ else:
1081
+ data_chunks = [batch_data]
1082
+
1083
+ if len(self.module) > 1:
1084
+ batch_data_iterator = [iter(data_chunks) for _ in range(len(self.module))]
1085
+ else:
1086
+ batch_data_iterator = iter(data_chunks)
1087
+ forward_backward_func = get_forward_backward_func()
1088
+ loss_dicts = forward_backward_func(
1089
+ self.train_step_handler.forward_step,
1090
+ batch_data_iterator,
1091
+ self.module,
1092
+ optimizer=None,
1093
+ timers=None,
1094
+ forward_only=True,
1095
+ )
1096
+ # Empty unused memory
1097
+ if args.empty_unused_memory_level >= 1:
1098
+ torch.cuda.empty_cache()
1099
+
1100
+ args.consumed_valid_samples += (
1101
+ mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()
1102
+ )
1103
+
1104
+ if mpu.is_pipeline_last_stage(ignore_virtual=True):
1105
+ # Average loss across microbatches.
1106
+ loss_reduced = {}
1107
+ for key in loss_dicts[0]:
1108
+ losses_reduced_for_key = [x[key] for x in loss_dicts]
1109
+ if len(losses_reduced_for_key[0].shape) == 0:
1110
+ loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)
1111
+ else:
1112
+ loss_reduced[key] = torch.concat(losses_reduced_for_key)
1113
+ return loss_reduced
1114
+ else:
1115
+ return {}
1116
+
1117
+ def forward(self, **batch_data):
1118
+ # During training, we use train_step()
1119
+ # model(**batch_data) performs the following operations by delegating them to `self.train_step`:
1120
+ # 1. Prepare **batch_data for Tensor, Pipeline and Model Parallelism
1121
+ # 2. Set grad to zero.
1122
+ # 3. forward pass and backward pass using Pipeline Parallelism
1123
+ # 4. Empty unused memory.
1124
+ # 5. Reduce gradients.
1125
+ # 6. Update parameters.
1126
+ # 7. Gather params when using Distributed Optimizer (Data Parallelism).
1127
+ # 8. Update learning rate if scheduler is specified.
1128
+ # 9. Empty unused memory.
1129
+ # 10. Average loss across microbatches and across DP ranks.
1130
+ #
1131
+ # During evaluation, we use eval_step()
1132
+ args = get_args()
1133
+ if self.module[0].training:
1134
+ loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = self.train_step(**batch_data)
1135
+ self.iteration += 1
1136
+ if args.tensorboard_dir is not None:
1137
+ # Logging.
1138
+ loss_scale = self.optimizer.get_loss_scale().item()
1139
+ params_norm = None
1140
+ if args.log_params_norm:
1141
+ params_norm = calc_params_l2_norm(self.model)
1142
+ self.report_memory_flag = training_log(
1143
+ loss_dict,
1144
+ self.total_loss_dict,
1145
+ self.optimizer.param_groups[0]["lr"],
1146
+ self.iteration,
1147
+ loss_scale,
1148
+ self.report_memory_flag,
1149
+ skipped_iter,
1150
+ grad_norm,
1151
+ params_norm,
1152
+ num_zeros_in_grad,
1153
+ )
1154
+ else:
1155
+ loss_dict = self.eval_step(**batch_data)
1156
+ if args.tensorboard_dir is not None:
1157
+ for key in loss_dict:
1158
+ self.eval_total_loss_dict[key] = (
1159
+ self.eval_total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key]
1160
+ )
1161
+ self.eval_total_loss_dict[key + "_num_iters"] = self.eval_total_loss_dict.get(
1162
+ key + "_num_iters", torch.cuda.FloatTensor([0.0])
1163
+ ) + torch.cuda.FloatTensor([1.0])
1164
+
1165
+ loss = torch.tensor(0.0, device=args.local_rank)
1166
+ for key in loss_dict:
1167
+ if len(loss_dict[key].shape) == 0:
1168
+ loss += loss_dict[key]
1169
+
1170
+ logits = None
1171
+ if "logits" in loss_dict:
1172
+ logits = loss_dict["logits"]
1173
+ # loss = reduce(loss)
1174
+ if self.train_step_handler.model_output_class is not None:
1175
+ return self.train_step_handler.model_output_class(loss=loss, logits=logits)
1176
+ return loss
1177
+
1178
+ def log_eval_results(self):
1179
+ args = get_args()
1180
+ if args.tensorboard_dir is None or self.iteration == 0:
1181
+ return
1182
+ args = get_args()
1183
+ writer = get_tensorboard_writer()
1184
+ string = f"validation loss at iteration {self.iteration} | "
1185
+ for key in self.eval_total_loss_dict:
1186
+ if key.endswith("_num_iters"):
1187
+ continue
1188
+ value = self.eval_total_loss_dict[key] / self.eval_total_loss_dict[key + "_num_iters"]
1189
+ string += f"{key} value: {value} | "
1190
+ ppl = math.exp(min(20, value.item()))
1191
+ if args.pretraining_flag:
1192
+ string += f"{key} PPL: {ppl} | "
1193
+ if writer:
1194
+ writer.add_scalar(f"{key} validation", value.item(), self.iteration)
1195
+ if args.pretraining_flag:
1196
+ writer.add_scalar(f"{key} validation ppl", ppl, self.iteration)
1197
+
1198
+ length = len(string) + 1
1199
+ print_rank_last("-" * length)
1200
+ print_rank_last(string)
1201
+ print_rank_last("-" * length)
1202
+ self.eval_total_loss_dict = {}
1203
+
1204
+ def save_checkpoint(self, output_dir):
1205
+ self.log_eval_results()
1206
+ args = get_args()
1207
+ args.save = output_dir
1208
+ torch.distributed.barrier()
1209
+ save_checkpoint(self.iteration, self.module, self.optimizer, self.scheduler)
1210
+ torch.distributed.barrier()
1211
+
1212
+ def load_checkpoint(self, input_dir):
1213
+ args = get_args()
1214
+ args.load = input_dir
1215
+ args.consumed_train_samples = 0
1216
+ args.consumed_valid_samples = 0
1217
+ torch.distributed.barrier()
1218
+ iteration = load_checkpoint(self.module, self.optimizer, self.scheduler)
1219
+ torch.distributed.barrier()
1220
+ self.iteration = iteration
1221
+ if args.fp16 and self.iteration == 0:
1222
+ self.optimizer.reload_model_params()
1223
+
1224
+ def megatron_generate(
1225
+ self,
1226
+ inputs,
1227
+ attention_mask=None,
1228
+ max_length=None,
1229
+ max_new_tokens=None,
1230
+ num_beams=None,
1231
+ temperature=None,
1232
+ top_k=None,
1233
+ top_p=None,
1234
+ length_penalty=None,
1235
+ **kwargs,
1236
+ ):
1237
+ """
1238
+ Generate method for GPT2 model. This method is used for inference. Supports both greedy and beam search along
1239
+ with sampling. Refer to the Megatron-LM repo for more details.
1240
+
1241
+ Args:
1242
+ inputs (torch.Tensor): input ids
1243
+ attention_mask (torch.Tensor, optional): attention mask. Defaults to None.
1244
+ max_length (int, optional): max length of the generated sequence. Defaults to None.
1245
+ Either this or max_new_tokens should be provided.
1246
+ max_new_tokens (int, optional): max number of tokens to be generated. Defaults to None.
1247
+ Either this or max_length should be provided.
1248
+ num_beams (int, optional): number of beams to use for beam search. Defaults to None.
1249
+ temperature (float, optional): temperature for sampling. Defaults to 1.0.
1250
+ top_k (int, optional): top k tokens to consider for sampling. Defaults to 0.
1251
+ top_p (float, optional): tokens in top p probability are considered for sampling. Defaults to 0.0.
1252
+ length_penalty (float, optional): length penalty for beam search. Defaults to None.
1253
+ kwargs: additional key-value arguments
1254
+ """
1255
+
1256
+ # checking if required arguments are passed
1257
+ args = get_args()
1258
+ if args.model_type_name != "gpt":
1259
+ raise NotImplementedError("Generate method is not implemented for this model")
1260
+
1261
+ if args.data_parallel_size > 1:
1262
+ raise ValueError("Generate method requires data parallelism to be 1")
1263
+
1264
+ if args.sequence_parallel:
1265
+ raise ValueError("Generate method requires sequence parallelism to be False")
1266
+
1267
+ if args.recompute_granularity is not None:
1268
+ raise ValueError("Checkpoint activations cannot be set for inference")
1269
+
1270
+ if args.vocab_file is None:
1271
+ raise ValueError("Vocab file is required for inference")
1272
+
1273
+ # Prepare inputs
1274
+ if max_length is None and max_new_tokens is None:
1275
+ raise ValueError("`max_length` or `max_new_tokens` are required for inference")
1276
+
1277
+ if temperature is None:
1278
+ temperature = 1.0
1279
+ elif not (0.0 < temperature <= 100.0):
1280
+ raise ValueError("temperature must be a positive number less than or equal to 100.0")
1281
+
1282
+ if top_k is None:
1283
+ top_k = 0
1284
+ elif not (0 <= top_k <= 1000):
1285
+ raise ValueError("top_k must be a positive number less than or equal to 1000")
1286
+
1287
+ if top_p is None:
1288
+ top_p = 0.0
1289
+ elif top_p > 0.0 and top_k > 0.0:
1290
+ raise ValueError("top_p and top_k sampling cannot be set together")
1291
+ else:
1292
+ if not (0.0 <= top_p <= 1.0):
1293
+ raise ValueError("top_p must be less than or equal to 1.0")
1294
+
1295
+ top_p_decay = kwargs.get("top_p_decay", 0.0)
1296
+ if not (0.0 <= top_p_decay <= 1.0):
1297
+ raise ValueError("top_p_decay must be less than or equal to 1.0")
1298
+
1299
+ top_p_bound = kwargs.get("top_p_bound", 0.0)
1300
+ if not (0.0 <= top_p_bound <= 1.0):
1301
+ raise ValueError("top_p_bound must be less than or equal to 1.0")
1302
+
1303
+ add_BOS = kwargs.get("add_BOS", False)
1304
+ if not (isinstance(add_BOS, bool)):
1305
+ raise ValueError("add_BOS must be a boolean")
1306
+
1307
+ beam_width = num_beams
1308
+ if beam_width is not None:
1309
+ if not isinstance(beam_width, int):
1310
+ raise ValueError("beam_width must be an integer")
1311
+ if beam_width < 1:
1312
+ raise ValueError("beam_width must be greater than 0")
1313
+ if inputs.shape[0] > 1:
1314
+ return "When doing beam_search, batch size must be 1"
1315
+
1316
+ tokenizer = get_tokenizer()
1317
+
1318
+ stop_token = kwargs.get("stop_token", tokenizer.eod)
1319
+ if stop_token is not None:
1320
+ if not isinstance(stop_token, int):
1321
+ raise ValueError("stop_token must be an integer")
1322
+
1323
+ if length_penalty is None:
1324
+ length_penalty = 1.0
1325
+
1326
+ sizes_list = None
1327
+ prompts_tokens_tensor = None
1328
+ prompts_length_tensor = None
1329
+ if torch.distributed.get_rank() == 0:
1330
+ # Get the prompts length.
1331
+ if attention_mask is None:
1332
+ prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0])
1333
+ else:
1334
+ prompts_length_tensor = attention_mask.sum(axis=-1).cuda()
1335
+
1336
+ if max_new_tokens is None:
1337
+ max_new_tokens = max_length - inputs.shape[1]
1338
+ if max_new_tokens <= 0:
1339
+ raise ValueError("max_new_tokens must be greater than 0")
1340
+
1341
+ if add_BOS:
1342
+ max_length = max_new_tokens + inputs.shape[1] + 1
1343
+ # making sure that `max_length` is a multiple of 4 to leverage fused kernels
1344
+ max_length = 4 * math.ceil(max_length / 4)
1345
+ max_new_tokens = max_length - (inputs.shape[1] + 1)
1346
+ padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])
1347
+ prompts_tokens_tensor = torch.concat(
1348
+ [torch.unsqueeze(padding[:, 0], axis=-1), inputs.cuda(), padding], axis=-1
1349
+ )
1350
+ else:
1351
+ # making sure that `max_length` is a multiple of 4 to leverage fused kernels
1352
+ max_length = max_new_tokens + inputs.shape[1]
1353
+ max_length = 4 * math.ceil(max_length / 4)
1354
+ max_new_tokens = max_length - inputs.shape[1]
1355
+ padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])
1356
+ prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1)
1357
+
1358
+ # We need the sizes of these tensors for the broadcast
1359
+ sizes_list = [
1360
+ prompts_tokens_tensor.size(0), # Batch size
1361
+ prompts_tokens_tensor.size(1), # Sequence length
1362
+ ]
1363
+
1364
+ # First, broadcast the sizes.
1365
+ sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0)
1366
+
1367
+ # Now that we have the sizes, we can broadcast the tokens
1368
+ # and length tensors.
1369
+ sizes = sizes_tensor.tolist()
1370
+ context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0)
1371
+ context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0)
1372
+
1373
+ # Run the inference
1374
+ random_seed = kwargs.get("random_seed", 0)
1375
+ torch.random.manual_seed(random_seed)
1376
+ unwrapped_model = unwrap_model(self.base_model, (torchDDP, LocalDDP, Float16Module))
1377
+ if beam_width is not None:
1378
+ tokens, _ = beam_search_and_return_on_first_stage(
1379
+ unwrapped_model,
1380
+ context_tokens_tensor,
1381
+ context_length_tensor,
1382
+ beam_width,
1383
+ stop_token=stop_token,
1384
+ num_return_gen=1,
1385
+ length_penalty=length_penalty,
1386
+ )
1387
+ else:
1388
+ tokens, _, _ = generate_tokens_probs_and_return_on_first_stage(
1389
+ unwrapped_model,
1390
+ context_tokens_tensor,
1391
+ context_length_tensor,
1392
+ return_output_log_probs=False,
1393
+ top_k=top_k,
1394
+ top_p=top_p,
1395
+ top_p_decay=top_p_decay,
1396
+ top_p_bound=top_p_bound,
1397
+ temperature=temperature,
1398
+ use_eod_token_for_early_termination=True,
1399
+ )
1400
+ return tokens
1401
+
1402
+
1403
+ # other utilities
1404
+ def avg_losses_across_data_parallel_group(losses):
1405
+ """
1406
+ Average losses across data parallel group.
1407
+
1408
+ Args:
1409
+ losses (List[Tensor]): List of losses to average across data parallel group.
1410
+ """
1411
+
1412
+ return average_losses_across_data_parallel_group(losses)
1413
+
1414
+
1415
+ def gather_across_data_parallel_groups(tensor):
1416
+ """
1417
+ Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks.
1418
+
1419
+ Args:
1420
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
1421
+ The data to gather across data parallel ranks.
1422
+
1423
+ """
1424
+
1425
+ def _gpu_gather_one(tensor):
1426
+ if tensor.ndim == 0:
1427
+ tensor = tensor.clone()[None]
1428
+ output_tensors = [
1429
+ torch.empty_like(tensor)
1430
+ for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group()))
1431
+ ]
1432
+ torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group())
1433
+ return torch.cat(output_tensors, dim=0)
1434
+
1435
+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
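+
+ # Illustrative: in a 2-way data-parallel group, each rank passing a tensor of shape [b, ...]
+ # gets back a tensor of shape [2 * b, ...], with rank 0's slice first (all_gather rank order).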
llmeval-env/lib/python3.10/site-packages/accelerate/utils/modeling.py ADDED
@@ -0,0 +1,1802 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import contextlib
16
+ import gc
17
+ import importlib
18
+ import inspect
19
+ import json
20
+ import logging
21
+ import os
22
+ import re
23
+ import shutil
24
+ import tempfile
25
+ import warnings
26
+ from collections import OrderedDict, defaultdict
27
+ from typing import Dict, List, Optional, Tuple, Union
28
+
29
+ import packaging
30
+ import torch
31
+ import torch.nn as nn
32
+
33
+ from ..state import AcceleratorState
34
+ from .constants import SAFE_WEIGHTS_NAME, WEIGHTS_NAME
35
+ from .dataclasses import AutocastKwargs, CustomDtype, DistributedType
36
+ from .imports import (
37
+ is_mlu_available,
38
+ is_mps_available,
39
+ is_npu_available,
40
+ is_peft_available,
41
+ is_torch_xla_available,
42
+ is_xpu_available,
43
+ )
44
+ from .offload import load_offloaded_weight, offload_weight, save_offload_index
45
+ from .tqdm import is_tqdm_available, tqdm
46
+ from .versions import compare_versions
47
+
48
+
49
+ if is_npu_available(check_device=False):
50
+ import torch_npu # noqa: F401
51
+
52
+ if is_mlu_available(check_device=False):
53
+ import torch_mlu # noqa: F401
54
+
55
+ from safetensors import safe_open
56
+ from safetensors.torch import load_file as safe_load_file
57
+
58
+
59
+ WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
60
+
61
+ logger = logging.getLogger(__name__)
62
+
63
+
64
+ def is_peft_model(model):
65
+ from .other import extract_model_from_parallel
66
+
67
+ if is_peft_available():
68
+ from peft import PeftModel
69
+
70
+ return is_peft_available() and isinstance(extract_model_from_parallel(model), PeftModel)
71
+
72
+
73
+ def check_device_same(first_device, second_device):
74
+ """
75
+ Utility method to check if two `torch` devices are similar. When dealing with CUDA devices, torch returns `False`
76
+ for `torch.device("cuda") == torch.device("cuda:0")` whereas they should be the same
77
+
78
+ Args:
79
+ first_device (`torch.device`):
80
+ First device to check
81
+ second_device (`torch.device`):
82
+ Second device to check
83
+ """
84
+ if first_device.type != second_device.type:
85
+ return False
86
+
87
+ if first_device.type == "cuda" and first_device.index is None:
88
+ # In case the first_device is a cuda device and have
89
+ # the index attribute set to `None`, default it to `0`
90
+ first_device = torch.device("cuda", index=0)
91
+
92
+ if second_device.type == "cuda" and second_device.index is None:
93
+ # In case the second_device is a cuda device and have
94
+ # the index attribute set to `None`, default it to `0`
95
+ second_device = torch.device("cuda", index=0)
96
+
97
+ return first_device == second_device
98
+
99
+
100
+ def convert_file_size_to_int(size: Union[int, str]):
101
+ """
102
+ Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).
103
+
104
+ Args:
105
+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
106
+
107
+ Example:
108
+
109
+ ```py
110
+ >>> convert_file_size_to_int("1MiB")
111
+ 1048576
112
+ ```
113
+ """
114
+ mem_size = -1
115
+ err_msg = (
116
+ f"`size` {size} is not in a valid format. Use an integer for bytes, or a string with an unit (like '5.0GB')."
117
+ )
118
+ try:
119
+ if isinstance(size, int):
120
+ mem_size = size
121
+ elif size.upper().endswith("GIB"):
122
+ mem_size = int(float(size[:-3]) * (2**30))
123
+ elif size.upper().endswith("MIB"):
124
+ mem_size = int(float(size[:-3]) * (2**20))
125
+ elif size.upper().endswith("KIB"):
126
+ mem_size = int(float(size[:-3]) * (2**10))
127
+ elif size.upper().endswith("GB"):
128
+ int_size = int(float(size[:-2]) * (10**9))
129
+ mem_size = int_size // 8 if size.endswith("b") else int_size
130
+ elif size.upper().endswith("MB"):
131
+ int_size = int(float(size[:-2]) * (10**6))
132
+ mem_size = int_size // 8 if size.endswith("b") else int_size
133
+ elif size.upper().endswith("KB"):
134
+ int_size = int(float(size[:-2]) * (10**3))
135
+ mem_size = int_size // 8 if size.endswith("b") else int_size
136
+ except ValueError:
137
+ raise ValueError(err_msg)
138
+
139
+ if mem_size < 0:
140
+ raise ValueError(err_msg)
141
+ return mem_size
142
+
143
+
144
+ def dtype_byte_size(dtype: torch.dtype):
145
+ """
146
+ Returns the size (in bytes) occupied by one parameter of type `dtype`.
147
+
148
+ Example:
149
+
150
+ ```py
151
+ >>> dtype_byte_size(torch.float32)
152
+ 4
153
+ ```
154
+ """
155
+ if dtype == torch.bool:
156
+ return 1 / 8
157
+ elif dtype == CustomDtype.INT2:
158
+ return 1 / 4
159
+ elif dtype == CustomDtype.INT4:
160
+ return 1 / 2
161
+ elif dtype == CustomDtype.FP8:
162
+ return 1
163
+ bit_search = re.search(r"[^\d](\d+)$", str(dtype))
164
+ if bit_search is None:
165
+ raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
166
+ bit_size = int(bit_search.groups()[0])
167
+ return bit_size // 8
168
+
169
+
170
+ def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]:
171
+ """
172
+ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
173
+ example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is
174
+ guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
175
+ non-overlapping lifetimes may have the same id.
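+
+ Example (a small sketch; a slicing view shares its base tensor's storage):
+
+ ```py
+ >>> t = torch.zeros(8)
+ >>> id_tensor_storage(t) == id_tensor_storage(t[2:])
+ True
+ ```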
176
+ """
177
+ _SIZE = {
178
+ torch.int64: 8,
179
+ torch.float32: 4,
180
+ torch.int32: 4,
181
+ torch.bfloat16: 2,
182
+ torch.float16: 2,
183
+ torch.int16: 2,
184
+ torch.uint8: 1,
185
+ torch.int8: 1,
186
+ torch.bool: 1,
187
+ torch.float64: 8,
188
+ }
189
+ try:
190
+ storage_ptr = tensor.untyped_storage().data_ptr()
191
+ storage_size = tensor.untyped_storage().nbytes()
192
+ except Exception:
193
+ # Fallback for torch==1.10
194
+ try:
195
+ storage_ptr = tensor.storage().data_ptr()
196
+ storage_size = tensor.storage().size() * _SIZE[tensor.dtype]
197
+ except NotImplementedError:
198
+ # Fallback for meta storage
199
+ storage_ptr = 0
200
+ # On torch >=2.0 this is the tensor size
201
+ storage_size = tensor.nelement() * _SIZE[tensor.dtype]
202
+
203
+ return tensor.device, storage_ptr, storage_size
204
+
205
+
206
+ def shard_checkpoint(
207
+ state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME
208
+ ):
209
+ """
210
+ Splits a model state dictionary into sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
211
+ given size.
212
+
213
+ The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
214
+ optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
215
+ limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
216
+ [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
217
+
218
+ <Tip warning={true}>
219
+
220
+ If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will
221
+ have a size greater than `max_shard_size`.
222
+
223
+ </Tip>
224
+
225
+ Args:
226
+ state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save.
227
+ max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
228
+ The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
229
+ (like `"5MB"`).
230
+ weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`):
231
+ The name of the model save file.
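+
+ Example (illustrative; the tiny tensors and the 2-byte limit are made up to force two shards):
+
+ ```py
+ >>> sd = {"a": torch.zeros(2, dtype=torch.uint8), "b": torch.zeros(2, dtype=torch.uint8)}
+ >>> shards, index = shard_checkpoint(sd, max_shard_size=2)
+ >>> sorted(shards.keys())
+ ['pytorch_model-00001-of-00002.bin', 'pytorch_model-00002-of-00002.bin']
+ ```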
232
+ """
233
+ max_shard_size = convert_file_size_to_int(max_shard_size)
234
+
235
+ sharded_state_dicts = [{}]
236
+ last_block_size = 0
237
+ total_size = 0
238
+ storage_id_to_block = {}
239
+
240
+ for key, weight in state_dict.items():
241
+ # when bnb serialization is used the weights in the state dict can be strings
242
+ # check: https://github.com/huggingface/transformers/pull/24416 for more details
243
+ if isinstance(weight, str):
244
+ continue
245
+ else:
246
+ storage_id = id_tensor_storage(weight)
247
+
248
+ # If a `weight` shares the same underlying storage as another tensor, we put `weight` in the same `block`
249
+ if storage_id in storage_id_to_block:
250
+ block_id = storage_id_to_block[storage_id]
251
+ sharded_state_dicts[block_id][key] = weight
252
+ continue
253
+
254
+ weight_size = weight.numel() * dtype_byte_size(weight.dtype)
255
+
256
+ # If this weight is going to tip us over the maximal size, we split.
257
+ if last_block_size + weight_size > max_shard_size:
258
+ sharded_state_dicts.append({})
259
+ last_block_size = 0
260
+
261
+ sharded_state_dicts[-1][key] = weight
262
+ last_block_size += weight_size
263
+ total_size += weight_size
264
+ storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1
265
+
266
+ # If we only have one shard, we return it
267
+ if len(sharded_state_dicts) == 1:
268
+ return {weights_name: sharded_state_dicts[0]}, None
269
+
270
+ # Otherwise, let's build the index
271
+ weight_map = {}
272
+ shards = {}
273
+ for idx, shard in enumerate(sharded_state_dicts):
274
+ shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin")
275
+ shard_file = shard_file.replace(
276
+ ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors"
277
+ )
278
+ shards[shard_file] = shard
279
+ for key in shard.keys():
280
+ weight_map[key] = shard_file
281
+
282
+ # Add the metadata
283
+ metadata = {"total_size": total_size}
284
+ index = {"metadata": metadata, "weight_map": weight_map}
285
+ return shards, index
286
+
287
+
288
+ def set_module_tensor_to_device(
289
+ module: nn.Module,
290
+ tensor_name: str,
291
+ device: Union[int, str, torch.device],
292
+ value: Optional[torch.Tensor] = None,
293
+ dtype: Optional[Union[str, torch.dtype]] = None,
294
+ fp16_statistics: Optional[torch.HalfTensor] = None,
295
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
296
+ ):
297
+ """
298
+ A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
299
+ `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).
300
+
301
+ Args:
302
+ module (`torch.nn.Module`):
303
+ The module in which the tensor we want to move lives.
304
+ tensor_name (`str`):
305
+ The full name of the parameter/buffer.
306
+ device (`int`, `str` or `torch.device`):
307
+ The device on which to set the tensor.
308
+ value (`torch.Tensor`, *optional*):
309
+ The value of the tensor (useful when going from the meta device to any other device).
310
+ dtype (`torch.dtype`, *optional*):
311
+ If passed along the value of the parameter will be cast to this `dtype`. Otherwise, `value` will be cast to
312
+ the dtype of the existing parameter in the model.
313
+ fp16_statistics (`torch.HalfTensor`, *optional*):
314
+ The list of fp16 statistics to set on the module, used for 8 bit model serialization.
315
+ tied_params_map (Dict[int, Dict[torch.device, torch.Tensor]], *optional*, defaults to `None`):
316
+ A map of current data pointers to dictionaries of devices to already dispatched tied weights. For a given
317
+ execution device, this parameter is useful to reuse the first available pointer of a shared weight on the
318
+ device for all others, instead of duplicating memory.
319
+ """
320
+ # Recurse if needed
321
+ if "." in tensor_name:
322
+ splits = tensor_name.split(".")
323
+ for split in splits[:-1]:
324
+ new_module = getattr(module, split)
325
+ if new_module is None:
326
+ raise ValueError(f"{module} has no attribute {split}.")
327
+ module = new_module
328
+ tensor_name = splits[-1]
329
+
330
+ if tensor_name not in module._parameters and tensor_name not in module._buffers:
331
+ raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
332
+ is_buffer = tensor_name in module._buffers
333
+ old_value = getattr(module, tensor_name)
334
+
335
+ # Treat the case where old_value (or a custom `value`, typically offloaded to RAM/disk) belongs to a tied group, and one of the weight
336
+ # in the tied group has already been dispatched to the device, by avoiding reallocating memory on the device and just copying the pointer.
337
+ if (
338
+ value is not None
339
+ and tied_params_map is not None
340
+ and value.data_ptr() in tied_params_map
341
+ and device in tied_params_map[value.data_ptr()]
342
+ ):
343
+ module._parameters[tensor_name] = tied_params_map[value.data_ptr()][device]
344
+ return
345
+ elif (
346
+ tied_params_map is not None
347
+ and old_value.data_ptr() in tied_params_map
348
+ and device in tied_params_map[old_value.data_ptr()]
349
+ ):
350
+ module._parameters[tensor_name] = tied_params_map[old_value.data_ptr()][device]
351
+ return
352
+
353
+ if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
354
+ raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")
355
+
356
+ if value is not None:
357
+ if old_value.shape != value.shape:
358
+ raise ValueError(
359
+ f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this looks incorrect.'
360
+ )
361
+
362
+ if dtype is None:
363
+ # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model
364
+ value = value.to(old_value.dtype)
365
+ elif not str(value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
366
+ value = value.to(dtype)
367
+
368
+ param = module._parameters[tensor_name] if tensor_name in module._parameters else None
369
+ param_cls = type(param)
370
+
371
+ device_quantization = None
372
+ with torch.no_grad():
373
+ # leave it on cpu first before moving them to cuda
374
+ # fix the case where the device is meta: we don't want to put it on cpu because there is no data
375
+ if (
376
+ param is not None
377
+ and param.device.type != "cuda"
378
+ and torch.device(device).type == "cuda"
379
+ and param_cls.__name__ in ["Int8Params", "FP4Params", "Params4bit"]
380
+ ):
381
+ device_quantization = device
382
+ device = "cpu"
383
+ # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
384
+ if isinstance(device, int):
385
+ if is_npu_available():
386
+ device = f"npu:{device}"
387
+ elif is_mlu_available():
388
+ device = f"mlu:{device}"
389
+ elif is_xpu_available():
390
+ device = f"xpu:{device}"
391
+ if value is None:
392
+ new_value = old_value.to(device)
393
+ if dtype is not None and device in ["meta", torch.device("meta")]:
394
+ if not str(old_value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
395
+ new_value = new_value.to(dtype)
396
+
397
+ if not is_buffer:
398
+ module._parameters[tensor_name] = param_cls(new_value, requires_grad=old_value.requires_grad)
399
+ elif isinstance(value, torch.Tensor):
400
+ new_value = value.to(device)
401
+ else:
402
+ new_value = torch.tensor(value, device=device)
403
+ if device_quantization is not None:
404
+ device = device_quantization
405
+ if is_buffer:
406
+ module._buffers[tensor_name] = new_value
407
+ elif value is not None or not check_device_same(torch.device(device), module._parameters[tensor_name].device):
408
+ param_cls = type(module._parameters[tensor_name])
409
+ kwargs = module._parameters[tensor_name].__dict__
410
+ if param_cls.__name__ in ["Int8Params", "FP4Params"]:
411
+ if param_cls.__name__ == "Int8Params" and new_value.dtype == torch.float32:
412
+ # downcast to fp16 if any - needed for 8bit serialization
413
+ new_value = new_value.to(torch.float16)
414
+ # quantize module that are going to stay on the cpu so that we offload quantized weights
415
+ if device == "cpu" and param_cls.__name__ == "Int8Params":
416
+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(0).to("cpu")
417
+ new_value.CB = new_value.CB.to("cpu")
418
+ new_value.SCB = new_value.SCB.to("cpu")
419
+ else:
420
+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device)
421
+ elif param_cls.__name__ in ["QTensor", "QBitsTensor"]:
422
+ new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad).to(device)
423
+ else:
424
+ new_value = param_cls(new_value, requires_grad=old_value.requires_grad).to(device)
425
+
426
+ module._parameters[tensor_name] = new_value
427
+ if fp16_statistics is not None:
428
+ module._parameters[tensor_name].SCB = fp16_statistics.to(device)
429
+ del fp16_statistics
430
+ # as we put the weight to meta, it doesn't have SCB attr anymore. make sure that it is not a meta weight
431
+ if (
432
+ module.__class__.__name__ == "Linear8bitLt"
433
+ and getattr(module.weight, "SCB", None) is None
434
+ and str(module.weight.device) != "meta"
435
+ ):
436
+ # quantize only if necessary
437
+ device_index = torch.device(device).index if torch.device(device).type == "cuda" else None
438
+ if not getattr(module.weight, "SCB", None) and device_index is not None:
439
+ if module.bias is not None and module.bias.device.type != "meta":
440
+ # if a bias exists, we need to wait until the bias is set on the correct device
441
+ module = module.cuda(device_index)
442
+ elif module.bias is None:
443
+ # if no bias exists, we can quantize right away
444
+ module = module.cuda(device_index)
445
+ elif module.__class__.__name__ == "Linear4bit" and getattr(module.weight, "quant_state", None) is None:
446
+ # quantize only if necessary
447
+ device_index = torch.device(device).index if torch.device(device).type == "cuda" else None
448
+ if not getattr(module.weight, "quant_state", None) and device_index is not None:
449
+ module.weight = module.weight.cuda(device_index)
450
+ # clean pre and post forward hook
451
+ if device != "cpu":
452
+ if is_npu_available():
453
+ torch.npu.empty_cache()
454
+ elif is_mlu_available():
455
+ torch.mlu.empty_cache()
456
+ elif is_xpu_available():
457
+ torch.xpu.empty_cache()
458
+ else:
459
+ torch.cuda.empty_cache()
460
+
461
+ # When handling tied weights, we update tied_params_map to keep track of the tied weights that have already been allocated on the device in
462
+ # order to avoid duplicating memory, see above.
463
+ if (
464
+ tied_params_map is not None
465
+ and old_value.data_ptr() in tied_params_map
466
+ and device not in tied_params_map[old_value.data_ptr()]
467
+ ):
468
+ tied_params_map[old_value.data_ptr()][device] = new_value
469
+ elif (
470
+ value is not None
471
+ and tied_params_map is not None
472
+ and value.data_ptr() in tied_params_map
473
+ and device not in tied_params_map[value.data_ptr()]
474
+ ):
475
+ tied_params_map[value.data_ptr()][device] = new_value
476
+
477
+
478
+ def named_module_tensors(
479
+ module: nn.Module, include_buffers: bool = True, recurse: bool = False, remove_non_persistent: bool = False
480
+ ):
481
+ """
482
+ A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True`
483
+ it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.
484
+
485
+ Args:
486
+ module (`torch.nn.Module`):
487
+ The module we want the tensors on.
488
+ include_buffers (`bool`, *optional*, defaults to `True`):
489
+ Whether or not to include the buffers in the result.
490
+ recurse (`bool`, *optional*, defaults to `False`):
491
+ Whether or not to go look in every submodule or just return the direct parameters and buffers.
492
+ remove_non_persistent (`bool`, *optional*, defaults to `False`):
493
+ Whether or not to remove the non-persistent buffers from the buffers. Only useful when
495
+ `include_buffers=True`.
495
+ """
496
+ yield from module.named_parameters(recurse=recurse)
497
+
498
+ if include_buffers:
499
+ non_persistent_buffers = set()
500
+ if remove_non_persistent:
501
+ non_persistent_buffers = get_non_persistent_buffers(module, recurse=recurse)
502
+ for named_buffer in module.named_buffers(recurse=recurse):
503
+ name, _ = named_buffer
504
+ if name not in non_persistent_buffers:
505
+ yield named_buffer
506
+
507
+
508
+ def get_non_persistent_buffers(module: nn.Module, recurse: bool = False):
509
+ """
510
+ Gather all non persistent buffers of a given modules into a set
511
+
512
+ Args:
513
+ module (`nn.Module`):
514
+ The module we want the non persistent buffers on.
515
+ recurse (`bool`, *optional*, defaults to `False`):
516
+ Whether or not to go look in every submodule or just return the direct non persistent buffers.
517
+ """
518
+
519
+ non_persistent_buffers_set = module._non_persistent_buffers_set
520
+ if recurse:
521
+ for _, m in module.named_modules():
522
+ non_persistent_buffers_set |= m._non_persistent_buffers_set
523
+
524
+ return non_persistent_buffers_set
525
+
526
+
527
+ class FindTiedParametersResult(list):
528
+ """
529
+ This is a subclass of a list kept for backward compatibility with Transformers. Do not rely on it being anything
530
+ more than a plain list, nor on the `values` method, as both will be removed in the future.
531
+ """
532
+
533
+ def __init__(self, *args, **kwargs):
534
+ super().__init__(*args, **kwargs)
535
+
536
+ def values(self):
537
+ # TODO: at the next Transformers release (4.28.0) issue a deprecation warning here.
538
+ return sum([x[1:] for x in self], [])
539
+
540
+
541
+ def check_tied_parameters_in_config(model: nn.Module):
542
+ """
543
+ Check if there is any indication in the given model that some weights should be tied.
544
+
545
+ Args:
546
+ model (`torch.nn.Module`): The model to inspect
547
+
548
+ Returns:
549
+ bool: True if the model needs to have tied weights
550
+ """
551
+
552
+ # based on model.tie_weights() method
553
+ has_tied_word_embedding = False
554
+ has_tied_encoder_decoder = False
555
+ has_tied_module = False
556
+
557
+ if "PreTrainedModel" in [c.__name__ for c in inspect.getmro(model.__class__)]:
558
+ has_tied_word_embedding = (
559
+ hasattr(model, "config")
560
+ and getattr(model.config, "tie_word_embeddings", False)
561
+ and model.get_output_embeddings()
562
+ )
563
+ has_tied_encoder_decoder = (
564
+ hasattr(model, "config")
565
+ and getattr(model.config, "is_encoder_decoder", False)
566
+ and getattr(model.config, "tie_encoder_decoder", False)
567
+ )
568
+ has_tied_module = any(hasattr(module, "_tie_weights") for module in model.modules())
569
+
570
+ return any([has_tied_word_embedding, has_tied_encoder_decoder, has_tied_module])
571
+
572
+
573
+ def _get_param_device(param, device_map):
574
+ if param in device_map:
575
+ return device_map[param]
576
+ parent_param = ".".join(param.split(".")[:-1])
577
+ if parent_param == param:
578
+ raise ValueError(f"The `device_map` does not contain the module {param}.")
579
+ else:
580
+ return _get_param_device(parent_param, device_map)
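+
+ # Illustrative: with device_map = {"encoder": 0, "decoder": "cpu"}, looking up
+ # "encoder.layer.3.weight" walks up through its parent names until it hits "encoder" and returns 0.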
581
+
582
+
583
+ def check_tied_parameters_on_same_device(tied_params, device_map):
584
+ """
585
+ Check if tied parameters are on the same device
586
+
587
+ Args:
588
+ tied_params (`List[List[str]]`):
589
+ A list of lists of parameter names being all tied together.
590
+
591
+ device_map (`Dict[str, Union[int, str, torch.device]]`):
592
+ A map that specifies where each submodule should go.
593
+
594
+ """
595
+ for tie_param in tied_params:
596
+ tie_param_devices = {}
597
+ for param in tie_param:
598
+ tie_param_devices[param] = _get_param_device(param, device_map)
599
+ if len(set(tie_param_devices.values())) > 1:
600
+ logger.warning(
601
+ f"Tied parameters are on different devices: {tie_param_devices}. "
602
+ "Please modify your custom device map or set `device_map='auto'`. "
603
+ )
604
+
605
+
606
+ def find_tied_parameters(model: nn.Module, **kwargs):
607
+ """
608
+ Find the tied parameters in a given model.
609
+
610
+ <Tip warning={true}>
611
+
612
+ The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore
613
+ them.
614
+
615
+ </Tip>
616
+
617
+ Args:
618
+ model (`torch.nn.Module`): The model to inspect.
619
+
620
+ Returns:
621
+ List[List[str]]: A list of lists of parameter names being all tied together.
622
+
623
+ Example:
624
+
625
+ ```py
626
+ >>> from collections import OrderedDict
627
+ >>> import torch.nn as nn
628
+
629
+ >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))]))
630
+ >>> model.linear2.weight = model.linear1.weight
631
+ >>> find_tied_parameters(model)
632
+ [['linear1.weight', 'linear2.weight']]
633
+ ```
634
+ """
635
+ # Initialize result and named_parameters before recursing.
636
+ named_parameters = kwargs.get("named_parameters", None)
637
+ prefix = kwargs.get("prefix", "")
638
+ result = kwargs.get("result", {})
639
+
640
+ if named_parameters is None:
641
+ named_parameters = {n: p for n, p in model.named_parameters()}
642
+ else:
643
+ # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters`
644
+ # of the submodule it belongs to. So while recursing we track the names that are not in the initial
645
+ # `named_parameters`.
646
+ for name, parameter in model.named_parameters():
647
+ full_name = name if prefix == "" else f"{prefix}.{name}"
648
+ if full_name not in named_parameters:
649
+ # When we find one, it has to be one of the existing parameters.
650
+ for new_name, new_param in named_parameters.items():
651
+ if new_param is parameter:
652
+ if new_name not in result:
653
+ result[new_name] = []
654
+ result[new_name].append(full_name)
655
+
656
+ # Once we have treated direct parameters, we move to the child modules.
657
+ for name, child in model.named_children():
658
+ child_name = name if prefix == "" else f"{prefix}.{name}"
659
+ find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)
660
+
661
+ return FindTiedParametersResult([sorted([weight] + list(set(tied))) for weight, tied in result.items()])
662
+
663
+
664
+ def retie_parameters(model, tied_params):
665
+ """
666
+ Reties tied parameters in a given model if the link was broken (for instance when adding hooks).
667
+
668
+ Args:
669
+ model (`torch.nn.Module`):
670
+ The model in which to retie parameters.
671
+ tied_params (`List[List[str]]`):
672
+ A list of lists of tied parameter names as obtained by `find_tied_parameters`.
673
+ """
674
+ for tied_group in tied_params:
675
+ param_to_tie = None
676
+ # two loops: the first one finds param_to_tie, the second one sets every parameter in tied_group to it
677
+ for param_name in tied_group:
678
+ module = model
679
+ splits = param_name.split(".")
680
+ for split in splits[:-1]:
681
+ module = getattr(module, split)
682
+ param = getattr(module, splits[-1])
683
+ if param_to_tie is None and param.device != torch.device("meta"):
684
+ param_to_tie = param
685
+ break
686
+ if param_to_tie is not None:
687
+ for param_name in tied_group:
688
+ module = model
689
+ splits = param_name.split(".")
690
+ for split in splits[:-1]:
691
+ module = getattr(module, split)
692
+ setattr(module, splits[-1], param_to_tie)
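+
+ # Usage sketch: capture the tied groups first, then retie after an operation (such as adding
+ # hooks) that may have replaced the shared parameters:
+ #   tied = find_tied_parameters(model)
+ #   ...  # device placement / hook attachment
+ #   retie_parameters(model, tied)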
693
+
694
+
695
+ def _get_proper_dtype(dtype: Union[str, torch.dtype]) -> torch.dtype:
696
+ """
697
+ Converts `dtype` to the corresponding `torch.dtype` if it was passed as a string.
698
+ """
699
+ if isinstance(dtype, str):
700
+ # We accept "torch.float16" or just "float16"
701
+ dtype = dtype.replace("torch.", "")
702
+ dtype = getattr(torch, dtype)
703
+ return dtype
704
+
705
+
706
+ def compute_module_sizes(
707
+ model: nn.Module,
708
+ dtype: Optional[Union[str, torch.dtype]] = None,
709
+ special_dtypes: Optional[Dict[str, Union[str, torch.dtype]]] = None,
710
+ buffers_only: bool = False,
711
+ ):
712
+ """
713
+ Compute the size of each submodule of a given model.
714
+ """
715
+ if dtype is not None:
716
+ dtype = _get_proper_dtype(dtype)
717
+ dtype_size = dtype_byte_size(dtype)
718
+ if special_dtypes is not None:
719
+ special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()}
720
+ special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()}
721
+ module_sizes = defaultdict(int)
722
+
723
+ module_list = []
724
+
725
+ if not buffers_only:
726
+ module_list = named_module_tensors(model, recurse=True)
727
+ else:
728
+ module_list = model.named_buffers(recurse=True)
729
+
730
+ for name, tensor in module_list:
731
+ if special_dtypes is not None and name in special_dtypes:
732
+ size = tensor.numel() * special_dtypes_size[name]
733
+ elif dtype is None:
734
+ size = tensor.numel() * dtype_byte_size(tensor.dtype)
735
+ elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
736
+ # According to the code in set_module_tensor_to_device, these types won't be converted
737
+ # so use their original size here
738
+ size = tensor.numel() * dtype_byte_size(tensor.dtype)
739
+ else:
740
+ size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype))
741
+ name_parts = name.split(".")
742
+ for idx in range(len(name_parts) + 1):
743
+ module_sizes[".".join(name_parts[:idx])] += size
744
+
745
+ return module_sizes
746
+
747
+
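To make the size bookkeeping concrete, here is a small sketch (assuming `compute_module_sizes` above is importable from `accelerate.utils.modeling`); sizes are in bytes and every dotted prefix of a tensor name gets credited:

```python
import torch.nn as nn

from accelerate.utils.modeling import compute_module_sizes

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
sizes = compute_module_sizes(model)

# In fp32, Linear(4, 4) holds (16 weights + 4 biases) * 4 bytes = 80 bytes.
print(sizes["0.weight"])  # 64
print(sizes["0"])         # 80
print(sizes[""])          # 80 + (8 + 2) * 4 = 120 for the whole model

# Asking for fp16 halves every floating-point tensor.
print(compute_module_sizes(model, dtype="float16")[""])  # 60
```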
748
+ def compute_module_total_buffer_size(
749
+ model: nn.Module,
750
+ dtype: Optional[Union[str, torch.dtype]] = None,
751
+ special_dtypes: Optional[Dict[str, Union[str, torch.dtype]]] = None,
752
+ ):
753
+ """
754
+ Compute the total size of buffers in each submodule of a given model.
755
+ """
756
+ module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes, buffers_only=True)
757
+ return module_sizes.get("", 0)
758
+
759
+
760
+ def get_max_layer_size(
761
+ modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]
762
+ ):
763
+ """
764
+ Utility function that will scan a list of named modules and return the maximum size used by one full layer. The
765
+ definition of a layer being:
766
+ - a module with no direct children (just parameters and buffers)
767
+ - a module whose class name is in the list `no_split_module_classes`
768
+
769
+ Args:
770
+ modules (`List[Tuple[str, torch.nn.Module]]`):
771
+ The list of named modules where we want to determine the maximum layer size.
772
+ module_sizes (`Dict[str, int]`):
773
+ A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).
774
+ no_split_module_classes (`List[str]`):
775
+ A list of class names for layers we don't want to be split.
776
+
777
+ Returns:
778
+ `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.
779
+ """
780
+ max_size = 0
781
+ layer_names = []
782
+ modules_to_treat = modules.copy()
783
+ while len(modules_to_treat) > 0:
784
+ module_name, module = modules_to_treat.pop(0)
785
+ modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else []
786
+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
787
+ # No splitting this one so we compare to the max_size
788
+ size = module_sizes[module_name]
789
+ if size > max_size:
790
+ max_size = size
791
+ layer_names = [module_name]
792
+ elif size == max_size:
793
+ layer_names.append(module_name)
794
+ else:
795
+ modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat
796
+ return max_size, layer_names
797
+
798
+
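A short sketch of how `no_split_module_classes` changes what counts as a layer (assuming the functions above are importable from `accelerate.utils.modeling`; `Block` is a made-up class for illustration):

```python
import torch.nn as nn

from accelerate.utils.modeling import compute_module_sizes, get_max_layer_size

class Block(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(4, 4)
        self.fc2 = nn.Linear(4, 4)

model = nn.Sequential(Block(), nn.Linear(4, 4))
sizes = compute_module_sizes(model)

# By default Block is split, so each Linear(4, 4) is a layer of 80 bytes.
print(get_max_layer_size(list(model.named_children()), sizes, []))
# (80, ['0.fc1', '0.fc2', '1'])

# Marking Block as non-splittable makes the whole block the largest layer.
print(get_max_layer_size(list(model.named_children()), sizes, ["Block"]))
# (160, ['0'])
```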
799
+ def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):
800
+ """
801
+ Get the maximum memory available per device if nothing is passed, and convert string sizes to ints otherwise.
802
+ """
803
+ import psutil
804
+
805
+ if max_memory is None:
806
+ if not (torch.cuda.is_available() or is_npu_available() or is_mlu_available() or is_xpu_available()):
807
+ max_memory = {}
808
+
809
+ else:
810
+ # Make sure the backend is initialized on each device to get the right memory info.
811
+ if is_npu_available():
812
+ for i in range(torch.npu.device_count()):
813
+ _ = torch.tensor(0, device=torch.device("npu", i))
814
+ max_memory = {i: torch.npu.mem_get_info(i)[0] for i in range(torch.npu.device_count())}
815
+ elif is_mlu_available():
816
+ for i in range(torch.mlu.device_count()):
817
+ _ = torch.tensor(0, device=torch.device("mlu", i))
818
+ max_memory = {i: torch.mlu.mem_get_info(i)[0] for i in range(torch.mlu.device_count())}
819
+ elif is_xpu_available():
820
+ for i in range(torch.xpu.device_count()):
821
+ _ = torch.tensor(0, device=torch.device("xpu", i))
822
+ max_memory = {i: torch.xpu.max_memory_allocated(i) for i in range(torch.xpu.device_count())}
823
+ else:
824
+ for i in range(torch.cuda.device_count()):
825
+ _ = torch.tensor([0], device=i)
826
+ max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())}
827
+ # Allocate everything on the mps device as its RAM is shared with the CPU.
828
+ if is_mps_available():
829
+ max_memory["mps"] = psutil.virtual_memory().available
830
+ else:
831
+ max_memory["cpu"] = psutil.virtual_memory().available
832
+ return max_memory
833
+
834
+ for key in max_memory:
835
+ if isinstance(max_memory[key], str):
836
+ max_memory[key] = convert_file_size_to_int(max_memory[key])
837
+
838
+ # Need to sort the devices by type to make sure that we allocate the GPUs first.
839
+ # As gpu/npu/xpu devices are represented by ints, we need to sort them first.
840
+ gpu_devices = [k for k in max_memory.keys() if isinstance(k, int)]
841
+ gpu_devices.sort()
842
+ # check if gpu/npu/xpu devices are available and if not, throw a warning
843
+ if is_npu_available():
844
+ num_devices = torch.npu.device_count()
845
+ elif is_mlu_available():
846
+ num_devices = torch.mlu.device_count()
847
+ elif is_xpu_available():
848
+ num_devices = torch.xpu.device_count()
849
+ else:
850
+ num_devices = torch.cuda.device_count()
851
+ for device in gpu_devices:
852
+ if device >= num_devices or device < 0:
853
+ logger.warning(f"Device {device} is not available, available devices are {list(range(num_devices))}")
854
+ # Add the other devices in the preset order if they are available
855
+ all_devices = gpu_devices + [k for k in ["mps", "cpu", "disk"] if k in max_memory.keys()]
856
+ # Raise an error if a device is not recognized
857
+ for k in max_memory.keys():
858
+ if k not in all_devices:
859
+ raise ValueError(
860
+ f"Device {k} is not recognized, available devices are integers(for GPU/XPU), 'mps', 'cpu' and 'disk'"
861
+ )
862
+ max_memory = {k: max_memory[k] for k in all_devices}
863
+
864
+ return max_memory
865
+
866
+
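A quick sketch of the string-to-int conversion (assuming `get_max_memory` above is importable; note it only warns, rather than raises, if a listed GPU index is not actually available):

```python
from accelerate.utils.modeling import get_max_memory

# Budgets may be given as strings; "GB" is decimal, "GiB" is binary.
max_memory = get_max_memory({0: "1GB", "cpu": "10GiB"})
print(max_memory[0])      # 1_000_000_000
print(max_memory["cpu"])  # 10 * 2**30
```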
867
+ def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = ""):
868
+ """
869
+ Cleans a device_map by grouping all submodules that go on the same device together.
870
+ """
871
+ # Get the value of the current module and if there is only one split across several keys, regroup it.
872
+ prefix = "" if module_name == "" else f"{module_name}."
873
+ values = [v for k, v in device_map.items() if k.startswith(prefix)]
874
+ if len(set(values)) == 1 and len(values) > 1:
875
+ for k in [k for k in device_map if k.startswith(prefix)]:
876
+ del device_map[k]
877
+ device_map[module_name] = values[0]
878
+
879
+ # Recurse over the children
880
+ children_modules = [k for k in device_map.keys() if k.startswith(prefix) and len(k) > len(module_name)]
881
+ idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1
882
+ children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules)
883
+ for child in children_modules:
884
+ clean_device_map(device_map, module_name=child)
885
+
886
+ return device_map
887
+
888
+
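For example (a pure-dict sketch, assuming `clean_device_map` above is in scope):

```python
from accelerate.utils.modeling import clean_device_map

device_map = {"block.linear1": 0, "block.linear2": 0, "head": "cpu"}

# Both submodules of "block" live on device 0, so they collapse to one entry.
print(clean_device_map(device_map))
# {'head': 'cpu', 'block': 0}
```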
889
+ def load_offloaded_weights(model, index, offload_folder):
890
+ """
891
+ Loads the weights from the offload folder into the model.
892
+
893
+ Args:
894
+ model (`torch.nn.Module`):
895
+ The model to load the weights into.
896
+ index (`dict`):
897
+ A dictionary containing the parameter name and its metadata for each parameter that was offloaded from the
898
+ model.
899
+ offload_folder (`str`):
900
+ The folder where the offloaded weights are stored.
901
+ """
902
+ if index is None or len(index) == 0:
903
+ # Nothing to do
904
+ return
905
+ for param_name, metadata in index.items():
906
+ if "SCB" in param_name:
907
+ continue
908
+ fp16_statistics = None
909
+ if "weight" in param_name and param_name.replace("weight", "SCB") in index.keys():
910
+ weight_name = param_name.replace("weight", "SCB")
911
+ fp16_statistics = load_offloaded_weight(
912
+ os.path.join(offload_folder, f"{weight_name}.dat"), index[weight_name]
913
+ )
914
+ tensor_file = os.path.join(offload_folder, f"{param_name}.dat")
915
+ weight = load_offloaded_weight(tensor_file, metadata)
916
+ set_module_tensor_to_device(model, param_name, "cpu", value=weight, fp16_statistics=fp16_statistics)
917
+
918
+
919
+ def get_balanced_memory(
920
+ model: nn.Module,
921
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
922
+ no_split_module_classes: Optional[List[str]] = None,
923
+ dtype: Optional[Union[str, torch.dtype]] = None,
924
+ special_dtypes: Optional[Dict[str, Union[str, torch.dtype]]] = None,
925
+ low_zero: bool = False,
926
+ ):
927
+ """
928
+ Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU.
929
+
930
+ <Tip>
931
+
932
+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
933
+ meta device (as it would if initialized within the `init_empty_weights` context manager).
934
+
935
+ </Tip>
936
+
937
+ Args:
938
+ model (`torch.nn.Module`):
939
+ The model to analyze.
940
+ max_memory (`Dict`, *optional*):
941
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.
942
+ Example: `max_memory={0: "1GB"}`.
943
+ no_split_module_classes (`List[str]`, *optional*):
944
+ A list of layer class names that should never be split across device (for instance any layer that has a
945
+ residual connection).
946
+ dtype (`str` or `torch.dtype`, *optional*):
947
+ If provided, the weights will be converted to that type when loaded.
948
+ special_dtypes (`Dict[str, Union[str, torch.dtype]]`, *optional*):
949
+ If provided, special dtypes to consider for some specific weights (will override dtype used as default for
950
+ all weights).
951
+ low_zero (`bool`, *optional*):
952
+ Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like the
953
+ Transformers generate function).
954
+ """
955
+ # Get default / clean up max_memory
956
+ user_not_set_max_memory = max_memory is None
957
+ max_memory = get_max_memory(max_memory)
958
+
959
+ if is_npu_available():
960
+ num_devices = len([d for d in max_memory if torch.device(d).type == "npu" and max_memory[d] > 0])
961
+ elif is_mlu_available():
962
+ num_devices = len([d for d in max_memory if torch.device(d).type == "mlu" and max_memory[d] > 0])
963
+ elif is_xpu_available():
964
+ num_devices = len(
965
+ [
966
+ d
967
+ for d in max_memory
968
+ if (
969
+ d != "cpu"
970
+ and (torch.device(d).type == "xpu" or torch.xpu.get_device_properties(d).dev_type == "gpu")
971
+ )
972
+ and max_memory[d] > 0
973
+ ]
974
+ )
975
+ else:
976
+ num_devices = len([d for d in max_memory if torch.device(d).type == "cuda" and max_memory[d] > 0])
977
+
978
+ if num_devices == 0:
979
+ return max_memory
980
+
981
+ if num_devices == 1:
982
+ # We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer
983
+ low_zero = False
984
+ # If user just asked us to handle memory usage, we should avoid OOM
985
+ if user_not_set_max_memory:
986
+ for key in max_memory.keys():
987
+ if isinstance(key, int):
988
+ max_memory[key] *= 0.9 # 90% is a good compromise
989
+ logger.info(
990
+ f"We will use 90% of the memory on device {key} for storing the model, and 10% for the buffer to avoid OOM. "
991
+ "You can set `max_memory` in to a higher value to use more memory (at your own risk)."
992
+ )
993
+ break # only one device
994
+
995
+ module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes)
996
+ per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices)
997
+
998
+ # We can't just set the memory to model_size // num_devices as it would end up being too small: each GPU would get
999
+ # slightly fewer layers and some layers would end up offloaded at the end. So this function computes a buffer size to
1000
+ # add which is the biggest of:
1001
+ # - the size of no split block (if applicable)
1002
+ # - the mean of the layer sizes
1003
+ if no_split_module_classes is None:
1004
+ no_split_module_classes = []
1005
+ elif not isinstance(no_split_module_classes, (list, tuple)):
1006
+ no_split_module_classes = [no_split_module_classes]
1007
+
1008
+ # Identify the size of the no_split_block modules
1009
+ if len(no_split_module_classes) > 0:
1010
+ no_split_children = {}
1011
+ for name, size in module_sizes.items():
1012
+ if name == "":
1013
+ continue
1014
+ submodule = model
1015
+ for submodule_name in name.split("."):
1016
+ submodule = getattr(submodule, submodule_name)
1017
+ class_name = submodule.__class__.__name__
1018
+ if class_name in no_split_module_classes and class_name not in no_split_children:
1019
+ no_split_children[class_name] = size
1020
+
1021
+ if set(no_split_children.keys()) == set(no_split_module_classes):
1022
+ break
1023
+ buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0
1024
+ else:
1025
+ buffer = 0
1026
+
1027
+ # Compute mean of final modules. In the first dict of module sizes, leaves are the parameters
1028
+ leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0]
1029
+ module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves}
1030
+ # Once removed, leaves are the final modules.
1031
+ leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0]
1032
+ mean_leaves = int(sum([module_sizes[n] for n in leaves]) / max(len(leaves), 1))
1033
+ buffer = int(1.25 * max(buffer, mean_leaves))
1034
+ per_gpu += buffer
1035
+
1036
+ # Sorted list of GPU ids (we may have some gpu ids not included in our max_memory list - let's ignore them)
1037
+ gpus_idx_list = list(
1038
+ sorted(
1039
+ device_id for device_id, device_mem in max_memory.items() if isinstance(device_id, int) and device_mem > 0
1040
+ )
1041
+ )
1042
+ # The last device is left with max_memory just in case the buffer is not enough.
1043
+ for idx in gpus_idx_list[:-1]:
1044
+ max_memory[idx] = min(max_memory[0] if low_zero and idx == 0 else per_gpu, max_memory[idx])
1045
+
1046
+ if low_zero:
1047
+ min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)]))
1048
+ max_memory[0] = min(min_zero, max_memory[0])
1049
+
1050
+ return max_memory
1051
+
1052
+
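Typical usage, sketched with a meta-device model (assuming the public `accelerate` and `transformers` entry points; `gpt2` is only an example checkpoint):

```python
from accelerate import init_empty_weights
from accelerate.utils import get_balanced_memory
from transformers import AutoConfig, AutoModelForCausalLM

# No weights are materialized, so this works for arbitrarily large configs.
config = AutoConfig.from_pretrained("gpt2")
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

# Balance across available GPUs; low_zero keeps GPU 0 light so it can also
# host generation buffers.
max_memory = get_balanced_memory(model, low_zero=True)
```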
1053
+ def calculate_maximum_sizes(model: torch.nn.Module):
1054
+ "Computes the total size of the model and its largest layer"
1055
+ sizes = compute_module_sizes(model)
1056
+ # `transformers` models store this information for us
1057
+ no_split_modules = getattr(model, "_no_split_modules", None)
1058
+ if no_split_modules is None:
1059
+ no_split_modules = []
1060
+
1061
+ modules_to_treat = (
1062
+ list(model.named_parameters(recurse=False))
1063
+ + list(model.named_children())
1064
+ + list(model.named_buffers(recurse=False))
1065
+ )
1066
+ largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules)
1067
+ total_size = sizes[""]
1068
+ return total_size, largest_layer
1069
+
1070
+
1071
+ def infer_auto_device_map(
1072
+ model: nn.Module,
1073
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
1074
+ no_split_module_classes: Optional[List[str]] = None,
1075
+ dtype: Optional[Union[str, torch.dtype]] = None,
1076
+ special_dtypes: Optional[Dict[str, Union[str, torch.dtype]]] = None,
1077
+ verbose: bool = False,
1078
+ clean_result: bool = True,
1079
+ offload_buffers: bool = False,
1080
+ ):
1081
+ """
1082
+ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,
1083
+ such that:
1084
+ - we don't exceed the memory available on any of the GPUs.
1085
+ - if offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that
1086
+ has the largest size.
1087
+ - if offload to the CPU is needed, we don't exceed the RAM available on the CPU.
1088
+ - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk
1089
+ that has the largest size.
1090
+
1091
+ <Tip>
1092
+
1093
+ All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
1094
+ meta device (as it would if initialized within the `init_empty_weights` context manager).
1095
+
1096
+ </Tip>
1097
+
1098
+ Args:
1099
+ model (`torch.nn.Module`):
1100
+ The model to analyze.
1101
+ max_memory (`Dict`, *optional*):
1102
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.
1103
+ Example: `max_memory={0: "1GB"}`.
1104
+ no_split_module_classes (`List[str]`, *optional*):
1105
+ A list of layer class names that should never be split across device (for instance any layer that has a
1106
+ residual connection).
1107
+ dtype (`str` or `torch.dtype`, *optional*):
1108
+ If provided, the weights will be converted to that type when loaded.
1109
+ special_dtypes (`Dict[str, Union[str, torch.dtype]]`, *optional*):
1110
+ If provided, special dtypes to consider for some specific weights (will override dtype used as default for
1111
+ all weights).
1112
+ verbose (`bool`, *optional*, defaults to `False`):
1113
+ Whether or not to provide debugging statements as the function builds the device_map.
1114
+ clean_result (`bool`, *optional*, defaults to `True`):
1115
+ Clean the resulting device_map by grouping all submodules that go on the same device together.
1116
+ offload_buffers (`bool`, *optional*, defaults to `False`):
1117
+ In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
1118
+ well as the parameters.
1119
+ """
1120
+ # Get default / clean up max_memory
1121
+ max_memory = get_max_memory(max_memory)
1122
+ if no_split_module_classes is None:
1123
+ no_split_module_classes = []
1124
+ elif not isinstance(no_split_module_classes, (list, tuple)):
1125
+ no_split_module_classes = [no_split_module_classes]
1126
+
1127
+ devices = list(max_memory.keys())
1128
+ if "disk" not in devices:
1129
+ devices.append("disk")
1130
+ gpus = [device for device in devices if device not in ["cpu", "disk"]]
1131
+
1132
+ # Devices that need to keep space for a potential offloaded layer.
1133
+ if "mps" in gpus:
1134
+ main_devices = ["mps"]
1135
+ elif len(gpus) > 0:
1136
+ main_devices = [gpus[0], "cpu"]
1137
+ else:
1138
+ main_devices = ["cpu"]
1139
+
1140
+ module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes)
1141
+ tied_parameters = find_tied_parameters(model)
1142
+
1143
+ if check_tied_parameters_in_config(model) and len(tied_parameters) == 0:
1144
+ logger.warning(
1145
+ "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device_map` function."
1146
+ )
1147
+
1148
+ device_map = OrderedDict()
1149
+ current_device = 0
1150
+ current_memory_used = 0
1151
+ device_memory_used = {}
1152
+ device_buffer_sizes = {}
1153
+
1154
+ # Direct submodules and parameters
1155
+ modules_to_treat = (
1156
+ list(model.named_parameters(recurse=False))
1157
+ + list(model.named_children())
1158
+ + list(model.named_buffers(recurse=False))
1159
+ )
1160
+ # Initialize maximum largest layer, to know which space to keep in memory
1161
+ max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)
1162
+
1163
+ # Ready? This is going to be a bit messy.
1164
+ while len(modules_to_treat) > 0:
1165
+ name, module = modules_to_treat.pop(0)
1166
+ if verbose:
1167
+ print(f"\nTreating module {name}.")
1168
+ # The max size in the remaining layers may have changed since we took one out, so we update it if needed.
1169
+ max_layer_names = [n for n in max_layer_names if n != name and not n.startswith(name + ".")]
1170
+ if len(max_layer_names) == 0:
1171
+ max_layer_size, max_layer_names = get_max_layer_size(
1172
+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
1173
+ module_sizes,
1174
+ no_split_module_classes,
1175
+ )
1176
+ # Assess size needed
1177
+ module_size = module_sizes[name]
1178
+
1179
+ # We keep relevant tied parameters only: one of the tied parameters in the group is inside the current module
1180
+ # and the other is not.
1181
+ # Note: If we are currently processing the name `compute.weight`, another parameter named e.g. `compute.weight_submodule.parameter`
1182
+ # needs to be considered outside the current module, hence the check with additional dots.
1183
+ tied_param_groups = [
1184
+ tied_group
1185
+ for tied_group in tied_parameters
1186
+ if any(name + "." in k + "." for k in tied_group) and not all(name + "." in k + "." for k in tied_group)
1187
+ ]
1188
+
1189
+ if verbose and len(tied_param_groups) > 0:
1190
+ print(f" Found the relevant tied param groups {tied_param_groups}")
1191
+
1192
+ # Then we keep track of all the parameters that are tied to the current module, but not in the current module
1193
+ tied_params = sum(
1194
+ [[p for p in tied_group if name + "." not in p + "."] for tied_group in tied_param_groups], []
1195
+ )
1196
+
1197
+ if verbose and len(tied_params) > 0:
1198
+ print(f" So those parameters need to be taken into account {tied_params}")
1199
+
1200
+ device = devices[current_device]
1201
+ current_max_size = max_memory[device] if device != "disk" else None
1202
+ current_memory_reserved = 0
1203
+ # Reduce max size available by the largest layer.
1204
+ if devices[current_device] in main_devices:
1205
+ current_max_size = current_max_size - max_layer_size
1206
+ current_memory_reserved = max_layer_size
1207
+ # Case 1 -> We're too big!
1208
+ if current_max_size is not None and current_memory_used + module_size > current_max_size:
1209
+ # Split or not split?
1210
+ modules_children = (
1211
+ []
1212
+ if isinstance(module, nn.Parameter) or isinstance(module, torch.Tensor)
1213
+ else list(module.named_children())
1214
+ )
1215
+ if verbose:
1216
+ print(
1217
+ f"Not enough space on {devices[current_device]} to put {name} (space available "
1218
+ f"{current_max_size - current_memory_used}, module size {module_size})."
1219
+ )
1220
+ if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes:
1221
+ # -> no split, we go to the next device
1222
+ if verbose:
1223
+ print("This module cannot be split, going to the next device.")
1224
+
1225
+ device_memory_used[device] = current_memory_used + current_memory_reserved
1226
+ current_device += 1
1227
+ modules_to_treat = [(name, module)] + modules_to_treat
1228
+ current_memory_used = 0
1229
+ else:
1230
+ # -> split, we replace the module studied by its children + parameters
1231
+ if verbose:
1232
+ print(f"Splitting {name}.")
1233
+ modules_children = list(module.named_parameters(recurse=False)) + modules_children
1234
+ modules_to_treat = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_treat
1235
+ # Update the max layer size.
1236
+ max_layer_size, max_layer_names = get_max_layer_size(
1237
+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
1238
+ module_sizes,
1239
+ no_split_module_classes,
1240
+ )
1241
+
1242
+ # Case 2, it fits! We're not entirely out of the woods though, because we may have some tied parameters.
1243
+ elif len(tied_params) > 0:
1244
+ # First locate all tied modules
1245
+ tied_module_names = []
1246
+ tied_modules = []
1247
+ for tied_param in tied_params:
1248
+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0]
1249
+ tied_module_names.append(modules_to_treat[tied_module_index][0])
1250
+ tied_modules.append(modules_to_treat[tied_module_index][1])
1251
+ if verbose:
1252
+ print(
1253
+ f" It looks like {name} is going to fit on {devices[current_device]} but we have tied "
1254
+ f"parameters to account for.\n - Names {tied_params}\n - Module names {tied_module_names}"
1255
+ )
1256
+
1257
+ # Let's see if it all fits first
1258
+ module_size_with_ties = module_size
1259
+ for tied_param, tied_module_name in zip(tied_params, tied_module_names):
1260
+ module_size_with_ties += module_sizes[tied_module_name] - module_sizes[tied_param]
1261
+
1262
+ if current_max_size is None or current_memory_used + module_size_with_ties <= current_max_size:
1263
+ # We really really fit!
1264
+ if verbose:
1265
+ print(f"Putting {name} and {tied_module_names} on {devices[current_device]}.")
1266
+ current_memory_used += module_size_with_ties
1267
+ device_map[name] = devices[current_device]
1268
+ for tied_module_name in tied_module_names:
1269
+ if tied_module_name in [m[0] for m in modules_to_treat]:
1270
+ # The module may have been removed by a previous iteration of this loop.
1271
+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][
1272
+ 0
1273
+ ]
1274
+ modules_to_treat.pop(tied_module_index)
1275
+ device_map[tied_module_name] = devices[current_device]
1276
+
1277
+ if not offload_buffers and isinstance(module, nn.Module):
1278
+ current_buffer_size = compute_module_total_buffer_size(
1279
+ module, dtype=dtype, special_dtypes=special_dtypes
1280
+ )
1281
+ device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size
1282
+
1283
+ else:
1284
+ # We don't fit with the tied modules. Next question is: can we split one of the tied modules to make it
1285
+ # smaller, or do we need to move on to the next device?
1286
+ if verbose:
1287
+ print(
1288
+ f"Not enough space on {devices[current_device]} to put {name} and {tied_module_names} (space "
1289
+ f"available {current_max_size - current_memory_used}, needed size {module_size_with_ties})."
1290
+ )
1291
+ split_happened = False
1292
+ for tied_module_name, tied_module in zip(tied_module_names, tied_modules):
1293
+ tied_module_children = list(tied_module.named_children())
1294
+ if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:
1295
+ # can't break this one.
1296
+ continue
1297
+
1298
+ if verbose:
1299
+ print(f"Splitting {tied_module_name}.")
1300
+ tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children
1301
+ tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children]
1302
+ tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][0]
1303
+
1304
+ modules_to_treat = (
1305
+ [(name, module)]
1306
+ + modules_to_treat[:tied_module_index]
1307
+ + tied_module_children
1308
+ + modules_to_treat[tied_module_index + 1 :]
1309
+ )
1310
+ # Update the max layer size.
1311
+ max_layer_size, max_layer_names = get_max_layer_size(
1312
+ [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
1313
+ module_sizes,
1314
+ no_split_module_classes,
1315
+ )
1316
+ split_happened = True
1317
+ break
1318
+
1319
+ if not split_happened:
1320
+ # If the tied module is not split, we go to the next device
1321
+ if verbose:
1322
+ print("None of the tied module can be split, going to the next device.")
1323
+
1324
+ device_memory_used[device] = current_memory_used + current_memory_reserved
1325
+ current_device += 1
1326
+ modules_to_treat = [(name, module)] + modules_to_treat
1327
+ current_memory_used = 0
1328
+
1329
+ else:
1330
+ if verbose:
1331
+ if current_max_size is None:
1332
+ print(f"Putting {name} (size={module_size}) on {devices[current_device]}.")
1333
+ else:
1334
+ print(
1335
+ f"Putting {name} (size={module_size}) on {devices[current_device]} "
1336
+ f"(available={current_max_size - current_memory_used})."
1337
+ )
1338
+ current_memory_used += module_size
1339
+ device_memory_used[device] = current_memory_used + current_memory_reserved
1340
+ device_map[name] = devices[current_device]
1341
+
1342
+ if not offload_buffers and isinstance(module, nn.Module):
1343
+ current_buffer_size = compute_module_total_buffer_size(
1344
+ module, dtype=dtype, special_dtypes=special_dtypes
1345
+ )
1346
+ device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size
1347
+
1348
+ if clean_result:
1349
+ device_map = clean_device_map(device_map)
1350
+
1351
+ non_gpu_buffer_size = device_buffer_sizes.get("cpu", 0) + device_buffer_sizes.get("disk", 0)
1352
+ if non_gpu_buffer_size > 0 and not offload_buffers:
1353
+ is_buffer_fit_any_gpu = False
1354
+ for gpu_device, gpu_max_memory in max_memory.items():
1355
+ if gpu_device == "cpu" or gpu_device == "disk":
1356
+ continue
1357
+
1358
+ if not is_buffer_fit_any_gpu:
1359
+ gpu_memory_used = device_memory_used.get(gpu_device, 0)
1360
+
1361
+ if gpu_max_memory >= non_gpu_buffer_size + gpu_memory_used:
1362
+ is_buffer_fit_any_gpu = True
1363
+
1364
+ if len(gpus) > 0 and not is_buffer_fit_any_gpu:
1365
+ warnings.warn(
1366
+ f"Current model requires {non_gpu_buffer_size} bytes of buffer for offloaded layers, which seems does "
1367
+ f"not fit any GPU's remaining memory. If you are experiencing a OOM later, please consider using "
1368
+ f"offload_buffers=True."
1369
+ )
1370
+
1371
+ return device_map
1372
+
1373
+
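End-to-end usage, sketched (assuming the public `accelerate` exports; `gpt2` is only an example checkpoint and `GPT2Block` is its transformer-block class):

```python
from accelerate import infer_auto_device_map, init_empty_weights
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("gpt2")
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

# Cap GPU 0 and the CPU; whatever does not fit spills over to "disk".
device_map = infer_auto_device_map(
    model,
    max_memory={0: "300MB", "cpu": "1GB"},
    no_split_module_classes=["GPT2Block"],
)
```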
1374
+ def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):
1375
+ """
1376
+ Checks that a device map covers everything in a given model.
1377
+
1378
+ Args:
1379
+ model (`torch.nn.Module`): The model to check the device map against.
1380
+ device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.
1381
+ """
1382
+ all_model_tensors = [name for name, _ in model.state_dict().items()]
1383
+ for module_name in device_map.keys():
1384
+ if module_name == "":
1385
+ all_model_tensors.clear()
1386
+ break
1387
+ else:
1388
+ all_model_tensors = [
1389
+ name
1390
+ for name in all_model_tensors
1391
+ if not name == module_name and not name.startswith(module_name + ".")
1392
+ ]
1393
+ if len(all_model_tensors) > 0:
1394
+ non_covered_params = ", ".join(all_model_tensors)
1395
+ raise ValueError(
1396
+ f"The device_map provided does not give any device for the following parameters: {non_covered_params}"
1397
+ )
1398
+
1399
+
1400
+ def load_state_dict(checkpoint_file, device_map=None):
1401
+ """
1402
+ Load a checkpoint from a given file. If the checkpoint is in the safetensors format and a device map is passed, the
1403
+ weights can be fast-loaded directly on the GPU.
1404
+
1405
+ Args:
1406
+ checkpoint_file (`str`): The path to the checkpoint to load.
1407
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
1408
+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
1409
+ name; once a given module name is inside, every submodule of it will be sent to the same device.
1410
+ """
1411
+ if checkpoint_file.endswith(".safetensors"):
1412
+ with safe_open(checkpoint_file, framework="pt") as f:
1413
+ metadata = f.metadata()
1414
+ weight_names = f.keys()
1415
+
1416
+ if metadata is None:
1417
+ logger.warning(
1418
+ f"The safetensors archive passed at {checkpoint_file} does not contain metadata. "
1419
+ "Make sure to save your model with the `save_pretrained` method. Defaulting to 'pt' metadata."
1420
+ )
1421
+ metadata = {"format": "pt"}
1422
+
1423
+ if metadata.get("format") not in ["pt", "tf", "flax"]:
1424
+ raise OSError(
1425
+ f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure "
1426
+ "you save your model with the `save_pretrained` method."
1427
+ )
1428
+ elif metadata["format"] != "pt":
1429
+ raise ValueError(f"The checkpoint passed was saved with {metadata['format']}, we need a the pt format.")
1430
+ if device_map is None:
1431
+ return safe_load_file(checkpoint_file)
1432
+ else:
1433
+ # if we only have one device we can load everything directly
1434
+ if len(set(device_map.values())) == 1:
1435
+ return safe_load_file(checkpoint_file, device=list(device_map.values())[0])
1436
+
1437
+ devices = list(set(device_map.values()) - {"disk"})
1438
+ # The cpu device should always exist as a fallback option
1439
+ if "cpu" not in devices:
1440
+ devices.append("cpu")
1441
+
1442
+ # For each device, get the weights that go there
1443
+ device_weights = {device: [] for device in devices}
1444
+ for module_name, device in device_map.items():
1445
+ if device in devices:
1446
+ device_weights[device].extend(
1447
+ [k for k in weight_names if k == module_name or k.startswith(module_name + ".")]
1448
+ )
1449
+
1450
+ # All weights that don't have an assigned device should be loaded on CPU
1451
+ device_weights["cpu"].extend([k for k in weight_names if k not in sum(device_weights.values(), [])])
1452
+ tensors = {}
1453
+ if is_tqdm_available():
1454
+ progress_bar = tqdm(
1455
+ main_process_only=False,
1456
+ total=sum([len(device_weights[device]) for device in devices]),
1457
+ unit="w",
1458
+ smoothing=0,
1459
+ leave=False,
1460
+ )
1461
+ else:
1462
+ progress_bar = None
1463
+ for device in devices:
1464
+ target_device = device
1465
+
1466
+ if is_xpu_available():
1467
+ current_safetensors_version = packaging.version.parse(importlib.metadata.version("safetensors"))
1468
+
1469
+ if compare_versions(current_safetensors_version, "<", "0.4.2"):
1470
+ raise ModuleNotFoundError(
1471
+ f"You need at least safetensors 0.4.2 for Intel GPU, while you have {current_safetensors_version}"
1472
+ )
1473
+
1474
+ if isinstance(device, int):
1475
+ target_device = f"xpu:{device}"
1476
+
1477
+ with safe_open(checkpoint_file, framework="pt", device=target_device) as f:
1478
+ for key in device_weights[device]:
1479
+ if progress_bar is not None:
1480
+ progress_bar.set_postfix(dev=device, refresh=False)
1481
+ progress_bar.set_description(key)
1482
+ tensors[key] = f.get_tensor(key)
1483
+ if progress_bar is not None:
1484
+ progress_bar.update()
1485
+ if progress_bar is not None:
1486
+ progress_bar.close()
1487
+
1488
+ return tensors
1489
+ else:
1490
+ return torch.load(checkpoint_file, map_location=torch.device("cpu"))
1491
+
1492
+
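A small round trip showing the fast path, sketched (assuming `load_state_dict` above is importable from `accelerate.utils.modeling`; the file name is arbitrary):

```python
import torch
from safetensors.torch import save_file

from accelerate.utils.modeling import load_state_dict

state = {"linear.weight": torch.randn(4, 4), "linear.bias": torch.randn(4)}
save_file(state, "tiny.safetensors", metadata={"format": "pt"})

# With a single target device, the whole file is loaded directly onto it.
tensors = load_state_dict("tiny.safetensors", device_map={"linear": "cpu"})
print(sorted(tensors))  # ['linear.bias', 'linear.weight']
```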
1493
+ def get_state_dict_offloaded_model(model: nn.Module):
1494
+ """
1495
+ Returns the state dictionary for an offloaded model via iterative onloading
1496
+
1497
+ Args:
1498
+ model (`torch.nn.Module`):
1499
+ The offloaded model we want to save
1500
+ """
1501
+ from ..hooks import AlignDevicesHook
1502
+
1503
+ state_dict = {}
1504
+ placeholders = set()
1505
+ for name, module in model.named_modules():
1506
+ if name == "":
1507
+ continue
1508
+ if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload:
1509
+ original_device = module._hf_hook.execution_device
1510
+ # assign hook execution device to cpu
1511
+ module._hf_hook.execution_device = "cpu"
1512
+ # onload meta tensors to execution device
1513
+ try:
1514
+ module._hf_hook.pre_forward(module)
1515
+ except MemoryError:
1516
+ raise MemoryError("Offloaded module must fit in CPU memory to call save_model!") from None
1517
+ module_state_dict = module.state_dict()
1518
+ # offload meta tensors from cpu
1519
+ module._hf_hook.post_forward(module, torch.tensor([]))
1520
+ # re-assign hook to original execution device
1521
+ module._hf_hook.execution_device = original_device
1522
+ else:
1523
+ module_state_dict = module.state_dict()
1524
+
1525
+ for key in module_state_dict:
1526
+ # ignore placeholder parameters that are still on the meta device
1527
+ if module_state_dict[key].device == torch.device("meta"):
1528
+ placeholders.add(name + f".{key}")
1529
+ continue
1530
+ params = module_state_dict[key]
1531
+ state_dict[name + f".{key}"] = params
1532
+ for key in placeholders.copy():
1533
+ if key in state_dict:
1534
+ placeholders.remove(key)
1535
+ if placeholders:
1536
+ logger.warning(f"The following tensors were not saved because they were still on meta device: {placeholders}")
1537
+
1538
+ return state_dict
1539
+
1540
+
1541
+ def load_checkpoint_in_model(
1542
+ model: nn.Module,
1543
+ checkpoint: Union[str, os.PathLike],
1544
+ device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
1545
+ offload_folder: Optional[Union[str, os.PathLike]] = None,
1546
+ dtype: Optional[Union[str, torch.dtype]] = None,
1547
+ offload_state_dict: bool = False,
1548
+ offload_buffers: bool = False,
1549
+ keep_in_fp32_modules: List[str] = None,
1550
+ offload_8bit_bnb: bool = False,
1551
+ strict: bool = False,
1552
+ ):
1553
+ """
1554
+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
1555
+ loaded.
1556
+
1557
+ <Tip warning={true}>
1558
+
1559
+ Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To
1560
+ group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].
1561
+
1562
+ </Tip>
1563
+
1564
+ Args:
1565
+ model (`torch.nn.Module`):
1566
+ The model in which we want to load a checkpoint.
1567
+ checkpoint (`str` or `os.PathLike`):
1568
+ The checkpoint to load. It can be:
1569
+ - a path to a file containing a whole model state dict
1570
+ - a path to a `.json` file containing the index to a sharded checkpoint
1571
+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
1572
+ - a path to a folder containing a unique pytorch_model.bin or a model.safetensors file.
1573
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
1574
+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
1575
+ name; once a given module name is inside, every submodule of it will be sent to the same device.
1576
+ offload_folder (`str` or `os.PathLike`, *optional*):
1577
+ If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
1578
+ dtype (`str` or `torch.dtype`, *optional*):
1579
+ If provided, the weights will be converted to that type when loaded.
1580
+ offload_state_dict (`bool`, *optional*, defaults to `False`):
1581
+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
1582
+ the weight of the CPU state dict + the biggest shard does not fit.
1583
+ offload_buffers (`bool`, *optional*, defaults to `False`):
1584
+ Whether or not to include the buffers in the weights offloaded to disk.
1585
+ keep_in_fp32_modules (`List[str]`, *optional*):
1586
+ A list of the modules that we keep in `torch.float32` dtype.
1587
+ offload_8bit_bnb (`bool`, *optional*):
1588
+ Whether or not to enable offload of 8-bit modules on cpu/disk.
1589
+ strict (`bool`, *optional*, defaults to `False`):
1590
+ Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
1591
+ state_dict.
1592
+
1593
+ """
1594
+ if offload_8bit_bnb:
1595
+ from .bnb import quantize_and_offload_8bit
1596
+
1597
+ tied_params = find_tied_parameters(model)
1598
+
1599
+ if check_tied_parameters_in_config(model) and len(tied_params) == 0:
1600
+ logger.warning(
1601
+ "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device_map` function."
1602
+ )
1603
+ if device_map is not None:
1604
+ check_tied_parameters_on_same_device(tied_params, device_map)
1605
+
1606
+ if offload_folder is None and device_map is not None and "disk" in device_map.values():
1607
+ raise ValueError(
1608
+ "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`."
1609
+ )
1610
+ elif offload_folder is not None and device_map is not None and "disk" in device_map.values():
1611
+ os.makedirs(offload_folder, exist_ok=True)
1612
+
1613
+ if isinstance(dtype, str):
1614
+ # We accept "torch.float16" or just "float16"
1615
+ dtype = dtype.replace("torch.", "")
1616
+ dtype = getattr(torch, dtype)
1617
+
1618
+ checkpoint_files = None
1619
+ index_filename = None
1620
+ if os.path.isfile(checkpoint):
1621
+ if str(checkpoint).endswith(".json"):
1622
+ index_filename = checkpoint
1623
+ else:
1624
+ checkpoint_files = [checkpoint]
1625
+ elif os.path.isdir(checkpoint):
1626
+ # check if the whole state dict is present
1627
+ potential_state_bin = [f for f in os.listdir(checkpoint) if f == WEIGHTS_NAME]
1628
+ potential_state_safetensor = [f for f in os.listdir(checkpoint) if f == SAFE_WEIGHTS_NAME]
1629
+ if len(potential_state_bin) == 1:
1630
+ checkpoint_files = [os.path.join(checkpoint, potential_state_bin[0])]
1631
+ elif len(potential_state_safetensor) == 1:
1632
+ checkpoint_files = [os.path.join(checkpoint, potential_state_safetensor[0])]
1633
+ else:
1634
+ # otherwise check for sharded checkpoints
1635
+ potential_index = [f for f in os.listdir(checkpoint) if f.endswith(".index.json")]
1636
+ if len(potential_index) == 0:
1637
+ raise ValueError(
1638
+ f"{checkpoint} is not a folder containing a `.index.json` file or a {WEIGHTS_NAME} or a {SAFE_WEIGHTS_NAME} file"
1639
+ )
1640
+ elif len(potential_index) == 1:
1641
+ index_filename = os.path.join(checkpoint, potential_index[0])
1642
+ else:
1643
+ raise ValueError(
1644
+ f"{checkpoint} containing more than one `.index.json` file, delete the irrelevant ones."
1645
+ )
1646
+ else:
1647
+ raise ValueError(
1648
+ "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded "
1649
+ f"checkpoint, or a folder containing a sharded checkpoint or the whole state dict, but got {checkpoint}."
1650
+ )
1651
+
1652
+ if index_filename is not None:
1653
+ checkpoint_folder = os.path.split(index_filename)[0]
1654
+ with open(index_filename) as f:
1655
+ index = json.loads(f.read())
1656
+
1657
+ if "weight_map" in index:
1658
+ index = index["weight_map"]
1659
+ checkpoint_files = sorted(list(set(index.values())))
1660
+ checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files]
1661
+
1662
+ # Logic for missing/unexpected keys goes here.
1663
+
1664
+ offload_index = {}
1665
+ if offload_state_dict:
1666
+ state_dict_folder = tempfile.mkdtemp()
1667
+ state_dict_index = {}
1668
+
1669
+ unexpected_keys = set()
1670
+ model_keys = set(model.state_dict().keys())
1671
+ buffer_names = [name for name, _ in model.named_buffers()]
1672
+ for checkpoint_file in checkpoint_files:
1673
+ loaded_checkpoint = load_state_dict(checkpoint_file, device_map=device_map)
1674
+ if device_map is None:
1675
+ model.load_state_dict(loaded_checkpoint, strict=strict)
1676
+ unexpected_keys.update(set(loaded_checkpoint.keys()) - model_keys)
1677
+ else:
1678
+ for param_name, param in loaded_checkpoint.items():
1679
+ # skip SCB parameter (for 8-bit serialization)
1680
+ if "SCB" in param_name:
1681
+ continue
1682
+
1683
+ if param_name not in model_keys:
1684
+ unexpected_keys.add(param_name)
1685
+ if not strict:
1686
+ continue # Skip loading this parameter.
1687
+
1688
+ module_name = param_name
1689
+
1690
+ while len(module_name) > 0 and module_name not in device_map:
1691
+ module_name = ".".join(module_name.split(".")[:-1])
1692
+ if module_name == "" and "" not in device_map:
1693
+ # TODO: group all errors and raise at the end.
1694
+ raise ValueError(f"{param_name} doesn't have any device set.")
1695
+ param_device = device_map[module_name]
1696
+ new_dtype = dtype
1697
+ if dtype is not None and torch.is_floating_point(param):
1698
+ if keep_in_fp32_modules is not None and dtype == torch.float16:
1699
+ proceed = False
1700
+ for key in keep_in_fp32_modules:
1701
+ if ((key in param_name) and (key + "." in param_name)) or key == param_name:
1702
+ proceed = True
1703
+ break
1704
+ if proceed:
1705
+ new_dtype = torch.float32
1706
+
1707
+ if "weight" in param_name and param_name.replace("weight", "SCB") in loaded_checkpoint.keys():
1708
+ if param.dtype == torch.int8:
1709
+ fp16_statistics = loaded_checkpoint[param_name.replace("weight", "SCB")]
1710
+ else:
1711
+ fp16_statistics = None
1712
+
1713
+ if param_device == "disk":
1714
+ if offload_buffers or param_name not in buffer_names:
1715
+ if new_dtype is None:
1716
+ new_dtype = param.dtype
1717
+ if offload_8bit_bnb:
1718
+ quantize_and_offload_8bit(
1719
+ model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics
1720
+ )
1721
+ continue
1722
+ else:
1723
+ set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype)
1724
+ offload_weight(param, param_name, offload_folder, index=offload_index)
1725
+ elif param_device == "cpu" and offload_state_dict:
1726
+ if new_dtype is None:
1727
+ new_dtype = param.dtype
1728
+ if offload_8bit_bnb:
1729
+ quantize_and_offload_8bit(
1730
+ model, param, param_name, new_dtype, state_dict_folder, state_dict_index, fp16_statistics
1731
+ )
1732
+ else:
1733
+ set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype)
1734
+ offload_weight(param, param_name, state_dict_folder, index=state_dict_index)
1735
+ else:
1736
+ set_module_tensor_to_device(
1737
+ model,
1738
+ param_name,
1739
+ param_device,
1740
+ value=param,
1741
+ dtype=new_dtype,
1742
+ fp16_statistics=fp16_statistics,
1743
+ )
1744
+
1745
+ # Force Python to clean up.
1746
+ del loaded_checkpoint
1747
+ gc.collect()
1748
+
1749
+ if not strict and len(unexpected_keys) > 0:
1750
+ logger.warning(
1751
+ f"Some weights of the model checkpoint at {checkpoint} were not used when"
1752
+ f" initializing {model.__class__.__name__}: {unexpected_keys}. This may or may not be an issue - make sure that the checkpoint does not have unnecessary parameters, or that the model definition correctly corresponds to the checkpoint."
1753
+ )
1754
+
1755
+ save_offload_index(offload_index, offload_folder)
1756
+
1757
+ # Load back offloaded state dict on CPU
1758
+ if offload_state_dict:
1759
+ load_offloaded_weights(model, state_dict_index, state_dict_folder)
1760
+ shutil.rmtree(state_dict_folder)
1761
+
1762
+ retie_parameters(model, tied_params)
1763
+
1764
+
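Typical usage, sketched with a meta-initialized model (`checkpoint_dir` is a hypothetical folder holding a `model.safetensors` file or a sharded `*.index.json`):

```python
import torch

from accelerate import init_empty_weights, load_checkpoint_in_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("gpt2")
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

# Weights are materialized straight onto the devices named in device_map.
load_checkpoint_in_model(
    model,
    "checkpoint_dir",
    device_map={"": "cpu"},
    dtype=torch.float16,
)
```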
1765
+ def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwargs: AutocastKwargs = None):
1766
+ """
1767
+ Return a context manager for autocasting mixed precision
1768
+
1769
+ Args:
1770
+ native_amp (`bool`, *optional*, defaults to False):
1771
+ Whether mixed precision is actually enabled.
1772
+ autocast_kwargs (`AutocastKwargs`, *optional*):
1773
+ Extra keyword arguments (such as `cache_enabled`) to pass along to `torch.autocast`.
1774
+ """
1775
+ state = AcceleratorState()
1776
+ if autocast_kwargs is None:
1777
+ autocast_kwargs = {}
1778
+ else:
1779
+ autocast_kwargs = autocast_kwargs.to_kwargs()
1780
+ if native_amp:
1781
+ device_type = (
1782
+ "cuda"
1783
+ if (state.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_gpu=True))
1784
+ else state.device.type
1785
+ )
1786
+ if state.mixed_precision == "fp16":
1787
+ return torch.autocast(device_type=device_type, dtype=torch.float16, **autocast_kwargs)
1788
+ elif state.mixed_precision in ["bf16", "fp8"] and state.distributed_type in [
1789
+ DistributedType.NO,
1790
+ DistributedType.MULTI_CPU,
1791
+ DistributedType.MULTI_GPU,
1792
+ DistributedType.MULTI_MLU,
1793
+ DistributedType.MULTI_NPU,
1794
+ DistributedType.MULTI_XPU,
1795
+ DistributedType.FSDP,
1796
+ DistributedType.XLA,
1797
+ ]:
1798
+ return torch.autocast(device_type=device_type, dtype=torch.bfloat16, **autocast_kwargs)
1799
+ else:
1800
+ return torch.autocast(device_type=device_type, **autocast_kwargs)
1801
+ else:
1802
+ return contextlib.nullcontext()
llmeval-env/lib/python3.10/site-packages/accelerate/utils/offload.py ADDED
@@ -0,0 +1,213 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ from collections.abc import Mapping
18
+ from typing import Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+ import torch
22
+ from safetensors import safe_open
23
+
24
+
25
+ def offload_weight(weight, weight_name, offload_folder, index=None):
26
+ dtype = None
27
+ # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16.
28
+ if str(weight.dtype) == "torch.bfloat16":
29
+ # Need to reinterpret the underlying data as int16 since NumPy does not handle bfloat16s.
30
+ weight = weight.view(torch.int16)
31
+ dtype = "bfloat16"
32
+ array = weight.cpu().numpy()
33
+ tensor_file = os.path.join(offload_folder, f"{weight_name}.dat")
34
+ if index is not None:
35
+ if dtype is None:
36
+ dtype = str(array.dtype)
37
+ index[weight_name] = {"dtype": dtype, "shape": list(array.shape)}
38
+ if array.ndim == 0:
39
+ array = array[None]
40
+ file_array = np.memmap(tensor_file, dtype=array.dtype, mode="w+", shape=array.shape)
41
+ file_array[:] = array[:]
42
+ file_array.flush()
43
+ return index
44
+
45
+
46
+ def load_offloaded_weight(weight_file, weight_info):
47
+ shape = tuple(weight_info["shape"])
48
+ if shape == ():
49
+ # NumPy memory-mapped arrays can't have 0 dims so it was saved as a 1d tensor
50
+ shape = (1,)
51
+
52
+ dtype = weight_info["dtype"]
53
+ if dtype == "bfloat16":
54
+ # NumPy does not support bfloat16 so this was saved as an int16
55
+ dtype = "int16"
56
+
57
+ weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode="r")
58
+
59
+ if len(weight_info["shape"]) == 0:
60
+ weight = weight[0]
61
+ weight = torch.tensor(weight)
62
+ if weight_info["dtype"] == "bfloat16":
63
+ weight = weight.view(torch.bfloat16)
64
+
65
+ return weight
66
+
67
+
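A round trip through the bfloat16 workaround, sketched (assuming the two functions above are importable from `accelerate.utils.offload`, the file shown in this diff):

```python
import os
import tempfile

import torch

from accelerate.utils.offload import load_offloaded_weight, offload_weight

weight = torch.randn(3, 3, dtype=torch.bfloat16)
folder = tempfile.mkdtemp()

# bfloat16 goes to disk as raw int16 bytes plus a "bfloat16" tag in the index.
index = offload_weight(weight, "w", folder, index={})
print(index["w"])  # {'dtype': 'bfloat16', 'shape': [3, 3]}

restored = load_offloaded_weight(os.path.join(folder, "w.dat"), index["w"])
assert torch.equal(restored, weight)
```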
68
+ def save_offload_index(index, offload_folder):
69
+ if index is None or len(index) == 0:
70
+ # Nothing to save
71
+ return
72
+
73
+ offload_index_file = os.path.join(offload_folder, "index.json")
74
+ if os.path.isfile(offload_index_file):
75
+ with open(offload_index_file, encoding="utf-8") as f:
76
+ current_index = json.load(f)
77
+ else:
78
+ current_index = {}
79
+ current_index.update(index)
80
+
81
+ with open(offload_index_file, "w", encoding="utf-8") as f:
82
+ json.dump(current_index, f, indent=2)
83
+
84
+
85
+ def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):
86
+ """
87
+ Offload a state dict in a given folder.
88
+
89
+ Args:
90
+ save_dir (`str` or `os.PathLike`):
91
+ The directory in which to offload the state dict.
92
+ state_dict (`Dict[str, torch.Tensor]`):
93
+ The dictionary of tensors to offload.
94
+ """
95
+ os.makedirs(save_dir, exist_ok=True)
96
+ index = {}
97
+ for name, parameter in state_dict.items():
98
+ index = offload_weight(parameter, name, save_dir, index=index)
99
+
100
+ # Update index
101
+ save_offload_index(index, save_dir)
102
+
103
+
104
+ class PrefixedDataset(Mapping):
105
+ """
106
+ Will access keys in a given dataset by adding a prefix.
107
+
108
+ Args:
109
+ dataset (`Mapping`): Any map with string keys.
110
+ prefix (`str`): A prefix to add when trying to access any element in the underlying dataset.
111
+ """
112
+
113
+ def __init__(self, dataset: Mapping, prefix: str):
114
+ self.dataset = dataset
115
+ self.prefix = prefix
116
+
117
+ def __getitem__(self, key):
118
+ return self.dataset[f"{self.prefix}{key}"]
119
+
120
+ def __iter__(self):
121
+ return iter([key for key in self.dataset if key.startswith(self.prefix)])
122
+
123
+ def __len__(self):
124
+ return len(self.dataset)
125
+
126
+
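For instance (a tiny sketch, assuming the class above is in scope):

```python
from accelerate.utils.offload import PrefixedDataset

state_dict = {"model.weight": 1, "model.bias": 2, "head.weight": 3}
sub = PrefixedDataset(state_dict, prefix="model.")

print(sub["weight"])  # 1, looked up under the full key "model.weight"
print(list(sub))      # ['model.weight', 'model.bias']
```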
127
+ class OffloadedWeightsLoader(Mapping):
128
+ """
129
+ A collection that loads weights stored in a given state dict or memory-mapped on disk.
130
+
131
+ Args:
132
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
133
+ A dictionary parameter name to tensor.
134
+ save_folder (`str` or `os.PathLike`, *optional*):
135
+ The directory in which the weights are stored (by `offload_state_dict` for instance).
136
+ index (`Dict`, *optional*):
137
+ A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
138
+ to the index saved in `save_folder`.
139
+ """
140
+
141
+ def __init__(
142
+ self,
143
+ state_dict: Dict[str, torch.Tensor] = None,
144
+ save_folder: Optional[Union[str, os.PathLike]] = None,
145
+ index: Mapping = None,
146
+ device=None,
147
+ ):
148
+ if state_dict is None and save_folder is None and index is None:
149
+ raise ValueError("Need either a `state_dict`, a `save_folder` or an `index` containing offloaded weights.")
150
+
151
+ self.state_dict = {} if state_dict is None else state_dict
152
+ self.save_folder = save_folder
153
+ if index is None and save_folder is not None:
154
+ with open(os.path.join(save_folder, "index.json")) as f:
155
+ index = json.load(f)
156
+ self.index = {} if index is None else index
157
+ self.all_keys = list(self.state_dict.keys())
158
+ self.all_keys.extend([key for key in self.index if key not in self.all_keys])
159
+ self.device = device
160
+
161
+ def __getitem__(self, key: str):
162
+ # State dict gets priority
163
+ if key in self.state_dict:
164
+ return self.state_dict[key]
165
+ weight_info = self.index[key]
166
+ if weight_info.get("safetensors_file") is not None:
167
+ device = "cpu" if self.device is None else self.device
168
+ tensor = None
169
+ try:
170
+ with safe_open(weight_info["safetensors_file"], framework="pt", device=device) as f:
171
+ tensor = f.get_tensor(weight_info.get("weight_name", key))
172
+ except TypeError:
173
+ # if failed to get_tensor on the device, such as bf16 on mps, try to load it on CPU first
174
+ with safe_open(weight_info["safetensors_file"], framework="pt", device="cpu") as f:
175
+ tensor = f.get_tensor(weight_info.get("weight_name", key))
176
+
177
+ if "dtype" in weight_info:
178
+ tensor = tensor.to(getattr(torch, weight_info["dtype"]))
179
+
180
+ if tensor.device != torch.device(device):
181
+ tensor = tensor.to(device)
182
+ return tensor
183
+
184
+ weight_file = os.path.join(self.save_folder, f"{key}.dat")
185
+ return load_offloaded_weight(weight_file, weight_info)
186
+
187
+ def __iter__(self):
188
+ return iter(self.all_keys)
189
+
190
+ def __len__(self):
191
+ return len(self.all_keys)
192
+
193
+
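A disk round trip through `offload_state_dict` and the loader, sketched (assuming both are importable from `accelerate.utils.offload`):

```python
import tempfile

import torch

from accelerate.utils.offload import OffloadedWeightsLoader, offload_state_dict

state_dict = {"linear.weight": torch.randn(4, 4), "linear.bias": torch.randn(4)}
folder = tempfile.mkdtemp()
offload_state_dict(folder, state_dict)

# The loader reads index.json and memory-maps each weight lazily on access.
loader = OffloadedWeightsLoader(save_folder=folder)
assert set(loader.keys()) == set(state_dict.keys())
assert torch.equal(loader["linear.bias"], state_dict["linear.bias"])
```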
194
+ def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]):
195
+ """
196
+ Extract the sub state-dict corresponding to a list of given submodules.
197
+
198
+ Args:
199
+ state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from.
200
+ submodule_names (`List[str]`): The list of submodule names we want to extract.
201
+ """
202
+ result = {}
203
+ for module_name in submodule_names:
204
+ # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the
205
+ # submodules whose names could begin like module_name (transformers.h.1 and transformers.h.10 for instance)
206
+ result.update(
207
+ {
208
+ key: param
209
+ for key, param in state_dict.items()
210
+ if key == module_name or key.startswith(module_name + ".")
211
+ }
212
+ )
213
+ return result
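
The comment above is worth a concrete check: matching on `module_name + "."` keeps `h.1.*` without also capturing `h.10.*` (a sketch assuming the function above is in scope):

```python
from accelerate.utils.offload import extract_submodules_state_dict

state_dict = {"h.1.weight": 1, "h.10.weight": 2, "h.1.bias": 3}

print(extract_submodules_state_dict(state_dict, ["h.1"]))
# {'h.1.weight': 1, 'h.1.bias': 3} -- "h.10.weight" is correctly excluded
```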
llmeval-env/lib/python3.10/site-packages/accelerate/utils/operations.py ADDED
@@ -0,0 +1,848 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ A set of basic tensor ops compatible with TPU, GPU, and multi-GPU setups
16
+ """
17
+
18
+ import pickle
19
+ import warnings
20
+ from functools import update_wrapper, wraps
21
+ from typing import Any, Mapping
22
+
23
+ import torch
24
+
25
+ from ..state import PartialState
26
+ from .constants import TORCH_DISTRIBUTED_OPERATION_TYPES
27
+ from .dataclasses import DistributedType, TensorInformation
28
+ from .imports import (
29
+ is_npu_available,
30
+ is_torch_distributed_available,
31
+ is_torch_version,
32
+ is_torch_xla_available,
33
+ is_xpu_available,
34
+ )
35
+
36
+
37
+ if is_torch_xla_available():
38
+ import torch_xla.core.xla_model as xm
39
+
40
+ if is_torch_distributed_available():
41
+ from torch.distributed import ReduceOp
42
+
43
+
44
+ def is_torch_tensor(tensor):
45
+ return isinstance(tensor, torch.Tensor)
46
+
47
+
48
+ def is_torch_xpu_tensor(tensor):
+ return isinstance(
+ tensor,
+ (
+ torch.xpu.FloatTensor,
+ torch.xpu.ByteTensor,
+ torch.xpu.IntTensor,
+ torch.xpu.LongTensor,
+ torch.xpu.HalfTensor,
+ torch.xpu.DoubleTensor,
+ torch.xpu.BFloat16Tensor,
+ ),
+ )
59
+
60
+
61
+ def is_tensor_information(tensor_info):
62
+ return isinstance(tensor_info, TensorInformation)
63
+
64
+
65
+ def is_namedtuple(data):
66
+ """
67
+ Checks if `data` is a `namedtuple` or not. Can have false positives, but only if a user is trying to mimic a
68
+ `namedtuple` perfectly.
69
+ """
70
+ return isinstance(data, tuple) and hasattr(data, "_asdict") and hasattr(data, "_fields")
71
+
72
+
73
+ def honor_type(obj, generator):
74
+ """
75
+ Cast a generator to the same type as obj (list, tuple, or namedtuple)
76
+ """
77
+ # Some objects may not be able to instantiate from a generator directly
78
+ if is_namedtuple(obj):
79
+ return type(obj)(*list(generator))
80
+ else:
81
+ return type(obj)(generator)
82
+
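To make the namedtuple branch concrete, a short sketch (illustrative only, using the `honor_type` defined above):

from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])
p = Point(1, 2)
print(honor_type(p, (v * 2 for v in p)))            # Point(x=2, y=4), rebuilt by positional unpacking
print(honor_type([1, 2], (v * 2 for v in [1, 2])))  # [2, 4], rebuilt directly from the generator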
83
+
84
+ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):
85
+ """
86
+ Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.
87
+
88
+ Args:
89
+ func (`callable`):
90
+ The function to recursively apply.
91
+ data (nested list/tuple/dictionary of `main_type`):
92
+ The data on which to apply `func`
93
+ *args:
94
+ Positional arguments that will be passed to `func` when applied on the unpacked data.
95
+ test_type (`callable`, *optional*, defaults to `is_torch_tensor`):
96
+ A test function taking an object and returning whether `func` should be applied to it.
97
+ error_on_other_type (`bool`, *optional*, defaults to `False`):
98
+ Whether to raise an error if, after unpacking `data`, we get an object that does not pass
99
+ `test_type`. If `False`, the function will leave objects failing `test_type` unchanged.
100
+ **kwargs (additional keyword arguments, *optional*):
101
+ Keyword arguments that will be passed to `func` when applied on the unpacked data.
102
+
103
+ Returns:
104
+ The same data structure as `data` with `func` applied to every object passing `test_type`.
105
+ """
106
+ if isinstance(data, (tuple, list)):
107
+ return honor_type(
108
+ data,
109
+ (
110
+ recursively_apply(
111
+ func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
112
+ )
113
+ for o in data
114
+ ),
115
+ )
116
+ elif isinstance(data, Mapping):
117
+ return type(data)(
118
+ {
119
+ k: recursively_apply(
120
+ func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
121
+ )
122
+ for k, v in data.items()
123
+ }
124
+ )
125
+ elif test_type(data):
126
+ return func(data, *args, **kwargs)
127
+ elif error_on_other_type:
128
+ raise TypeError(
129
+ f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of "
130
+ f"objects that are valid for `{test_type.__name__}` should be passed."
131
+ )
132
+ return data
133
+
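A quick sketch of `recursively_apply` in action (not part of the file); note that container types are preserved:

import torch
from accelerate.utils.operations import recursively_apply

data = {"a": [torch.ones(2), torch.zeros(3)], "b": (torch.arange(4),)}
doubled = recursively_apply(lambda t: t * 2, data)
# `doubled` keeps the same dict/list/tuple nesting; only the tensors are transformed.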
134
+
135
+ def send_to_device(tensor, device, non_blocking=False, skip_keys=None):
136
+ """
137
+ Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
138
+
139
+ Args:
140
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
141
+ The data to send to a given device.
142
+ device (`torch.device`):
+ The device to send the data to.
+ non_blocking (`bool`, *optional*, defaults to `False`):
+ Whether the transfer should be asynchronous when possible.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A (list of) dictionary key(s) whose values should be left untouched when `tensor` is a mapping.
144
+
145
+ Returns:
146
+ The same data structure as `tensor` with all tensors sent to the proper device.
147
+ """
148
+ if is_torch_tensor(tensor) or hasattr(tensor, "to"):
149
+ # `torch.Tensor.to("npu")` could not find context when called for the first time (see this [issue](https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue)).
150
+ if device == "npu":
151
+ device = "npu:0"
152
+ if device == "xpu":
153
+ device = "xpu:0"
154
+ # TODO: torch_mlu LongTensor.to(<int num>) has bugs, we will fix this later.
155
+ if is_torch_tensor(tensor) and tensor.device.type in ["mlu"] and tensor.dtype in [torch.int64]:
156
+ tensor = tensor.cpu()
157
+ try:
158
+ return tensor.to(device, non_blocking=non_blocking)
159
+ except TypeError: # .to() doesn't accept non_blocking as kwarg
160
+ return tensor.to(device)
161
+ except AssertionError as error:
162
+ # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
163
+ # This call is inside the try-block since is_npu_available is not supported by torch.compile.
164
+ if is_npu_available():
165
+ if isinstance(device, int):
166
+ device = f"npu:{device}"
167
+ elif is_xpu_available():
168
+ if isinstance(device, int):
169
+ device = f"xpu:{device}"
170
+ else:
171
+ raise error
172
+ try:
173
+ return tensor.to(device, non_blocking=non_blocking)
174
+ except TypeError: # .to() doesn't accept non_blocking as kwarg
175
+ return tensor.to(device)
176
+ elif isinstance(tensor, (tuple, list)):
177
+ return honor_type(
178
+ tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor)
179
+ )
180
+ elif isinstance(tensor, Mapping):
181
+ if isinstance(skip_keys, str):
182
+ skip_keys = [skip_keys]
183
+ elif skip_keys is None:
184
+ skip_keys = []
185
+ return type(tensor)(
186
+ {
187
+ k: t if k in skip_keys else send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys)
188
+ for k, t in tensor.items()
189
+ }
190
+ )
191
+ else:
192
+ return tensor
193
+
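A minimal sketch of `send_to_device` on a mixed batch (illustrative; falls back to CPU when no GPU is available):

import torch
from accelerate.utils.operations import send_to_device

batch = {"input_ids": torch.zeros(4, 16, dtype=torch.long), "meta": "not a tensor"}
device = "cuda:0" if torch.cuda.is_available() else "cpu"
moved = send_to_device(batch, device)
# Tensors are moved to `device`; non-tensor values like "meta" pass through unchanged.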
194
+
195
+ def get_data_structure(data):
196
+ """
197
+ Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.
198
+
199
+ Args:
200
+ data (nested list/tuple/dictionary of `torch.Tensor`):
201
+ The data to send to analyze.
202
+
203
+ Returns:
204
+ The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
205
+ """
206
+
207
+ def _get_data_structure(tensor):
208
+ return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)
209
+
210
+ return recursively_apply(_get_data_structure, data)
211
+
212
+
213
+ def get_shape(data):
214
+ """
215
+ Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list.
216
+
217
+ Args:
218
+ data (nested list/tuple/dictionary of `torch.Tensor`):
219
+ The data to send to analyze.
220
+
221
+ Returns:
222
+ The same data structure as `data` with lists of tensor shapes instead of tensors.
223
+ """
224
+
225
+ def _get_shape(tensor):
226
+ return list(tensor.shape)
227
+
228
+ return recursively_apply(_get_shape, data)
229
+
230
+
231
+ def initialize_tensors(data_structure):
232
+ """
233
+ Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`].
234
+
235
+ Returns:
236
+ The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
237
+ """
238
+
239
+ def _initialize_tensor(tensor_info):
240
+ return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)
241
+
242
+ return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
243
+
244
+
245
+ def find_batch_size(data):
246
+ """
247
+ Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.
248
+
249
+ Args:
250
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
251
+
252
+ Returns:
253
+ `int`: The batch size.
254
+ """
255
+ if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0):
256
+ raise ValueError(f"Cannot find the batch size from empty {type(data)}.")
257
+
258
+ if isinstance(data, (tuple, list)):
259
+ return find_batch_size(data[0])
260
+ elif isinstance(data, Mapping):
261
+ for k in data.keys():
262
+ return find_batch_size(data[k])
263
+ elif not isinstance(data, torch.Tensor):
264
+ raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.")
265
+ return data.shape[0]
266
+
267
+
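For example (a sketch, runnable on a single process):

import torch
from accelerate.utils.operations import find_batch_size

batch = {"input_ids": torch.zeros(8, 128), "labels": torch.zeros(8)}
print(find_batch_size(batch))  # 8 -- read off the first tensor encountered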
268
+ def ignorant_find_batch_size(data):
269
+ """
270
+ Same as [`utils.operations.find_batch_size`] except it returns `None` when a `ValueError` or `TypeError` is raised.
271
+
272
+ Args:
273
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
274
+
275
+ Returns:
276
+ `int` or `None`: The batch size, or `None` if it could not be determined.
277
+ """
278
+ try:
279
+ return find_batch_size(data)
280
+ except (ValueError, TypeError):
281
+ pass
282
+ return None
283
+
284
+
285
+ def listify(data):
286
+ """
287
+ Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers.
288
+
289
+ Args:
290
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers.
291
+
292
+ Returns:
293
+ The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
294
+ """
295
+
296
+ def _convert_to_list(tensor):
297
+ tensor = tensor.detach().cpu()
298
+ if tensor.dtype == torch.bfloat16:
299
+ # As of Numpy 1.21.4, NumPy does not support bfloat16 (see
300
+ # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ).
301
+ # Until NumPy adds bfloat16, we must convert to float32 first.
302
+ tensor = tensor.to(torch.float32)
303
+ return tensor.tolist()
304
+
305
+ return recursively_apply(_convert_to_list, data)
306
+
307
+
308
+ def _tpu_gather(tensor):
309
+ def _tpu_gather_one(tensor):
310
+ if tensor.ndim == 0:
311
+ tensor = tensor.clone()[None]
312
+
313
+ # Can only gather contiguous tensors
314
+ if not tensor.is_contiguous():
315
+ tensor = tensor.contiguous()
316
+ return xm.all_gather(tensor)
317
+
318
+ res = recursively_apply(_tpu_gather_one, tensor, error_on_other_type=True)
319
+ xm.mark_step()
320
+ return res
321
+
322
+
323
+ def _gpu_gather(tensor):
324
+ state = PartialState()
325
+ if is_torch_version(">=", "1.13"):
326
+ gather_op = torch.distributed.all_gather_into_tensor
327
+ else:
328
+ gather_op = torch.distributed._all_gather_base
329
+
330
+ def _gpu_gather_one(tensor):
331
+ if tensor.ndim == 0:
332
+ tensor = tensor.clone()[None]
333
+
334
+ # Can only gather contiguous tensors
335
+ if not tensor.is_contiguous():
336
+ tensor = tensor.contiguous()
337
+
338
+ if state.backend is not None and state.backend != "gloo":
339
+ # We use `empty` as `all_gather_into_tensor` slightly
340
+ # differs from `all_gather` for better efficiency,
341
+ # and we rely on the number of items in the tensor
342
+ # rather than its direct shape
343
+ output_tensors = torch.empty(
344
+ state.num_processes * tensor.numel(),
345
+ dtype=tensor.dtype,
346
+ device=state.device,
347
+ )
348
+ gather_op(output_tensors, tensor)
349
+ return output_tensors.view(-1, *tensor.size()[1:])
350
+ else:
351
+ # a backend of `None` is always CPU
352
+ # also gloo does not support `all_gather_into_tensor`,
353
+ # which will result in a larger memory overhead for the op
354
+ output_tensors = [torch.empty_like(tensor) for _ in range(state.num_processes)]
355
+ torch.distributed.all_gather(output_tensors, tensor)
356
+ return torch.cat(output_tensors, dim=0)
357
+
358
+ return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
359
+
360
+
361
+ class DistributedOperationException(Exception):
362
+ """
363
+ An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the
364
+ tensors.
365
+ """
366
+
367
+ pass
368
+
369
+
370
+ def verify_operation(function):
371
+ """
372
+ Verifies that `tensor` is the same shape across all processes. Only run if `PartialState().debug` is `True`.
373
+ """
374
+
375
+ @wraps(function)
376
+ def wrapper(*args, **kwargs):
377
+ if PartialState().distributed_type == DistributedType.NO or not PartialState().debug:
378
+ return function(*args, **kwargs)
379
+ operation = f"{function.__module__}.{function.__name__}"
380
+ if "tensor" in kwargs:
381
+ tensor = kwargs["tensor"]
382
+ else:
383
+ tensor = args[0]
384
+ if PartialState().device.type != find_device(tensor).type:
385
+ raise DistributedOperationException(
386
+ f"One or more of the tensors passed to {operation} were not on the {tensor.device.type} while the `Accelerator` is configured for {PartialState().device.type}. "
387
+ f"Please move it to the {PartialState().device.type} before calling {operation}."
388
+ )
389
+ shapes = get_shape(tensor)
390
+ output = gather_object([shapes])
391
+ if output[0] is not None:
392
+ are_same = output.count(output[0]) == len(output)
393
+ if not are_same:
394
+ process_shape_str = "\n - ".join([f"Process {i}: {shape}" for i, shape in enumerate(output)])
395
+ raise DistributedOperationException(
396
+ f"Cannot apply desired operation due to shape mismatches. "
397
+ "All shapes across devices must be valid."
398
+ f"\n\nOperation: `{operation}`\nInput shapes:\n - {process_shape_str}"
399
+ )
400
+ return function(*args, **kwargs)
401
+
402
+ return wrapper
403
+
404
+
405
+ def chained_operation(function):
406
+ """
407
+ Catches any `DistributedOperationException` raised inside `function` and re-raises it with a more helpful
408
+ message naming the operation that failed, chaining the original exception.
409
+ """
410
+
411
+ @wraps(function)
412
+ def wrapper(*args, **kwargs):
413
+ try:
414
+ return function(*args, **kwargs)
415
+ except DistributedOperationException as e:
416
+ operation = f"{function.__module__}.{function.__name__}"
417
+ raise DistributedOperationException(
418
+ f"Error found while calling `{operation}`. Please see the earlier error for more details."
419
+ ) from e
420
+
421
+ return wrapper
422
+
423
+
424
+ @verify_operation
425
+ def gather(tensor):
426
+ """
427
+ Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.
428
+
429
+ Args:
430
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
431
+ The data to gather.
432
+
433
+ Returns:
434
+ The same data structure as `tensor` with all tensors gathered across processes (concatenated along the first dimension).
435
+ """
436
+ if PartialState().distributed_type == DistributedType.XLA:
437
+ return _tpu_gather(tensor)
438
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
439
+ return _gpu_gather(tensor)
440
+ else:
441
+ return tensor
442
+
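Illustrative only: `gather` assumes an initialized distributed state, so a script like the sketch below would be started with `accelerate launch`:

import torch
from accelerate import Accelerator

accelerator = Accelerator()
# Each process contributes a (2,) tensor; the gathered result has shape (2 * num_processes,).
local = torch.full((2,), accelerator.process_index, device=accelerator.device)
print(accelerator.gather(local))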
443
+
444
+ def _gpu_gather_object(object: Any):
445
+ output_objects = [None for _ in range(PartialState().num_processes)]
446
+ torch.distributed.all_gather_object(output_objects, object)
447
+ # all_gather_object returns a list of lists, so we need to flatten it
448
+ return [x for y in output_objects for x in y]
449
+
450
+
451
+ def gather_object(object: Any):
452
+ """
453
+ Recursively gather object in a nested list/tuple/dictionary of objects from all devices.
454
+
455
+ Args:
456
+ object (nested list/tuple/dictionary of picklable object):
457
+ The data to gather.
458
+
459
+ Returns:
460
+ The same data structure as `object` with all the objects sent to every device.
461
+ """
462
+ if PartialState().distributed_type == DistributedType.XLA:
463
+ raise NotImplementedError("gather objects in TPU is not supported")
464
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
465
+ return _gpu_gather_object(object)
466
+ else:
467
+ return object
468
+
469
+
470
+ def _gpu_broadcast(data, src=0):
471
+ def _gpu_broadcast_one(tensor, src=0):
472
+ torch.distributed.broadcast(tensor, src=src)
473
+ return tensor
474
+
475
+ return recursively_apply(_gpu_broadcast_one, data, error_on_other_type=True, src=src)
476
+
477
+
478
+ def _tpu_broadcast(tensor, src=0, name="broadcast tensor"):
479
+ if isinstance(tensor, (list, tuple)):
480
+ return honor_type(tensor, (_tpu_broadcast(t, name=f"{name}_{i}") for i, t in enumerate(tensor)))
481
+ elif isinstance(tensor, Mapping):
482
+ return type(tensor)({k: _tpu_broadcast(v, name=f"{name}_{k}") for k, v in tensor.items()})
483
+ return xm.mesh_reduce(name, tensor, lambda x: x[src])
484
+
485
+
486
+ TENSOR_TYPE_TO_INT = {
487
+ torch.float: 1,
488
+ torch.double: 2,
489
+ torch.half: 3,
490
+ torch.bfloat16: 4,
491
+ torch.uint8: 5,
492
+ torch.int8: 6,
493
+ torch.int16: 7,
494
+ torch.int32: 8,
495
+ torch.int64: 9,
496
+ torch.bool: 10,
497
+ }
498
+
499
+ TENSOR_INT_TO_DTYPE = {v: k for k, v in TENSOR_TYPE_TO_INT.items()}
500
+
501
+
502
+ def gather_tensor_shape(tensor):
503
+ """
504
+ Grabs the shape of a `tensor` that is only available on one process and returns a tensor encoding its shape
505
+ """
506
+ # Allocate a buffer large enough to store any realistic shape (2**20 int slots)
507
+ max_tensor_dimension = 2**20
508
+ state = PartialState()
509
+ base_tensor = torch.empty(max_tensor_dimension, dtype=torch.int, device=state.device)
510
+
511
+ # Since PyTorch can't just send a tensor to another GPU without
512
+ # knowing its size, we store the size of the tensor with data
513
+ # in an allocation
514
+ if tensor is not None:
515
+ shape = tensor.shape
516
+ tensor_dtype = TENSOR_TYPE_TO_INT[tensor.dtype]
517
+ base_tensor[: len(shape) + 1] = torch.tensor(list(shape) + [tensor_dtype], dtype=int)
518
+ # Perform a reduction to copy the size data onto all GPUs
519
+ base_tensor = reduce(base_tensor, reduction="sum")
520
+ base_tensor = base_tensor[base_tensor.nonzero()]
521
+ # The last non-zero data contains the coded dtype the source tensor is
522
+ dtype = int(base_tensor[-1:][0])
523
+ base_tensor = base_tensor[:-1]
524
+ return base_tensor, dtype
525
+
526
+
527
+ def copy_tensor_to_devices(tensor=None) -> torch.Tensor:
528
+ """
529
+ Copys a tensor that only exists on a single device and broadcasts it to other devices. Differs from `broadcast` as
530
+ each worker doesn't need to know its shape when used (and tensor can be `None`)
531
+
532
+ Args:
533
+ tensor (`torch.tensor`):
534
+ The tensor that should be sent to all devices. It must be defined on exactly one device; on every other
536
+ process it should be `None`.
536
+ """
537
+ state = PartialState()
538
+ shape, dtype = gather_tensor_shape(tensor)
539
+ if tensor is None:
540
+ tensor = torch.zeros(shape, dtype=TENSOR_INT_TO_DTYPE[dtype]).to(state.device)
541
+ return reduce(tensor, reduction="sum")
542
+
543
+
544
+ @verify_operation
545
+ def broadcast(tensor, from_process: int = 0):
546
+ """
547
+ Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.
548
+
549
+ Args:
550
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
551
+ The data to broadcast.
552
+ from_process (`int`, *optional*, defaults to 0):
553
+ The process from which to send the data
554
+
555
+ Returns:
556
+ The same data structure as `tensor` with all tensors broadcasted to the proper device.
557
+ """
558
+ if PartialState().distributed_type == DistributedType.XLA:
559
+ return _tpu_broadcast(tensor, src=from_process, name="accelerate.utils.broadcast")
560
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
561
+ return _gpu_broadcast(tensor, src=from_process)
562
+ else:
563
+ return tensor
564
+
565
+
566
+ def broadcast_object_list(object_list, from_process: int = 0):
567
+ """
568
+ Broadcast a list of picklable objects from one process to the others.
569
+
570
+ Args:
571
+ object_list (list of picklable objects):
572
+ The list of objects to broadcast. This list will be modified inplace.
573
+ from_process (`int`, *optional*, defaults to 0):
574
+ The process from which to send the data.
575
+
576
+ Returns:
577
+ The same list containing the objects from the source process.
578
+ """
579
+ if PartialState().distributed_type == DistributedType.XLA:
580
+ for i, obj in enumerate(object_list):
581
+ object_list[i] = xm.mesh_reduce("accelerate.utils.broadcast_object_list", obj, lambda x: x[from_process])
582
+ elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
583
+ torch.distributed.broadcast_object_list(object_list, src=from_process)
584
+ return object_list
585
+
586
+
587
+ def slice_tensors(data, tensor_slice, process_index=None, num_processes=None):
588
+ """
589
+ Recursively takes a slice in a nested list/tuple/dictionary of tensors.
590
+
591
+ Args:
592
+ data (nested list/tuple/dictionary of `torch.Tensor`):
593
+ The data to slice.
594
+ tensor_slice (`slice`):
595
+ The slice to take.
596
+
597
+ Returns:
598
+ The same data structure as `data` with all the tensors sliced.
599
+ """
600
+
601
+ def _slice_tensor(tensor, tensor_slice):
602
+ return tensor[tensor_slice]
603
+
604
+ return recursively_apply(_slice_tensor, data, tensor_slice)
605
+
606
+
607
+ def concatenate(data, dim=0):
608
+ """
609
+ Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.
610
+
611
+ Args:
612
+ data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`):
613
+ The data to concatenate.
614
+ dim (`int`, *optional*, defaults to 0):
615
+ The dimension on which to concatenate.
616
+
617
+ Returns:
618
+ The same data structure as `data` with all the tensors concatenated.
619
+ """
620
+ if isinstance(data[0], (tuple, list)):
621
+ return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0]))))
622
+ elif isinstance(data[0], Mapping):
623
+ return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()})
624
+ elif not isinstance(data[0], torch.Tensor):
625
+ raise TypeError(f"Can only concatenate tensors but got {type(data[0])}")
626
+ return torch.cat(data, dim=dim)
627
+
628
+
629
+ class CannotPadNestedTensorWarning(UserWarning):
630
+ pass
631
+
632
+
633
+ @chained_operation
634
+ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
635
+ """
636
+ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they
637
+ can safely be gathered.
638
+
639
+ Args:
640
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
641
+ The data to pad.
642
+ dim (`int`, *optional*, defaults to 0):
643
+ The dimension on which to pad.
644
+ pad_index (`int`, *optional*, defaults to 0):
645
+ The value with which to pad.
646
+ pad_first (`bool`, *optional*, defaults to `False`):
647
+ Whether to pad at the beginning or the end.
648
+ """
649
+
650
+ def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
651
+ if getattr(tensor, "is_nested", False):
652
+ warnings.warn(
653
+ "Cannot pad nested tensors without more information. Leaving unprocessed.",
654
+ CannotPadNestedTensorWarning,
655
+ )
656
+ return tensor
657
+ if dim >= len(tensor.shape):
658
+ return tensor
659
+
660
+ # Gather all sizes
661
+ size = torch.tensor(tensor.shape, device=tensor.device)[None]
662
+ sizes = gather(size).cpu()
663
+ # Then pad to the maximum size
664
+ max_size = max(s[dim] for s in sizes)
665
+ if max_size == tensor.shape[dim]:
666
+ return tensor
667
+
668
+ old_size = tensor.shape
669
+ new_size = list(old_size)
670
+ new_size[dim] = max_size
671
+ new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
672
+ if pad_first:
673
+ indices = tuple(
674
+ slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size))
675
+ )
676
+ else:
677
+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
678
+ new_tensor[indices] = tensor
679
+ return new_tensor
680
+
681
+ return recursively_apply(
682
+ _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first
683
+ )
684
+
685
+
686
+ def pad_input_tensors(tensor, batch_size, num_processes, dim=0):
687
+ """
688
+ Takes a `tensor` of arbitrary size and pads it so that it can work given `num_processes` needed dimensions.
689
+
690
+ The padded entries are filled with zeros.
691
+
692
+ E.g.:
693
+ Tensor: (3, 4, 4); Num processes: 4; Expected result shape: (4, 4, 4)
694
+
695
+ """
696
+
697
+ def _pad_input_tensors(tensor, batch_size, num_processes, dim=0):
698
+ remainder = batch_size // num_processes
699
+ last_inputs = batch_size - (remainder * num_processes)
700
+ if batch_size // num_processes == 0:
701
+ to_pad = num_processes - batch_size
702
+ else:
703
+ to_pad = num_processes - (batch_size // num_processes)
704
+ # In the rare case that `to_pad` is zero or negative,
706
+ # we instead need to pad by `last_inputs - to_pad`
707
+ if last_inputs > to_pad and to_pad < 1:
707
+ to_pad = last_inputs - to_pad
708
+ old_size = tensor.shape
709
+ new_size = list(old_size)
710
+ new_size[0] = batch_size + to_pad
711
+ new_tensor = tensor.new_zeros(tuple(new_size))
712
+ indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
713
+ new_tensor[indices] = tensor
714
+ return new_tensor
715
+
716
+ return recursively_apply(
717
+ _pad_input_tensors,
718
+ tensor,
719
+ error_on_other_type=True,
720
+ batch_size=batch_size,
721
+ num_processes=num_processes,
722
+ dim=dim,
723
+ )
724
+
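Since this helper needs no distributed context, it can be sanity-checked directly (a sketch):

import torch
from accelerate.utils.operations import pad_input_tensors

x = torch.arange(12).reshape(3, 4)
padded = pad_input_tensors(x, batch_size=3, num_processes=4)
print(padded.shape)  # torch.Size([4, 4]) -- one zero row appended so each of 4 processes gets a row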
725
+
726
+ @verify_operation
727
+ def reduce(tensor, reduction="mean", scale=1.0):
728
+ """
729
+ Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes using
730
+ the given operation.
731
+
732
+ Args:
733
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
734
+ The data to reduce.
735
+ reduction (`str`, *optional*, defaults to `"mean"`):
736
+ A reduction method. Can be one of "mean", "sum", or "none".
737
+ scale (`float`, *optional*):
738
+ A default scaling value to be applied after the reduce, only valid on XLA.
739
+
740
+ Returns:
741
+ The same data structure as `tensor` with all the tensors reduced.
742
+ """
743
+
744
+ def _reduce_across_processes(tensor, reduction="mean", scale=1.0):
745
+ state = PartialState()
746
+ cloned_tensor = tensor.clone()
747
+ if state.distributed_type == DistributedType.NO:
748
+ return cloned_tensor
749
+ if state.distributed_type == DistributedType.XLA:
750
+ # Some processes may have different HLO graphs than other
751
+ # processes, for example in the breakpoint API
752
+ # accelerator.set_trigger(). Use mark_step to make HLOs
753
+ # the same on all processes.
754
+ xm.mark_step()
755
+ xm.all_reduce(xm.REDUCE_SUM, [cloned_tensor], scale)
756
+ xm.mark_step()
757
+ elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES:
758
+ torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)
759
+ if reduction == "mean":
760
+ cloned_tensor /= state.num_processes
761
+ return cloned_tensor
762
+
763
+ return recursively_apply(
764
+ _reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale
765
+ )
766
+
767
+
768
+ def convert_to_fp32(tensor):
769
+ """
770
+ Recursively converts the elements of a nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.
771
+
772
+ Args:
773
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
774
+ The data to convert from FP16/BF16 to FP32.
775
+
776
+ Returns:
777
+ The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
778
+ """
779
+
780
+ def _convert_to_fp32(tensor):
781
+ return tensor.float()
782
+
783
+ def _is_fp16_bf16_tensor(tensor):
784
+ return (is_torch_tensor(tensor) or hasattr(tensor, "dtype")) and tensor.dtype in (
785
+ torch.float16,
786
+ torch.bfloat16,
787
+ )
788
+
789
+ return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
790
+
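For instance (a sketch):

import torch
from accelerate.utils import convert_to_fp32

half = {"logits": torch.randn(2, 4, dtype=torch.float16)}
print(convert_to_fp32(half)["logits"].dtype)  # torch.float32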
791
+
792
+ class ConvertOutputsToFp32:
793
+ """
794
+ Decorator to apply to a function outputting tensors (like a model forward pass) that ensures the outputs in FP16
795
+ precision will be converted back to FP32.
796
+
797
+ Args:
798
+ model_forward (`Callable`):
799
+ The function whose outputs we want to convert.
800
+
801
+ Returns:
802
+ The same function as `model_forward` but with converted outputs.
803
+ """
804
+
805
+ def __init__(self, model_forward):
806
+ self.model_forward = model_forward
807
+ update_wrapper(self, model_forward)
808
+
809
+ def __call__(self, *args, **kwargs):
810
+ return convert_to_fp32(self.model_forward(*args, **kwargs))
811
+
812
+ def __getstate__(self):
813
+ raise pickle.PicklingError(
814
+ "Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
815
+ )
816
+
817
+
818
+ def convert_outputs_to_fp32(model_forward):
819
+ model_forward = ConvertOutputsToFp32(model_forward)
820
+
821
+ def forward(*args, **kwargs):
822
+ return model_forward(*args, **kwargs)
823
+
824
+ # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
825
+ forward.__wrapped__ = model_forward
826
+
827
+ return forward
828
+
829
+
830
+ def find_device(data):
831
+ """
832
+ Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device).
833
+
834
+ Args:
835
+ data (nested list/tuple/dictionary of `torch.Tensor`): The data whose device we want to know.
836
+ """
837
+ if isinstance(data, Mapping):
838
+ for obj in data.values():
839
+ device = find_device(obj)
840
+ if device is not None:
841
+ return device
842
+ elif isinstance(data, (tuple, list)):
843
+ for obj in data:
844
+ device = find_device(obj)
845
+ if device is not None:
846
+ return device
847
+ elif isinstance(data, torch.Tensor):
848
+ return data.device
llmeval-env/lib/python3.10/site-packages/accelerate/utils/random.py ADDED
@@ -0,0 +1,124 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import random
16
+ from typing import List, Optional, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+
21
+ from ..state import AcceleratorState
22
+ from .constants import CUDA_DISTRIBUTED_TYPES
23
+ from .dataclasses import DistributedType, RNGType
24
+ from .imports import is_mlu_available, is_npu_available, is_torch_xla_available, is_xpu_available
25
+
26
+
27
+ if is_torch_xla_available():
28
+ import torch_xla.core.xla_model as xm
29
+
30
+
31
+ def set_seed(seed: int, device_specific: bool = False, deterministic: bool = False):
32
+ """
33
+ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.
34
+
35
+ Args:
36
+ seed (`int`):
37
+ The seed to set.
38
+ device_specific (`bool`, *optional*, defaults to `False`):
39
+ Whether to differ the seed on each device slightly with `self.process_index`.
40
+ deterministic (`bool`, *optional*, defaults to `False`):
41
+ Whether to use deterministic algorithms where available. Can slow down training.
42
+ """
43
+ if device_specific:
44
+ seed += AcceleratorState().process_index
45
+ random.seed(seed)
46
+ np.random.seed(seed)
47
+ torch.manual_seed(seed)
48
+ if is_xpu_available():
49
+ torch.xpu.manual_seed_all(seed)
50
+ elif is_npu_available():
51
+ torch.npu.manual_seed_all(seed)
52
+ elif is_mlu_available():
53
+ torch.mlu.manual_seed_all(seed)
54
+ else:
55
+ torch.cuda.manual_seed_all(seed)
56
+ # ^^ safe to call this function even if cuda is not available
57
+ if is_torch_xla_available():
58
+ xm.set_rng_state(seed)
59
+
60
+ if deterministic:
61
+ torch.use_deterministic_algorithms(True)
62
+
63
+
64
+ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
65
+ # Get the proper rng state
66
+ if rng_type == RNGType.TORCH:
67
+ rng_state = torch.get_rng_state()
68
+ elif rng_type == RNGType.CUDA:
69
+ rng_state = torch.cuda.get_rng_state()
70
+ elif rng_type == RNGType.XLA:
71
+ assert is_torch_xla_available(), "Can't synchronize XLA seeds as torch_xla is unavailable."
72
+ rng_state = torch.tensor(xm.get_rng_state())
73
+ elif rng_type == RNGType.NPU:
74
+ assert is_npu_available(), "Can't synchronize NPU seeds on an environment without NPUs."
75
+ rng_state = torch.npu.get_rng_state()
76
+ elif rng_type == RNGType.MLU:
77
+ assert is_mlu_available(), "Can't synchronize MLU seeds on an environment without MLUs."
78
+ rng_state = torch.mlu.get_rng_state()
79
+ elif rng_type == RNGType.XPU:
80
+ assert is_xpu_available(), "Can't synchronize XPU seeds on an environment without XPUs."
81
+ rng_state = torch.xpu.get_rng_state()
82
+ elif rng_type == RNGType.GENERATOR:
83
+ assert generator is not None, "Need a generator to synchronize its seed."
84
+ rng_state = generator.get_state()
85
+
86
+ # Broadcast the rng state from device 0 to other devices
87
+ state = AcceleratorState()
88
+ if state.distributed_type == DistributedType.XLA:
89
+ rng_state = rng_state.to(xm.xla_device())
90
+ xm.collective_broadcast([rng_state])
91
+ xm.mark_step()
92
+ rng_state = rng_state.cpu()
93
+ elif (
94
+ state.distributed_type in CUDA_DISTRIBUTED_TYPES
95
+ or state.distributed_type == DistributedType.MULTI_MLU
96
+ or state.distributed_type == DistributedType.MULTI_NPU
97
+ or state.distributed_type == DistributedType.MULTI_XPU
98
+ ):
99
+ rng_state = rng_state.to(state.device)
100
+ torch.distributed.broadcast(rng_state, 0)
101
+ rng_state = rng_state.cpu()
102
+ elif state.distributed_type == DistributedType.MULTI_CPU:
103
+ torch.distributed.broadcast(rng_state, 0)
104
+
105
+ # Set the broadcast rng state
106
+ if rng_type == RNGType.TORCH:
107
+ torch.set_rng_state(rng_state)
108
+ elif rng_type == RNGType.CUDA:
109
+ torch.cuda.set_rng_state(rng_state)
110
+ elif rng_type == RNGType.NPU:
111
+ torch.npu.set_rng_state(rng_state)
112
+ elif rng_type == RNGType.MLU:
113
+ torch.mlu.set_rng_state(rng_state)
114
+ elif rng_type == RNGType.XPU:
115
+ torch.xpu.set_rng_state(rng_state)
116
+ elif rng_type == RNGType.XLA:
117
+ xm.set_rng_state(rng_state.item())
118
+ elif rng_type == RNGType.GENERATOR:
119
+ generator.set_state(rng_state)
120
+
121
+
122
+ def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
123
+ for rng_type in rng_types:
124
+ synchronize_rng_state(RNGType(rng_type), generator=generator)
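A sketch of how these helpers are typically invoked (requires an initialized `AcceleratorState`; on a single process the broadcast branches are no-ops):

import torch
from accelerate.utils.random import synchronize_rng_states

g = torch.Generator()
synchronize_rng_states(["torch", "generator"], generator=g)
# After this call, every process shares process 0's torch RNG state and `g`'s state.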
llmeval-env/lib/python3.10/site-packages/accelerate/utils/rich.py ADDED
@@ -0,0 +1,24 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .imports import is_rich_available
16
+
17
+
18
+ if is_rich_available():
19
+ from rich.traceback import install
20
+
21
+ install(show_locals=False)
22
+
23
+ else:
24
+ raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
llmeval-env/lib/python3.10/site-packages/mbstrdecoder-1.1.3.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
llmeval-env/lib/python3.10/site-packages/mbstrdecoder-1.1.3.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2016 Tsuyoshi Hombashi
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
llmeval-env/lib/python3.10/site-packages/mbstrdecoder-1.1.3.dist-info/METADATA ADDED
@@ -0,0 +1,117 @@
1
+ Metadata-Version: 2.1
2
+ Name: mbstrdecoder
3
+ Version: 1.1.3
4
+ Summary: mbstrdecoder is a Python library for multi-byte character string decoder
5
+ Home-page: https://github.com/thombashi/mbstrdecoder
6
+ Author: Tsuyoshi Hombashi
7
+ Author-email: [email protected]
8
+ License: MIT License
9
+ Project-URL: Source, https://github.com/thombashi/mbstrdecoder
10
+ Project-URL: Tracker, https://github.com/thombashi/mbstrdecoder/issues
11
+ Keywords: multi-byte character,unicode,decoder
12
+ Classifier: Development Status :: 5 - Production/Stable
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: Intended Audience :: Information Technology
15
+ Classifier: License :: OSI Approved :: MIT License
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.7
19
+ Classifier: Programming Language :: Python :: 3.8
20
+ Classifier: Programming Language :: Python :: 3.9
21
+ Classifier: Programming Language :: Python :: 3.10
22
+ Classifier: Programming Language :: Python :: 3.11
23
+ Classifier: Programming Language :: Python :: 3.12
24
+ Classifier: Programming Language :: Python :: Implementation :: CPython
25
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
26
+ Classifier: Topic :: Software Development :: Libraries
27
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
28
+ Classifier: Topic :: Text Processing
29
+ Requires-Python: >=3.7
30
+ Description-Content-Type: text/x-rst
31
+ License-File: LICENSE
32
+ Requires-Dist: chardet (<6,>=3.0.4)
33
+ Provides-Extra: test
34
+ Requires-Dist: Faker (>=1.0.2) ; extra == 'test'
35
+ Requires-Dist: pytest (>=6.0.1) ; extra == 'test'
36
+ Requires-Dist: pytest-md-report (>=0.1) ; extra == 'test'
37
+
38
+ .. contents:: **mbstrdecoder**
39
+ :backlinks: top
40
+ :local:
41
+
42
+
43
+ Summary
44
+ =======
45
+ `mbstrdecoder <https://github.com/thombashi/mbstrdecoder>`__ is a Python library for multi-byte character string decoder.
46
+
47
+
48
+ .. image:: https://badge.fury.io/py/mbstrdecoder.svg
49
+ :target: https://badge.fury.io/py/mbstrdecoder
50
+ :alt: PyPI package version
51
+
52
+ .. image:: https://img.shields.io/pypi/pyversions/mbstrdecoder.svg
53
+ :target: https://pypi.org/project/mbstrdecoder
54
+ :alt: Supported Python versions
55
+
56
+ .. image:: https://img.shields.io/pypi/implementation/mbstrdecoder.svg
57
+ :target: https://pypi.org/project/mbstrdecoder
58
+ :alt: Supported Python implementations
59
+
60
+ .. image:: https://github.com/thombashi/mbstrdecoder/actions/workflows/lint_and_test.yml/badge.svg
61
+ :target: https://github.com/thombashi/mbstrdecoder/actions/workflows/lint_and_test.yml
62
+ :alt: CI status of Linux/macOS/Windows
63
+
64
+ .. image:: https://coveralls.io/repos/github/thombashi/mbstrdecoder/badge.svg?branch=master
65
+ :target: https://coveralls.io/github/thombashi/mbstrdecoder?branch=master
66
+ :alt: Test coverage
67
+
68
+ .. image:: https://github.com/thombashi/mbstrdecoder/actions/workflows/codeql-analysis.yml/badge.svg
69
+ :target: https://github.com/thombashi/mbstrdecoder/actions/workflows/codeql-analysis.yml
70
+ :alt: CodeQL
71
+
72
+
73
+ Installation
74
+ ============
75
+
76
+ Install from PyPI
77
+ ------------------------------
78
+ ::
79
+
80
+ pip install mbstrdecoder
81
+
82
+ Install from PPA (for Ubuntu)
83
+ ------------------------------
84
+ ::
85
+
86
+ sudo add-apt-repository ppa:thombashi/ppa
87
+ sudo apt update
88
+ sudo apt install python3-mbstrdecoder
89
+
90
+
91
+ Usage
92
+ =====
93
+
94
+ :Sample Code:
95
+ .. code:: python
96
+
97
+ from mbstrdecoder import MultiByteStrDecoder
98
+
99
+ encoded_multibyte_text = "マルチバイト文字".encode("utf-8")
100
+ decoder = MultiByteStrDecoder(encoded_multibyte_text)
101
+
102
+ print("encoded bytes: {}".format(encoded_multibyte_text))
103
+ print("unicode: {}".format(decoder.unicode_str))
104
+ print("codec: {}".format(decoder.codec))
105
+
106
+ :Output:
107
+ ::
108
+
109
+ encoded bytes: b'\xe3\x83\x9e\xe3\x83\xab\xe3\x83\x81\xe3\x83\x90\xe3\x82\xa4\xe3\x83\x88\xe6\x96\x87\xe5\xad\x97'
110
+ unicode: マルチバイト文字
111
+ codec: utf_8
112
+
113
+
114
+ Dependencies
115
+ ============
116
+ - Python 3.7+
117
+ - `Python package dependencies (automatically installed) <https://github.com/thombashi/mbstrdecoder/network/dependencies>`__
llmeval-env/lib/python3.10/site-packages/mbstrdecoder-1.1.3.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ mbstrdecoder
llmeval-env/lib/python3.10/site-packages/networkx/__init__.py ADDED
@@ -0,0 +1,49 @@
1
+ """
2
+ NetworkX
3
+ ========
4
+
5
+ NetworkX is a Python package for the creation, manipulation, and study of the
6
+ structure, dynamics, and functions of complex networks.
7
+
8
+ See https://networkx.org for complete documentation.
9
+ """
10
+
11
+ __version__ = "3.3"
12
+
13
+
14
+ # These are imported in order as listed
15
+ from networkx.lazy_imports import _lazy_import
16
+
17
+ from networkx.exception import *
18
+
19
+ from networkx import utils
20
+ from networkx.utils import _clear_cache, _dispatchable, config
21
+
22
+ from networkx import classes
23
+ from networkx.classes import filters
24
+ from networkx.classes import *
25
+
26
+ from networkx import convert
27
+ from networkx.convert import *
28
+
29
+ from networkx import convert_matrix
30
+ from networkx.convert_matrix import *
31
+
32
+ from networkx import relabel
33
+ from networkx.relabel import *
34
+
35
+ from networkx import generators
36
+ from networkx.generators import *
37
+
38
+ from networkx import readwrite
39
+ from networkx.readwrite import *
40
+
41
+ # Need to test with SciPy, when available
42
+ from networkx import algorithms
43
+ from networkx.algorithms import *
44
+
45
+ from networkx import linalg
46
+ from networkx.linalg import *
47
+
48
+ from networkx import drawing
49
+ from networkx.drawing import *
llmeval-env/lib/python3.10/site-packages/networkx/conftest.py ADDED
@@ -0,0 +1,289 @@
1
+ """
2
+ Testing
3
+ =======
4
+
5
+ General guidelines for writing good tests:
6
+
7
+ - doctests always assume ``import networkx as nx`` so don't add that
8
+ - prefer pytest fixtures over classes with setup methods.
9
+ - use the ``@pytest.mark.parametrize`` decorator
10
+ - use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib b/c of PyPy.
11
+ and add the module to the relevant entries below.
12
+
13
+ """
14
+ import os
15
+ import sys
16
+ import warnings
17
+ from importlib.metadata import entry_points
18
+
19
+ import pytest
20
+
21
+ import networkx
22
+
23
+
24
+ def pytest_addoption(parser):
25
+ parser.addoption(
26
+ "--runslow", action="store_true", default=False, help="run slow tests"
27
+ )
28
+ parser.addoption(
29
+ "--backend",
30
+ action="store",
31
+ default=None,
32
+ help="Run tests with a backend by auto-converting nx graphs to backend graphs",
33
+ )
34
+ parser.addoption(
35
+ "--fallback-to-nx",
36
+ action="store_true",
37
+ default=False,
38
+ help="Run nx function if a backend doesn't implement a dispatchable function"
39
+ " (use with --backend)",
40
+ )
41
+
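For reference, these options combine as, e.g., `pytest --runslow --backend nx-loopback --fallback-to-nx`, where `nx-loopback` is the test backend registered in `pytest_configure` below.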
42
+
43
+ def pytest_configure(config):
44
+ config.addinivalue_line("markers", "slow: mark test as slow to run")
45
+ backend = config.getoption("--backend")
46
+ if backend is None:
47
+ backend = os.environ.get("NETWORKX_TEST_BACKEND")
48
+ # nx-loopback backend is only available when testing
49
+ backends = entry_points(name="nx-loopback", group="networkx.backends")
50
+ if backends:
51
+ networkx.utils.backends.backends["nx-loopback"] = next(iter(backends))
52
+ else:
53
+ warnings.warn(
54
+ "\n\n WARNING: Mixed NetworkX configuration! \n\n"
55
+ " This environment has mixed configuration for networkx.\n"
56
+ " The test object nx-loopback is not configured correctly.\n"
57
+ " You should not be seeing this message.\n"
58
+ " Try `pip install -e .`, or change your PYTHONPATH\n"
59
+ " Make sure python finds the networkx repo you are testing\n\n"
60
+ )
61
+ if backend:
62
+ networkx.config["backend_priority"] = [backend]
63
+ fallback_to_nx = config.getoption("--fallback-to-nx")
64
+ if not fallback_to_nx:
65
+ fallback_to_nx = os.environ.get("NETWORKX_FALLBACK_TO_NX")
66
+ networkx.utils.backends._dispatchable._fallback_to_nx = bool(fallback_to_nx)
67
+
68
+
69
+ def pytest_collection_modifyitems(config, items):
70
+ # Setting this to True here allows tests to be set up before dispatching
71
+ # any function call to a backend.
72
+ networkx.utils.backends._dispatchable._is_testing = True
73
+ if backend_priority := networkx.config["backend_priority"]:
74
+ # Allow pluggable backends to add markers to tests (such as skip or xfail)
75
+ # when running in auto-conversion test mode
76
+ backend = networkx.utils.backends.backends[backend_priority[0]].load()
77
+ if hasattr(backend, "on_start_tests"):
78
+ getattr(backend, "on_start_tests")(items)
79
+
80
+ if config.getoption("--runslow"):
81
+ # --runslow given in cli: do not skip slow tests
82
+ return
83
+ skip_slow = pytest.mark.skip(reason="need --runslow option to run")
84
+ for item in items:
85
+ if "slow" in item.keywords:
86
+ item.add_marker(skip_slow)
87
+
88
+
89
+ # TODO: The warnings below need to be dealt with, but for now we silence them.
90
+ @pytest.fixture(autouse=True)
91
+ def set_warnings():
92
+ warnings.filterwarnings(
93
+ "ignore",
94
+ category=FutureWarning,
95
+ message="\n\nsingle_target_shortest_path_length",
96
+ )
97
+ warnings.filterwarnings(
98
+ "ignore",
99
+ category=FutureWarning,
100
+ message="\n\nshortest_path",
101
+ )
102
+ warnings.filterwarnings(
103
+ "ignore", category=DeprecationWarning, message="\nforest_str is deprecated"
104
+ )
105
+ warnings.filterwarnings(
106
+ "ignore", category=DeprecationWarning, message="\n\nrandom_tree"
107
+ )
108
+ warnings.filterwarnings(
109
+ "ignore", category=DeprecationWarning, message="Edmonds has been deprecated"
110
+ )
111
+ warnings.filterwarnings(
112
+ "ignore",
113
+ category=DeprecationWarning,
114
+ message="MultiDiGraph_EdgeKey has been deprecated",
115
+ )
116
+ warnings.filterwarnings(
117
+ "ignore", category=DeprecationWarning, message="\n\nThe `normalized`"
118
+ )
119
+ warnings.filterwarnings(
120
+ "ignore",
121
+ category=DeprecationWarning,
122
+ message="The function `join` is deprecated",
123
+ )
124
+ warnings.filterwarnings(
125
+ "ignore",
126
+ category=DeprecationWarning,
127
+ message="\n\nstrongly_connected_components_recursive",
128
+ )
129
+ warnings.filterwarnings(
130
+ "ignore", category=DeprecationWarning, message="\n\nall_triplets"
131
+ )
132
+ warnings.filterwarnings(
133
+ "ignore", category=DeprecationWarning, message="\n\nrandom_triad"
134
+ )
135
+ warnings.filterwarnings(
136
+ "ignore", category=DeprecationWarning, message="minimal_d_separator"
137
+ )
138
+ warnings.filterwarnings(
139
+ "ignore", category=DeprecationWarning, message="d_separated"
140
+ )
141
+ warnings.filterwarnings("ignore", category=DeprecationWarning, message="\n\nk_core")
142
+ warnings.filterwarnings(
143
+ "ignore", category=DeprecationWarning, message="\n\nk_shell"
144
+ )
145
+ warnings.filterwarnings(
146
+ "ignore", category=DeprecationWarning, message="\n\nk_crust"
147
+ )
148
+ warnings.filterwarnings(
149
+ "ignore", category=DeprecationWarning, message="\n\nk_corona"
150
+ )
151
+ warnings.filterwarnings(
152
+ "ignore", category=DeprecationWarning, message="\n\ntotal_spanning_tree_weight"
153
+ )
154
+ warnings.filterwarnings(
155
+ "ignore", category=DeprecationWarning, message=r"\n\nThe 'create=matrix'"
156
+ )
157
+
158
+
159
+ @pytest.fixture(autouse=True)
160
+ def add_nx(doctest_namespace):
161
+ doctest_namespace["nx"] = networkx
162
+
163
+
164
+ # What dependencies are installed?
165
+
166
+ try:
167
+ import numpy
168
+
169
+ has_numpy = True
170
+ except ImportError:
171
+ has_numpy = False
172
+
173
+ try:
174
+ import scipy
175
+
176
+ has_scipy = True
177
+ except ImportError:
178
+ has_scipy = False
179
+
180
+ try:
181
+ import matplotlib
182
+
183
+ has_matplotlib = True
184
+ except ImportError:
185
+ has_matplotlib = False
186
+
187
+ try:
188
+ import pandas
189
+
190
+ has_pandas = True
191
+ except ImportError:
192
+ has_pandas = False
193
+
194
+ try:
195
+ import pygraphviz
196
+
197
+ has_pygraphviz = True
198
+ except ImportError:
199
+ has_pygraphviz = False
200
+
201
+ try:
202
+ import pydot
203
+
204
+ has_pydot = True
205
+ except ImportError:
206
+ has_pydot = False
207
+
208
+ try:
209
+ import sympy
210
+
211
+ has_sympy = True
212
+ except ImportError:
213
+ has_sympy = False
214
+
215
+
216
+ # List of files that pytest should ignore
217
+
218
+ collect_ignore = []
219
+
220
+ needs_numpy = [
221
+ "algorithms/approximation/traveling_salesman.py",
222
+ "algorithms/centrality/current_flow_closeness.py",
223
+ "algorithms/node_classification.py",
224
+ "algorithms/non_randomness.py",
225
+ "algorithms/shortest_paths/dense.py",
226
+ "algorithms/tree/mst.py",
227
+ "generators/expanders.py",
228
+ "linalg/bethehessianmatrix.py",
229
+ "linalg/laplacianmatrix.py",
230
+ "utils/misc.py",
231
+ "algorithms/centrality/laplacian.py",
232
+ ]
233
+ needs_scipy = [
234
+ "algorithms/approximation/traveling_salesman.py",
235
+ "algorithms/assortativity/correlation.py",
236
+ "algorithms/assortativity/mixing.py",
237
+ "algorithms/assortativity/pairs.py",
238
+ "algorithms/bipartite/matrix.py",
239
+ "algorithms/bipartite/spectral.py",
240
+ "algorithms/centrality/current_flow_betweenness.py",
241
+ "algorithms/centrality/current_flow_betweenness_subset.py",
242
+ "algorithms/centrality/eigenvector.py",
243
+ "algorithms/centrality/katz.py",
244
+ "algorithms/centrality/laplacian.py",
245
+ "algorithms/centrality/second_order.py",
246
+ "algorithms/centrality/subgraph_alg.py",
247
+ "algorithms/communicability_alg.py",
248
+ "algorithms/community/divisive.py",
249
+ "algorithms/distance_measures.py",
250
+ "algorithms/link_analysis/hits_alg.py",
251
+ "algorithms/link_analysis/pagerank_alg.py",
252
+ "algorithms/node_classification.py",
253
+ "algorithms/similarity.py",
254
+ "algorithms/tree/mst.py",
255
+ "algorithms/walks.py",
256
+ "convert_matrix.py",
257
+ "drawing/layout.py",
258
+ "drawing/nx_pylab.py",
259
+ "generators/spectral_graph_forge.py",
260
+ "generators/expanders.py",
261
+ "linalg/algebraicconnectivity.py",
262
+ "linalg/attrmatrix.py",
263
+ "linalg/bethehessianmatrix.py",
264
+ "linalg/graphmatrix.py",
265
+ "linalg/laplacianmatrix.py",
266
+ "linalg/modularitymatrix.py",
267
+ "linalg/spectrum.py",
268
+ "utils/rcm.py",
269
+ ]
270
+ needs_matplotlib = ["drawing/nx_pylab.py"]
271
+ needs_pandas = ["convert_matrix.py"]
272
+ needs_pygraphviz = ["drawing/nx_agraph.py"]
273
+ needs_pydot = ["drawing/nx_pydot.py"]
274
+ needs_sympy = ["algorithms/polynomials.py"]
275
+
276
+ if not has_numpy:
277
+ collect_ignore += needs_numpy
278
+ if not has_scipy:
279
+ collect_ignore += needs_scipy
280
+ if not has_matplotlib:
281
+ collect_ignore += needs_matplotlib
282
+ if not has_pandas:
283
+ collect_ignore += needs_pandas
284
+ if not has_pygraphviz:
285
+ collect_ignore += needs_pygraphviz
286
+ if not has_pydot:
287
+ collect_ignore += needs_pydot
288
+ if not has_sympy:
289
+ collect_ignore += needs_sympy
llmeval-env/lib/python3.10/site-packages/networkx/convert.py ADDED
@@ -0,0 +1,494 @@
+"""Functions to convert NetworkX graphs to and from other formats.
+
+The preferred way of converting data to a NetworkX graph is through the
+graph constructor. The constructor calls the to_networkx_graph() function
+which attempts to guess the input type and convert it automatically.
+
+Examples
+--------
+Create a graph with a single edge from a dictionary of dictionaries
+
+>>> d = {0: {1: 1}}  # dict-of-dicts single edge (0,1)
+>>> G = nx.Graph(d)
+
+See Also
+--------
+nx_agraph, nx_pydot
+"""
+import warnings
+from collections.abc import Collection, Generator, Iterator
+
+import networkx as nx
+
+__all__ = [
+    "to_networkx_graph",
+    "from_dict_of_dicts",
+    "to_dict_of_dicts",
+    "from_dict_of_lists",
+    "to_dict_of_lists",
+    "from_edgelist",
+    "to_edgelist",
+]
+
+
+def to_networkx_graph(data, create_using=None, multigraph_input=False):
+    """Make a NetworkX graph from a known data structure.
+
+    The preferred way to call this is automatically
+    from the class constructor
+
+    >>> d = {0: {1: {"weight": 1}}}  # dict-of-dicts single edge (0,1)
+    >>> G = nx.Graph(d)
+
+    instead of the equivalent
+
+    >>> G = nx.from_dict_of_dicts(d)
+
+    Parameters
+    ----------
+    data : object to be converted
+
+        Current known types are:
+         any NetworkX graph
+         dict-of-dicts
+         dict-of-lists
+         container (e.g. set, list, tuple) of edges
+         iterator (e.g. itertools.chain) that produces edges
+         generator of edges
+         Pandas DataFrame (row per edge)
+         2D numpy array
+         scipy sparse array
+         pygraphviz agraph
+
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+
+    multigraph_input : bool (default False)
+        If True and data is a dict_of_dicts,
+        try to create a multigraph assuming dict_of_dict_of_lists.
+        If data and create_using are both multigraphs then create
+        a multigraph from a multigraph.
+
+    """
+    # NX graph
+    if hasattr(data, "adj"):
+        try:
+            result = from_dict_of_dicts(
+                data.adj,
+                create_using=create_using,
+                multigraph_input=data.is_multigraph(),
+            )
+            # data.graph should be dict-like
+            result.graph.update(data.graph)
+            # data.nodes should be dict-like
+            # result.add_node_from(data.nodes.items()) possible but
+            # for custom node_attr_dict_factory which may be hashable
+            # will be unexpected behavior
+            for n, dd in data.nodes.items():
+                result._node[n].update(dd)
+            return result
+        except Exception as err:
+            raise nx.NetworkXError("Input is not a correct NetworkX graph.") from err
+
+    # pygraphviz agraph
+    if hasattr(data, "is_strict"):
+        try:
+            return nx.nx_agraph.from_agraph(data, create_using=create_using)
+        except Exception as err:
+            raise nx.NetworkXError("Input is not a correct pygraphviz graph.") from err
+
+    # dict of dicts/lists
+    if isinstance(data, dict):
+        try:
+            return from_dict_of_dicts(
+                data, create_using=create_using, multigraph_input=multigraph_input
+            )
+        except Exception as err1:
+            if multigraph_input is True:
+                raise nx.NetworkXError(
+                    f"converting multigraph_input raised:\n{type(err1)}: {err1}"
+                )
+            try:
+                return from_dict_of_lists(data, create_using=create_using)
+            except Exception as err2:
+                raise TypeError("Input is not known type.") from err2
+
+    # Pandas DataFrame
+    try:
+        import pandas as pd
+
+        if isinstance(data, pd.DataFrame):
+            if data.shape[0] == data.shape[1]:
+                try:
+                    return nx.from_pandas_adjacency(data, create_using=create_using)
+                except Exception as err:
+                    msg = "Input is not a correct Pandas DataFrame adjacency matrix."
+                    raise nx.NetworkXError(msg) from err
+            else:
+                try:
+                    return nx.from_pandas_edgelist(
+                        data, edge_attr=True, create_using=create_using
+                    )
+                except Exception as err:
+                    msg = "Input is not a correct Pandas DataFrame edge-list."
+                    raise nx.NetworkXError(msg) from err
+    except ImportError:
+        warnings.warn("pandas not found, skipping conversion test.", ImportWarning)
+
+    # numpy array
+    try:
+        import numpy as np
+
+        if isinstance(data, np.ndarray):
+            try:
+                return nx.from_numpy_array(data, create_using=create_using)
+            except Exception as err:
+                raise nx.NetworkXError(
+                    f"Failed to interpret array as an adjacency matrix."
+                ) from err
+    except ImportError:
+        warnings.warn("numpy not found, skipping conversion test.", ImportWarning)
+
+    # scipy sparse array - any format
+    try:
+        import scipy
+
+        if hasattr(data, "format"):
+            try:
+                return nx.from_scipy_sparse_array(data, create_using=create_using)
+            except Exception as err:
+                raise nx.NetworkXError(
+                    "Input is not a correct scipy sparse array type."
+                ) from err
+    except ImportError:
+        warnings.warn("scipy not found, skipping conversion test.", ImportWarning)
+
+    # Note: most general check - should remain last in order of execution
+    # Includes containers (e.g. list, set, dict, etc.), generators, and
+    # iterators (e.g. itertools.chain) of edges
+
+    if isinstance(data, Collection | Generator | Iterator):
+        try:
+            return from_edgelist(data, create_using=create_using)
+        except Exception as err:
+            raise nx.NetworkXError("Input is not a valid edge list") from err
+
+    raise nx.NetworkXError("Input is not a known data type for conversion.")
+
+
+@nx._dispatchable
+def to_dict_of_lists(G, nodelist=None):
+    """Returns adjacency representation of graph as a dictionary of lists.
+
+    Parameters
+    ----------
+    G : graph
+       A NetworkX graph
+
+    nodelist : list
+       Use only nodes specified in nodelist
+
+    Notes
+    -----
+    Completely ignores edge data for MultiGraph and MultiDiGraph.
+
+    """
+    if nodelist is None:
+        nodelist = G
+
+    d = {}
+    for n in nodelist:
+        d[n] = [nbr for nbr in G.neighbors(n) if nbr in nodelist]
+    return d
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def from_dict_of_lists(d, create_using=None):
+    """Returns a graph from a dictionary of lists.
+
+    Parameters
+    ----------
+    d : dictionary of lists
+       A dictionary of lists adjacency representation.
+
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+
+    Examples
+    --------
+    >>> dol = {0: [1]}  # single edge (0,1)
+    >>> G = nx.from_dict_of_lists(dol)
+
+    or
+
+    >>> G = nx.Graph(dol)  # use Graph constructor
+
+    """
+    G = nx.empty_graph(0, create_using)
+    G.add_nodes_from(d)
+    if G.is_multigraph() and not G.is_directed():
+        # a dict_of_lists can't show multiedges. BUT for undirected graphs,
+        # each edge shows up twice in the dict_of_lists.
+        # So we need to treat this case separately.
+        seen = {}
+        for node, nbrlist in d.items():
+            for nbr in nbrlist:
+                if nbr not in seen:
+                    G.add_edge(node, nbr)
+            seen[node] = 1  # don't allow reverse edge to show up
+    else:
+        G.add_edges_from(
+            ((node, nbr) for node, nbrlist in d.items() for nbr in nbrlist)
+        )
+    return G
+
+
+def to_dict_of_dicts(G, nodelist=None, edge_data=None):
+    """Returns adjacency representation of graph as a dictionary of dictionaries.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    nodelist : list
+        Use only nodes specified in nodelist
+
+    edge_data : scalar, optional
+        If provided, the value of the dictionary will be set to `edge_data` for
+        all edges. Usual values could be `1` or `True`. If `edge_data` is
+        `None` (the default), the edgedata in `G` is used, resulting in a
+        dict-of-dict-of-dicts. If `G` is a MultiGraph, the result will be a
+        dict-of-dict-of-dict-of-dicts. See Notes for an approach to customize
+        handling edge data. `edge_data` should *not* be a container.
+
+    Returns
+    -------
+    dod : dict
+        A nested dictionary representation of `G`. Note that the level of
+        nesting depends on the type of `G` and the value of `edge_data`
+        (see Examples).
+
+    See Also
+    --------
+    from_dict_of_dicts, to_dict_of_lists
+
+    Notes
+    -----
+    For a more custom approach to handling edge data, try::
+
+        dod = {
+            n: {nbr: custom(n, nbr, dd) for nbr, dd in nbrdict.items()}
+            for n, nbrdict in G.adj.items()
+        }
+
+    where `custom` returns the desired edge data for each edge between `n` and
+    `nbr`, given existing edge data `dd`.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> nx.to_dict_of_dicts(G)
+    {0: {1: {}}, 1: {0: {}, 2: {}}, 2: {1: {}}}
+
+    Edge data is preserved by default (``edge_data=None``), resulting
+    in dict-of-dict-of-dicts where the innermost dictionary contains the
+    edge data:
+
+    >>> G = nx.Graph()
+    >>> G.add_edges_from(
+    ...     [
+    ...         (0, 1, {"weight": 1.0}),
+    ...         (1, 2, {"weight": 2.0}),
+    ...         (2, 0, {"weight": 1.0}),
+    ...     ]
+    ... )
+    >>> d = nx.to_dict_of_dicts(G)
+    >>> d  # doctest: +SKIP
+    {0: {1: {'weight': 1.0}, 2: {'weight': 1.0}},
+     1: {0: {'weight': 1.0}, 2: {'weight': 2.0}},
+     2: {1: {'weight': 2.0}, 0: {'weight': 1.0}}}
+    >>> d[1][2]["weight"]
+    2.0
+
+    If `edge_data` is not `None`, edge data in the original graph (if any) is
+    replaced:
+
+    >>> d = nx.to_dict_of_dicts(G, edge_data=1)
+    >>> d
+    {0: {1: 1, 2: 1}, 1: {0: 1, 2: 1}, 2: {1: 1, 0: 1}}
+    >>> d[1][2]
+    1
+
+    This also applies to MultiGraphs: edge data is preserved by default:
+
+    >>> G = nx.MultiGraph()
+    >>> G.add_edge(0, 1, key="a", weight=1.0)
+    'a'
+    >>> G.add_edge(0, 1, key="b", weight=5.0)
+    'b'
+    >>> d = nx.to_dict_of_dicts(G)
+    >>> d  # doctest: +SKIP
+    {0: {1: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}},
+     1: {0: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}}
+    >>> d[0][1]["b"]["weight"]
+    5.0
+
+    But multi edge data is lost if `edge_data` is not `None`:
+
+    >>> d = nx.to_dict_of_dicts(G, edge_data=10)
+    >>> d
+    {0: {1: 10}, 1: {0: 10}}
+    """
+    dod = {}
+    if nodelist is None:
+        if edge_data is None:
+            for u, nbrdict in G.adjacency():
+                dod[u] = nbrdict.copy()
+        else:  # edge_data is not None
+            for u, nbrdict in G.adjacency():
+                dod[u] = dod.fromkeys(nbrdict, edge_data)
+    else:  # nodelist is not None
+        if edge_data is None:
+            for u in nodelist:
+                dod[u] = {}
+                for v, data in ((v, data) for v, data in G[u].items() if v in nodelist):
+                    dod[u][v] = data
+        else:  # nodelist and edge_data are not None
+            for u in nodelist:
+                dod[u] = {}
+                for v in (v for v in G[u] if v in nodelist):
+                    dod[u][v] = edge_data
+    return dod
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def from_dict_of_dicts(d, create_using=None, multigraph_input=False):
+    """Returns a graph from a dictionary of dictionaries.
+
+    Parameters
+    ----------
+    d : dictionary of dictionaries
+        A dictionary of dictionaries adjacency representation.
+
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+
+    multigraph_input : bool (default False)
+        When True, the dict `d` is assumed
+        to be a dict-of-dict-of-dict-of-dict structure keyed by
+        node to neighbor to edge keys to edge data for multi-edges.
+        Otherwise this routine assumes dict-of-dict-of-dict keyed by
+        node to neighbor to edge data.
+
+    Examples
+    --------
+    >>> dod = {0: {1: {"weight": 1}}}  # single edge (0,1)
+    >>> G = nx.from_dict_of_dicts(dod)
+
+    or
+
+    >>> G = nx.Graph(dod)  # use Graph constructor
+
+    """
+    G = nx.empty_graph(0, create_using)
+    G.add_nodes_from(d)
+    # does dict d represent a MultiGraph or MultiDiGraph?
+    if multigraph_input:
+        if G.is_directed():
+            if G.is_multigraph():
+                G.add_edges_from(
+                    (u, v, key, data)
+                    for u, nbrs in d.items()
+                    for v, datadict in nbrs.items()
+                    for key, data in datadict.items()
+                )
+            else:
+                G.add_edges_from(
+                    (u, v, data)
+                    for u, nbrs in d.items()
+                    for v, datadict in nbrs.items()
+                    for key, data in datadict.items()
+                )
+        else:  # Undirected
+            if G.is_multigraph():
+                seen = set()  # don't add both directions of undirected graph
+                for u, nbrs in d.items():
+                    for v, datadict in nbrs.items():
+                        if (u, v) not in seen:
+                            G.add_edges_from(
+                                (u, v, key, data) for key, data in datadict.items()
+                            )
+                            seen.add((v, u))
+            else:
+                seen = set()  # don't add both directions of undirected graph
+                for u, nbrs in d.items():
+                    for v, datadict in nbrs.items():
+                        if (u, v) not in seen:
+                            G.add_edges_from(
+                                (u, v, data) for key, data in datadict.items()
+                            )
+                            seen.add((v, u))
+
+    else:  # not a multigraph to multigraph transfer
+        if G.is_multigraph() and not G.is_directed():
+            # d can have both representations u-v, v-u in dict. Only add one.
+            # We don't need this check for digraphs since we add both directions,
+            # or for Graph() since it is done implicitly (parallel edges not allowed)
+            seen = set()
+            for u, nbrs in d.items():
+                for v, data in nbrs.items():
+                    if (u, v) not in seen:
+                        G.add_edge(u, v, key=0)
+                        G[u][v][0].update(data)
+                    seen.add((v, u))
+        else:
+            G.add_edges_from(
+                ((u, v, data) for u, nbrs in d.items() for v, data in nbrs.items())
+            )
+    return G
+
+
+@nx._dispatchable(preserve_edge_attrs=True)
+def to_edgelist(G, nodelist=None):
+    """Returns a list of edges in the graph.
+
+    Parameters
+    ----------
+    G : graph
+       A NetworkX graph
+
+    nodelist : list
+       Use only nodes specified in nodelist
+
+    """
+    if nodelist is None:
+        return G.edges(data=True)
+    return G.edges(nodelist, data=True)
+
+
+@nx._dispatchable(graphs=None, returns_graph=True)
+def from_edgelist(edgelist, create_using=None):
+    """Returns a graph from a list of edges.
+
+    Parameters
+    ----------
+    edgelist : list or iterator
+       Edge tuples
+
+    create_using : NetworkX graph constructor, optional (default=nx.Graph)
+        Graph type to create. If graph instance, then cleared before populated.
+
+    Examples
+    --------
+    >>> edgelist = [(0, 1)]  # single edge (0,1)
+    >>> G = nx.from_edgelist(edgelist)
+
+    or
+
+    >>> G = nx.Graph(edgelist)  # use Graph constructor
+
+    """
+    G = nx.empty_graph(0, create_using)
+    G.add_edges_from(edgelist)
+    return G
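The converters in convert.py above are designed to round-trip: to_dict_of_dicts and from_dict_of_dicts are inverses for simple graphs, and the Graph constructor reaches from_dict_of_dicts through to_networkx_graph. A short usage sketch (illustrative, not part of the diff; graphs_equal is assumed to be importable from networkx.utils):

    import networkx as nx

    G = nx.Graph([(0, 1, {"weight": 3}), (1, 2, {"weight": 5})])
    dod = nx.to_dict_of_dicts(G)    # {0: {1: {'weight': 3}}, 1: {...}, 2: {...}}
    H = nx.from_dict_of_dicts(dod)  # equivalent: nx.Graph(dod), via to_networkx_graph
    assert nx.utils.graphs_equal(G, H)  # same nodes, edges, and edge data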
llmeval-env/lib/python3.10/site-packages/networkx/convert_matrix.py ADDED
@@ -0,0 +1,1202 @@
1
+ """Functions to convert NetworkX graphs to and from common data containers
2
+ like numpy arrays, scipy sparse arrays, and pandas DataFrames.
3
+
4
+ The preferred way of converting data to a NetworkX graph is through the
5
+ graph constructor. The constructor calls the `~networkx.convert.to_networkx_graph`
6
+ function which attempts to guess the input type and convert it automatically.
7
+
8
+ Examples
9
+ --------
10
+ Create a 10 node random graph from a numpy array
11
+
12
+ >>> import numpy as np
13
+ >>> rng = np.random.default_rng()
14
+ >>> a = rng.integers(low=0, high=2, size=(10, 10))
15
+ >>> DG = nx.from_numpy_array(a, create_using=nx.DiGraph)
16
+
17
+ or equivalently:
18
+
19
+ >>> DG = nx.DiGraph(a)
20
+
21
+ which calls `from_numpy_array` internally based on the type of ``a``.
22
+
23
+ See Also
24
+ --------
25
+ nx_agraph, nx_pydot
26
+ """
27
+
28
+ import itertools
29
+ from collections import defaultdict
30
+
31
+ import networkx as nx
32
+ from networkx.utils import not_implemented_for
33
+
34
+ __all__ = [
35
+ "from_pandas_adjacency",
36
+ "to_pandas_adjacency",
37
+ "from_pandas_edgelist",
38
+ "to_pandas_edgelist",
39
+ "from_scipy_sparse_array",
40
+ "to_scipy_sparse_array",
41
+ "from_numpy_array",
42
+ "to_numpy_array",
43
+ ]
44
+
45
+
46
+ @nx._dispatchable(edge_attrs="weight")
47
+ def to_pandas_adjacency(
48
+ G,
49
+ nodelist=None,
50
+ dtype=None,
51
+ order=None,
52
+ multigraph_weight=sum,
53
+ weight="weight",
54
+ nonedge=0.0,
55
+ ):
56
+ """Returns the graph adjacency matrix as a Pandas DataFrame.
57
+
58
+ Parameters
59
+ ----------
60
+ G : graph
61
+ The NetworkX graph used to construct the Pandas DataFrame.
62
+
63
+ nodelist : list, optional
64
+ The rows and columns are ordered according to the nodes in `nodelist`.
65
+ If `nodelist` is None, then the ordering is produced by G.nodes().
66
+
67
+ multigraph_weight : {sum, min, max}, optional
68
+ An operator that determines how weights in multigraphs are handled.
69
+ The default is to sum the weights of the multiple edges.
70
+
71
+ weight : string or None, optional
72
+ The edge attribute that holds the numerical value used for
73
+ the edge weight. If an edge does not have that attribute, then the
74
+ value 1 is used instead.
75
+
76
+ nonedge : float, optional
77
+ The matrix values corresponding to nonedges are typically set to zero.
78
+ However, this could be undesirable if there are matrix values
79
+ corresponding to actual edges that also have the value zero. If so,
80
+ one might prefer nonedges to have some other value, such as nan.
81
+
82
+ Returns
83
+ -------
84
+ df : Pandas DataFrame
85
+ Graph adjacency matrix
86
+
87
+ Notes
88
+ -----
89
+ For directed graphs, entry i,j corresponds to an edge from i to j.
90
+
91
+ The DataFrame entries are assigned to the weight edge attribute. When
92
+ an edge does not have a weight attribute, the value of the entry is set to
93
+ the number 1. For multiple (parallel) edges, the values of the entries
94
+ are determined by the 'multigraph_weight' parameter. The default is to
95
+ sum the weight attributes for each of the parallel edges.
96
+
97
+ When `nodelist` does not contain every node in `G`, the matrix is built
98
+ from the subgraph of `G` that is induced by the nodes in `nodelist`.
99
+
100
+ The convention used for self-loop edges in graphs is to assign the
101
+ diagonal matrix entry value to the weight attribute of the edge
102
+ (or the number 1 if the edge has no weight attribute). If the
103
+ alternate convention of doubling the edge weight is desired the
104
+ resulting Pandas DataFrame can be modified as follows::
105
+
106
+ >>> import pandas as pd
107
+ >>> G = nx.Graph([(1, 1), (2, 2)])
108
+ >>> df = nx.to_pandas_adjacency(G)
109
+ >>> df
110
+ 1 2
111
+ 1 1.0 0.0
112
+ 2 0.0 1.0
113
+ >>> diag_idx = list(range(len(df)))
114
+ >>> df.iloc[diag_idx, diag_idx] *= 2
115
+ >>> df
116
+ 1 2
117
+ 1 2.0 0.0
118
+ 2 0.0 2.0
119
+
120
+ Examples
121
+ --------
122
+ >>> G = nx.MultiDiGraph()
123
+ >>> G.add_edge(0, 1, weight=2)
124
+ 0
125
+ >>> G.add_edge(1, 0)
126
+ 0
127
+ >>> G.add_edge(2, 2, weight=3)
128
+ 0
129
+ >>> G.add_edge(2, 2)
130
+ 1
131
+ >>> nx.to_pandas_adjacency(G, nodelist=[0, 1, 2], dtype=int)
132
+ 0 1 2
133
+ 0 0 2 0
134
+ 1 1 0 0
135
+ 2 0 0 4
136
+
137
+ """
138
+ import pandas as pd
139
+
140
+ M = to_numpy_array(
141
+ G,
142
+ nodelist=nodelist,
143
+ dtype=dtype,
144
+ order=order,
145
+ multigraph_weight=multigraph_weight,
146
+ weight=weight,
147
+ nonedge=nonedge,
148
+ )
149
+ if nodelist is None:
150
+ nodelist = list(G)
151
+ return pd.DataFrame(data=M, index=nodelist, columns=nodelist)
152
+
153
+
154
+ @nx._dispatchable(graphs=None, returns_graph=True)
155
+ def from_pandas_adjacency(df, create_using=None):
156
+ r"""Returns a graph from Pandas DataFrame.
157
+
158
+ The Pandas DataFrame is interpreted as an adjacency matrix for the graph.
159
+
160
+ Parameters
161
+ ----------
162
+ df : Pandas DataFrame
163
+ An adjacency matrix representation of a graph
164
+
165
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
166
+ Graph type to create. If graph instance, then cleared before populated.
167
+
168
+ Notes
169
+ -----
170
+ For directed graphs, explicitly mention create_using=nx.DiGraph,
171
+ and entry i,j of df corresponds to an edge from i to j.
172
+
173
+ If `df` has a single data type for each entry it will be converted to an
174
+ appropriate Python data type.
175
+
176
+ If you have node attributes stored in a separate dataframe `df_nodes`,
177
+ you can load those attributes to the graph `G` using the following code:
178
+
179
+ ```
180
+ df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]})
181
+ G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows())
182
+ ```
183
+
184
+ If `df` has a user-specified compound data type the names
185
+ of the data fields will be used as attribute keys in the resulting
186
+ NetworkX graph.
187
+
188
+ See Also
189
+ --------
190
+ to_pandas_adjacency
191
+
192
+ Examples
193
+ --------
194
+ Simple integer weights on edges:
195
+
196
+ >>> import pandas as pd
197
+ >>> pd.options.display.max_columns = 20
198
+ >>> df = pd.DataFrame([[1, 1], [2, 1]])
199
+ >>> df
200
+ 0 1
201
+ 0 1 1
202
+ 1 2 1
203
+ >>> G = nx.from_pandas_adjacency(df)
204
+ >>> G.name = "Graph from pandas adjacency matrix"
205
+ >>> print(G)
206
+ Graph named 'Graph from pandas adjacency matrix' with 2 nodes and 3 edges
207
+ """
208
+
209
+ try:
210
+ df = df[df.index]
211
+ except Exception as err:
212
+ missing = list(set(df.index).difference(set(df.columns)))
213
+ msg = f"{missing} not in columns"
214
+ raise nx.NetworkXError("Columns must match Indices.", msg) from err
215
+
216
+ A = df.values
217
+ G = from_numpy_array(A, create_using=create_using)
218
+
219
+ nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False)
220
+ return G
221
+
222
+
223
+ @nx._dispatchable(preserve_edge_attrs=True)
224
+ def to_pandas_edgelist(
225
+ G,
226
+ source="source",
227
+ target="target",
228
+ nodelist=None,
229
+ dtype=None,
230
+ edge_key=None,
231
+ ):
232
+ """Returns the graph edge list as a Pandas DataFrame.
233
+
234
+ Parameters
235
+ ----------
236
+ G : graph
237
+ The NetworkX graph used to construct the Pandas DataFrame.
238
+
239
+ source : str or int, optional
240
+ A valid column name (string or integer) for the source nodes (for the
241
+ directed case).
242
+
243
+ target : str or int, optional
244
+ A valid column name (string or integer) for the target nodes (for the
245
+ directed case).
246
+
247
+ nodelist : list, optional
248
+ Use only nodes specified in nodelist
249
+
250
+ dtype : dtype, default None
251
+ Use to create the DataFrame. Data type to force.
252
+ Only a single dtype is allowed. If None, infer.
253
+
254
+ edge_key : str or int or None, optional (default=None)
255
+ A valid column name (string or integer) for the edge keys (for the
256
+ multigraph case). If None, edge keys are not stored in the DataFrame.
257
+
258
+ Returns
259
+ -------
260
+ df : Pandas DataFrame
261
+ Graph edge list
262
+
263
+ Examples
264
+ --------
265
+ >>> G = nx.Graph(
266
+ ... [
267
+ ... ("A", "B", {"cost": 1, "weight": 7}),
268
+ ... ("C", "E", {"cost": 9, "weight": 10}),
269
+ ... ]
270
+ ... )
271
+ >>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"])
272
+ >>> df[["source", "target", "cost", "weight"]]
273
+ source target cost weight
274
+ 0 A B 1 7
275
+ 1 C E 9 10
276
+
277
+ >>> G = nx.MultiGraph([("A", "B", {"cost": 1}), ("A", "B", {"cost": 9})])
278
+ >>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"], edge_key="ekey")
279
+ >>> df[["source", "target", "cost", "ekey"]]
280
+ source target cost ekey
281
+ 0 A B 1 0
282
+ 1 A B 9 1
283
+
284
+ """
285
+ import pandas as pd
286
+
287
+ if nodelist is None:
288
+ edgelist = G.edges(data=True)
289
+ else:
290
+ edgelist = G.edges(nodelist, data=True)
291
+ source_nodes = [s for s, _, _ in edgelist]
292
+ target_nodes = [t for _, t, _ in edgelist]
293
+
294
+ all_attrs = set().union(*(d.keys() for _, _, d in edgelist))
295
+ if source in all_attrs:
296
+ raise nx.NetworkXError(f"Source name {source!r} is an edge attr name")
297
+ if target in all_attrs:
298
+ raise nx.NetworkXError(f"Target name {target!r} is an edge attr name")
299
+
300
+ nan = float("nan")
301
+ edge_attr = {k: [d.get(k, nan) for _, _, d in edgelist] for k in all_attrs}
302
+
303
+ if G.is_multigraph() and edge_key is not None:
304
+ if edge_key in all_attrs:
305
+ raise nx.NetworkXError(f"Edge key name {edge_key!r} is an edge attr name")
306
+ edge_keys = [k for _, _, k in G.edges(keys=True)]
307
+ edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys}
308
+ else:
309
+ edgelistdict = {source: source_nodes, target: target_nodes}
310
+
311
+ edgelistdict.update(edge_attr)
312
+ return pd.DataFrame(edgelistdict, dtype=dtype)
313
+
314
+
315
+ @nx._dispatchable(graphs=None, returns_graph=True)
316
+ def from_pandas_edgelist(
317
+ df,
318
+ source="source",
319
+ target="target",
320
+ edge_attr=None,
321
+ create_using=None,
322
+ edge_key=None,
323
+ ):
324
+ """Returns a graph from Pandas DataFrame containing an edge list.
325
+
326
+ The Pandas DataFrame should contain at least two columns of node names and
327
+ zero or more columns of edge attributes. Each row will be processed as one
328
+ edge instance.
329
+
330
+ Note: This function iterates over DataFrame.values, which is not
331
+ guaranteed to retain the data type across columns in the row. This is only
332
+ a problem if your row is entirely numeric and a mix of ints and floats. In
333
+ that case, all values will be returned as floats. See the
334
+ DataFrame.iterrows documentation for an example.
335
+
336
+ Parameters
337
+ ----------
338
+ df : Pandas DataFrame
339
+ An edge list representation of a graph
340
+
341
+ source : str or int
342
+ A valid column name (string or integer) for the source nodes (for the
343
+ directed case).
344
+
345
+ target : str or int
346
+ A valid column name (string or integer) for the target nodes (for the
347
+ directed case).
348
+
349
+ edge_attr : str or int, iterable, True, or None
350
+ A valid column name (str or int) or iterable of column names that are
351
+ used to retrieve items and add them to the graph as edge attributes.
352
+ If `True`, all of the remaining columns will be added.
353
+ If `None`, no edge attributes are added to the graph.
354
+
355
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
356
+ Graph type to create. If graph instance, then cleared before populated.
357
+
358
+ edge_key : str or None, optional (default=None)
359
+ A valid column name for the edge keys (for a MultiGraph). The values in
360
+ this column are used for the edge keys when adding edges if create_using
361
+ is a multigraph.
362
+
363
+ If you have node attributes stored in a separate dataframe `df_nodes`,
364
+ you can load those attributes to the graph `G` using the following code:
365
+
366
+ ```
367
+ df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]})
368
+ G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows())
369
+ ```
370
+
371
+ See Also
372
+ --------
373
+ to_pandas_edgelist
374
+
375
+ Examples
376
+ --------
377
+ Simple integer weights on edges:
378
+
379
+ >>> import pandas as pd
380
+ >>> pd.options.display.max_columns = 20
381
+ >>> import numpy as np
382
+ >>> rng = np.random.RandomState(seed=5)
383
+ >>> ints = rng.randint(1, 11, size=(3, 2))
384
+ >>> a = ["A", "B", "C"]
385
+ >>> b = ["D", "A", "E"]
386
+ >>> df = pd.DataFrame(ints, columns=["weight", "cost"])
387
+ >>> df[0] = a
388
+ >>> df["b"] = b
389
+ >>> df[["weight", "cost", 0, "b"]]
390
+ weight cost 0 b
391
+ 0 4 7 A D
392
+ 1 7 1 B A
393
+ 2 10 9 C E
394
+ >>> G = nx.from_pandas_edgelist(df, 0, "b", ["weight", "cost"])
395
+ >>> G["E"]["C"]["weight"]
396
+ 10
397
+ >>> G["E"]["C"]["cost"]
398
+ 9
399
+ >>> edges = pd.DataFrame(
400
+ ... {
401
+ ... "source": [0, 1, 2],
402
+ ... "target": [2, 2, 3],
403
+ ... "weight": [3, 4, 5],
404
+ ... "color": ["red", "blue", "blue"],
405
+ ... }
406
+ ... )
407
+ >>> G = nx.from_pandas_edgelist(edges, edge_attr=True)
408
+ >>> G[0][2]["color"]
409
+ 'red'
410
+
411
+ Build multigraph with custom keys:
412
+
413
+ >>> edges = pd.DataFrame(
414
+ ... {
415
+ ... "source": [0, 1, 2, 0],
416
+ ... "target": [2, 2, 3, 2],
417
+ ... "my_edge_key": ["A", "B", "C", "D"],
418
+ ... "weight": [3, 4, 5, 6],
419
+ ... "color": ["red", "blue", "blue", "blue"],
420
+ ... }
421
+ ... )
422
+ >>> G = nx.from_pandas_edgelist(
423
+ ... edges,
424
+ ... edge_key="my_edge_key",
425
+ ... edge_attr=["weight", "color"],
426
+ ... create_using=nx.MultiGraph(),
427
+ ... )
428
+ >>> G[0][2]
429
+ AtlasView({'A': {'weight': 3, 'color': 'red'}, 'D': {'weight': 6, 'color': 'blue'}})
430
+
431
+
432
+ """
433
+ g = nx.empty_graph(0, create_using)
434
+
435
+ if edge_attr is None:
436
+ g.add_edges_from(zip(df[source], df[target]))
437
+ return g
438
+
439
+ reserved_columns = [source, target]
440
+
441
+ # Additional columns requested
442
+ attr_col_headings = []
443
+ attribute_data = []
444
+ if edge_attr is True:
445
+ attr_col_headings = [c for c in df.columns if c not in reserved_columns]
446
+ elif isinstance(edge_attr, list | tuple):
447
+ attr_col_headings = edge_attr
448
+ else:
449
+ attr_col_headings = [edge_attr]
450
+ if len(attr_col_headings) == 0:
451
+ raise nx.NetworkXError(
452
+ f"Invalid edge_attr argument: No columns found with name: {attr_col_headings}"
453
+ )
454
+
455
+ try:
456
+ attribute_data = zip(*[df[col] for col in attr_col_headings])
457
+ except (KeyError, TypeError) as err:
458
+ msg = f"Invalid edge_attr argument: {edge_attr}"
459
+ raise nx.NetworkXError(msg) from err
460
+
461
+ if g.is_multigraph():
462
+ # => append the edge keys from the df to the bundled data
463
+ if edge_key is not None:
464
+ try:
465
+ multigraph_edge_keys = df[edge_key]
466
+ attribute_data = zip(attribute_data, multigraph_edge_keys)
467
+ except (KeyError, TypeError) as err:
468
+ msg = f"Invalid edge_key argument: {edge_key}"
469
+ raise nx.NetworkXError(msg) from err
470
+
471
+ for s, t, attrs in zip(df[source], df[target], attribute_data):
472
+ if edge_key is not None:
473
+ attrs, multigraph_edge_key = attrs
474
+ key = g.add_edge(s, t, key=multigraph_edge_key)
475
+ else:
476
+ key = g.add_edge(s, t)
477
+
478
+ g[s][t][key].update(zip(attr_col_headings, attrs))
479
+ else:
480
+ for s, t, attrs in zip(df[source], df[target], attribute_data):
481
+ g.add_edge(s, t)
482
+ g[s][t].update(zip(attr_col_headings, attrs))
483
+
484
+ return g
485
+
486
+
487
+ @nx._dispatchable(edge_attrs="weight")
488
+ def to_scipy_sparse_array(G, nodelist=None, dtype=None, weight="weight", format="csr"):
489
+ """Returns the graph adjacency matrix as a SciPy sparse array.
490
+
491
+ Parameters
492
+ ----------
493
+ G : graph
494
+ The NetworkX graph used to construct the sparse matrix.
495
+
496
+ nodelist : list, optional
497
+ The rows and columns are ordered according to the nodes in `nodelist`.
498
+ If `nodelist` is None, then the ordering is produced by G.nodes().
499
+
500
+ dtype : NumPy data-type, optional
501
+ A valid NumPy dtype used to initialize the array. If None, then the
502
+ NumPy default is used.
503
+
504
+ weight : string or None optional (default='weight')
505
+ The edge attribute that holds the numerical value used for
506
+ the edge weight. If None then all edge weights are 1.
507
+
508
+ format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
509
+ The type of the matrix to be returned (default 'csr'). For
510
+ some algorithms different implementations of sparse matrices
511
+ can perform better. See [1]_ for details.
512
+
513
+ Returns
514
+ -------
515
+ A : SciPy sparse array
516
+ Graph adjacency matrix.
517
+
518
+ Notes
519
+ -----
520
+ For directed graphs, matrix entry i,j corresponds to an edge from i to j.
521
+
522
+ The matrix entries are populated using the edge attribute held in
523
+ parameter weight. When an edge does not have that attribute, the
524
+ value of the entry is 1.
525
+
526
+ For multiple edges the matrix values are the sums of the edge weights.
527
+
528
+ When `nodelist` does not contain every node in `G`, the adjacency matrix
529
+ is built from the subgraph of `G` that is induced by the nodes in
530
+ `nodelist`.
531
+
532
+ The convention used for self-loop edges in graphs is to assign the
533
+ diagonal matrix entry value to the weight attribute of the edge
534
+ (or the number 1 if the edge has no weight attribute). If the
535
+ alternate convention of doubling the edge weight is desired the
536
+ resulting SciPy sparse array can be modified as follows:
537
+
538
+ >>> G = nx.Graph([(1, 1)])
539
+ >>> A = nx.to_scipy_sparse_array(G)
540
+ >>> print(A.todense())
541
+ [[1]]
542
+ >>> A.setdiag(A.diagonal() * 2)
543
+ >>> print(A.toarray())
544
+ [[2]]
545
+
546
+ Examples
547
+ --------
548
+ >>> G = nx.MultiDiGraph()
549
+ >>> G.add_edge(0, 1, weight=2)
550
+ 0
551
+ >>> G.add_edge(1, 0)
552
+ 0
553
+ >>> G.add_edge(2, 2, weight=3)
554
+ 0
555
+ >>> G.add_edge(2, 2)
556
+ 1
557
+ >>> S = nx.to_scipy_sparse_array(G, nodelist=[0, 1, 2])
558
+ >>> print(S.toarray())
559
+ [[0 2 0]
560
+ [1 0 0]
561
+ [0 0 4]]
562
+
563
+ References
564
+ ----------
565
+ .. [1] Scipy Dev. References, "Sparse Matrices",
566
+ https://docs.scipy.org/doc/scipy/reference/sparse.html
567
+ """
568
+ import scipy as sp
569
+
570
+ if len(G) == 0:
571
+ raise nx.NetworkXError("Graph has no nodes or edges")
572
+
573
+ if nodelist is None:
574
+ nodelist = list(G)
575
+ nlen = len(G)
576
+ else:
577
+ nlen = len(nodelist)
578
+ if nlen == 0:
579
+ raise nx.NetworkXError("nodelist has no nodes")
580
+ nodeset = set(G.nbunch_iter(nodelist))
581
+ if nlen != len(nodeset):
582
+ for n in nodelist:
583
+ if n not in G:
584
+ raise nx.NetworkXError(f"Node {n} in nodelist is not in G")
585
+ raise nx.NetworkXError("nodelist contains duplicates.")
586
+ if nlen < len(G):
587
+ G = G.subgraph(nodelist)
588
+
589
+ index = dict(zip(nodelist, range(nlen)))
590
+ coefficients = zip(
591
+ *((index[u], index[v], wt) for u, v, wt in G.edges(data=weight, default=1))
592
+ )
593
+ try:
594
+ row, col, data = coefficients
595
+ except ValueError:
596
+ # there is no edge in the subgraph
597
+ row, col, data = [], [], []
598
+
599
+ if G.is_directed():
600
+ A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, nlen), dtype=dtype)
601
+ else:
602
+ # symmetrize matrix
603
+ d = data + data
604
+ r = row + col
605
+ c = col + row
606
+ # selfloop entries get double counted when symmetrizing
607
+ # so we subtract the data on the diagonal
608
+ selfloops = list(nx.selfloop_edges(G, data=weight, default=1))
609
+ if selfloops:
610
+ diag_index, diag_data = zip(*((index[u], -wt) for u, v, wt in selfloops))
611
+ d += diag_data
612
+ r += diag_index
613
+ c += diag_index
614
+ A = sp.sparse.coo_array((d, (r, c)), shape=(nlen, nlen), dtype=dtype)
615
+ try:
616
+ return A.asformat(format)
617
+ except ValueError as err:
618
+ raise nx.NetworkXError(f"Unknown sparse matrix format: {format}") from err
619
+
620
+
621
+ def _csr_gen_triples(A):
622
+ """Converts a SciPy sparse array in **Compressed Sparse Row** format to
623
+ an iterable of weighted edge triples.
624
+
625
+ """
626
+ nrows = A.shape[0]
627
+ indptr, dst_indices, data = A.indptr, A.indices, A.data
628
+ import numpy as np
629
+
630
+ src_indices = np.repeat(np.arange(nrows), np.diff(indptr))
631
+ return zip(src_indices.tolist(), dst_indices.tolist(), A.data.tolist())
632
+
633
+
634
+ def _csc_gen_triples(A):
635
+ """Converts a SciPy sparse array in **Compressed Sparse Column** format to
636
+ an iterable of weighted edge triples.
637
+
638
+ """
639
+ ncols = A.shape[1]
640
+ indptr, src_indices, data = A.indptr, A.indices, A.data
641
+ import numpy as np
642
+
643
+ dst_indices = np.repeat(np.arange(ncols), np.diff(indptr))
644
+ return zip(src_indices.tolist(), dst_indices.tolist(), A.data.tolist())
645
+
646
+
647
+ def _coo_gen_triples(A):
648
+ """Converts a SciPy sparse array in **Coordinate** format to an iterable
649
+ of weighted edge triples.
650
+
651
+ """
652
+ return zip(A.row.tolist(), A.col.tolist(), A.data.tolist())
653
+
654
+
655
+ def _dok_gen_triples(A):
656
+ """Converts a SciPy sparse array in **Dictionary of Keys** format to an
657
+ iterable of weighted edge triples.
658
+
659
+ """
660
+ for (r, c), v in A.items():
661
+ # Use `v.item()` to convert a NumPy scalar to the appropriate Python scalar
662
+ yield int(r), int(c), v.item()
663
+
664
+
665
+ def _generate_weighted_edges(A):
666
+ """Returns an iterable over (u, v, w) triples, where u and v are adjacent
667
+ vertices and w is the weight of the edge joining u and v.
668
+
669
+ `A` is a SciPy sparse array (in any format).
670
+
671
+ """
672
+ if A.format == "csr":
673
+ return _csr_gen_triples(A)
674
+ if A.format == "csc":
675
+ return _csc_gen_triples(A)
676
+ if A.format == "dok":
677
+ return _dok_gen_triples(A)
678
+ # If A is in any other format (including COO), convert it to COO format.
679
+ return _coo_gen_triples(A.tocoo())
680
+
681
+
682
+ @nx._dispatchable(graphs=None, returns_graph=True)
683
+ def from_scipy_sparse_array(
684
+ A, parallel_edges=False, create_using=None, edge_attribute="weight"
685
+ ):
686
+ """Creates a new graph from an adjacency matrix given as a SciPy sparse
687
+ array.
688
+
689
+ Parameters
690
+ ----------
691
+ A: scipy.sparse array
692
+ An adjacency matrix representation of a graph
693
+
694
+ parallel_edges : Boolean
695
+ If this is True, `create_using` is a multigraph, and `A` is an
696
+ integer matrix, then entry *(i, j)* in the matrix is interpreted as the
697
+ number of parallel edges joining vertices *i* and *j* in the graph.
698
+ If it is False, then the entries in the matrix are interpreted as
699
+ the weight of a single edge joining the vertices.
700
+
701
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
702
+ Graph type to create. If graph instance, then cleared before populated.
703
+
704
+ edge_attribute: string
705
+ Name of edge attribute to store matrix numeric value. The data will
706
+ have the same type as the matrix entry (int, float, (real,imag)).
707
+
708
+ Notes
709
+ -----
710
+ For directed graphs, explicitly mention create_using=nx.DiGraph,
711
+ and entry i,j of A corresponds to an edge from i to j.
712
+
713
+ If `create_using` is :class:`networkx.MultiGraph` or
714
+ :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the
715
+ entries of `A` are of type :class:`int`, then this function returns a
716
+ multigraph (constructed from `create_using`) with parallel edges.
717
+ In this case, `edge_attribute` will be ignored.
718
+
719
+ If `create_using` indicates an undirected multigraph, then only the edges
720
+ indicated by the upper triangle of the matrix `A` will be added to the
721
+ graph.
722
+
723
+ Examples
724
+ --------
725
+ >>> import scipy as sp
726
+ >>> A = sp.sparse.eye(2, 2, 1)
727
+ >>> G = nx.from_scipy_sparse_array(A)
728
+
729
+ If `create_using` indicates a multigraph and the matrix has only integer
730
+ entries and `parallel_edges` is False, then the entries will be treated
731
+ as weights for edges joining the nodes (without creating parallel edges):
732
+
733
+ >>> A = sp.sparse.csr_array([[1, 1], [1, 2]])
734
+ >>> G = nx.from_scipy_sparse_array(A, create_using=nx.MultiGraph)
735
+ >>> G[1][1]
736
+ AtlasView({0: {'weight': 2}})
737
+
738
+ If `create_using` indicates a multigraph and the matrix has only integer
739
+ entries and `parallel_edges` is True, then the entries will be treated
740
+ as the number of parallel edges joining those two vertices:
741
+
742
+ >>> A = sp.sparse.csr_array([[1, 1], [1, 2]])
743
+ >>> G = nx.from_scipy_sparse_array(A, parallel_edges=True, create_using=nx.MultiGraph)
744
+ >>> G[1][1]
745
+ AtlasView({0: {'weight': 1}, 1: {'weight': 1}})
746
+
747
+ """
748
+ G = nx.empty_graph(0, create_using)
749
+ n, m = A.shape
750
+ if n != m:
751
+ raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}")
752
+ # Make sure we get even the isolated nodes of the graph.
753
+ G.add_nodes_from(range(n))
754
+ # Create an iterable over (u, v, w) triples and for each triple, add an
755
+ # edge from u to v with weight w.
756
+ triples = _generate_weighted_edges(A)
757
+ # If the entries in the adjacency matrix are integers, the graph is a
758
+ # multigraph, and parallel_edges is True, then create parallel edges, each
759
+ # with weight 1, for each entry in the adjacency matrix. Otherwise, create
760
+ # one edge for each positive entry in the adjacency matrix and set the
761
+ # weight of that edge to be the entry in the matrix.
762
+ if A.dtype.kind in ("i", "u") and G.is_multigraph() and parallel_edges:
763
+ chain = itertools.chain.from_iterable
764
+ # The following line is equivalent to:
765
+ #
766
+ # for (u, v) in edges:
767
+ # for d in range(A[u, v]):
768
+ # G.add_edge(u, v, weight=1)
769
+ #
770
+ triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
771
+ # If we are creating an undirected multigraph, only add the edges from the
772
+ # upper triangle of the matrix. Otherwise, add all the edges. This relies
773
+ # on the fact that the vertices created in the
774
+ # `_generated_weighted_edges()` function are actually the row/column
775
+ # indices for the matrix `A`.
776
+ #
777
+ # Without this check, we run into a problem where each edge is added twice
778
+ # when `G.add_weighted_edges_from()` is invoked below.
779
+ if G.is_multigraph() and not G.is_directed():
780
+ triples = ((u, v, d) for u, v, d in triples if u <= v)
781
+ G.add_weighted_edges_from(triples, weight=edge_attribute)
782
+ return G
783
+
784
+
785
+ @nx._dispatchable(edge_attrs="weight") # edge attrs may also be obtained from `dtype`
786
+ def to_numpy_array(
787
+ G,
788
+ nodelist=None,
789
+ dtype=None,
790
+ order=None,
791
+ multigraph_weight=sum,
792
+ weight="weight",
793
+ nonedge=0.0,
794
+ ):
795
+ """Returns the graph adjacency matrix as a NumPy array.
796
+
797
+ Parameters
798
+ ----------
799
+ G : graph
800
+ The NetworkX graph used to construct the NumPy array.
801
+
802
+ nodelist : list, optional
803
+ The rows and columns are ordered according to the nodes in `nodelist`.
804
+ If `nodelist` is ``None``, then the ordering is produced by ``G.nodes()``.
805
+
806
+ dtype : NumPy data type, optional
807
+ A NumPy data type used to initialize the array. If None, then the NumPy
808
+ default is used. The dtype can be structured if `weight=None`, in which
809
+ case the dtype field names are used to look up edge attributes. The
810
+ result is a structured array where each named field in the dtype
811
+ corresponds to the adjacency for that edge attribute. See examples for
812
+ details.
813
+
814
+ order : {'C', 'F'}, optional
815
+ Whether to store multidimensional data in C- or Fortran-contiguous
816
+ (row- or column-wise) order in memory. If None, then the NumPy default
817
+ is used.
818
+
819
+ multigraph_weight : callable, optional
820
+ An function that determines how weights in multigraphs are handled.
821
+ The function should accept a sequence of weights and return a single
822
+ value. The default is to sum the weights of the multiple edges.
823
+
824
+ weight : string or None optional (default = 'weight')
825
+ The edge attribute that holds the numerical value used for
826
+ the edge weight. If an edge does not have that attribute, then the
827
+ value 1 is used instead. `weight` must be ``None`` if a structured
828
+ dtype is used.
829
+
830
+ nonedge : array_like (default = 0.0)
831
+ The value used to represent non-edges in the adjacency matrix.
832
+ The array values corresponding to nonedges are typically set to zero.
833
+ However, this could be undesirable if there are array values
834
+ corresponding to actual edges that also have the value zero. If so,
835
+ one might prefer nonedges to have some other value, such as ``nan``.
836
+
837
+ Returns
838
+ -------
839
+ A : NumPy ndarray
840
+ Graph adjacency matrix
841
+
842
+ Raises
843
+ ------
844
+ NetworkXError
845
+ If `dtype` is a structured dtype and `G` is a multigraph
846
+ ValueError
847
+ If `dtype` is a structured dtype and `weight` is not `None`
848
+
849
+ See Also
850
+ --------
851
+ from_numpy_array
852
+
853
+ Notes
854
+ -----
855
+ For directed graphs, entry ``i, j`` corresponds to an edge from ``i`` to ``j``.
856
+
857
+ Entries in the adjacency matrix are given by the `weight` edge attribute.
858
+ When an edge does not have a weight attribute, the value of the entry is
859
+ set to the number 1. For multiple (parallel) edges, the values of the
860
+ entries are determined by the `multigraph_weight` parameter. The default is
861
+ to sum the weight attributes for each of the parallel edges.
862
+
863
+ When `nodelist` does not contain every node in `G`, the adjacency matrix is
864
+ built from the subgraph of `G` that is induced by the nodes in `nodelist`.
865
+
866
+ The convention used for self-loop edges in graphs is to assign the
867
+ diagonal array entry value to the weight attribute of the edge
868
+ (or the number 1 if the edge has no weight attribute). If the
869
+ alternate convention of doubling the edge weight is desired the
870
+ resulting NumPy array can be modified as follows:
871
+
872
+ >>> import numpy as np
873
+ >>> G = nx.Graph([(1, 1)])
874
+ >>> A = nx.to_numpy_array(G)
875
+ >>> A
876
+ array([[1.]])
877
+ >>> A[np.diag_indices_from(A)] *= 2
878
+ >>> A
879
+ array([[2.]])
880
+
881
+ Examples
882
+ --------
883
+ >>> G = nx.MultiDiGraph()
884
+ >>> G.add_edge(0, 1, weight=2)
885
+ 0
886
+ >>> G.add_edge(1, 0)
887
+ 0
888
+ >>> G.add_edge(2, 2, weight=3)
889
+ 0
890
+ >>> G.add_edge(2, 2)
891
+ 1
892
+ >>> nx.to_numpy_array(G, nodelist=[0, 1, 2])
893
+ array([[0., 2., 0.],
894
+ [1., 0., 0.],
895
+ [0., 0., 4.]])
896
+
897
+ When `nodelist` argument is used, nodes of `G` which do not appear in the `nodelist`
898
+ and their edges are not included in the adjacency matrix. Here is an example:
899
+
900
+ >>> G = nx.Graph()
901
+ >>> G.add_edge(3, 1)
902
+ >>> G.add_edge(2, 0)
903
+ >>> G.add_edge(2, 1)
904
+ >>> G.add_edge(3, 0)
905
+ >>> nx.to_numpy_array(G, nodelist=[1, 2, 3])
906
+ array([[0., 1., 1.],
907
+ [1., 0., 0.],
908
+ [1., 0., 0.]])
909
+
910
+ This function can also be used to create adjacency matrices for multiple
911
+ edge attributes with structured dtypes:
912
+
913
+ >>> G = nx.Graph()
914
+ >>> G.add_edge(0, 1, weight=10)
915
+ >>> G.add_edge(1, 2, cost=5)
916
+ >>> G.add_edge(2, 3, weight=3, cost=-4.0)
917
+ >>> dtype = np.dtype([("weight", int), ("cost", float)])
918
+ >>> A = nx.to_numpy_array(G, dtype=dtype, weight=None)
919
+ >>> A["weight"]
920
+ array([[ 0, 10, 0, 0],
921
+ [10, 0, 1, 0],
922
+ [ 0, 1, 0, 3],
923
+ [ 0, 0, 3, 0]])
924
+ >>> A["cost"]
925
+ array([[ 0., 1., 0., 0.],
926
+ [ 1., 0., 5., 0.],
927
+ [ 0., 5., 0., -4.],
928
+ [ 0., 0., -4., 0.]])
929
+
930
+ As stated above, the argument "nonedge" is useful especially when there are
931
+ actually edges with weight 0 in the graph. Setting a nonedge value different than 0,
932
+ makes it much clearer to differentiate such 0-weighted edges and actual nonedge values.
933
+
934
+ >>> G = nx.Graph()
935
+ >>> G.add_edge(3, 1, weight=2)
936
+ >>> G.add_edge(2, 0, weight=0)
937
+ >>> G.add_edge(2, 1, weight=0)
938
+ >>> G.add_edge(3, 0, weight=1)
939
+ >>> nx.to_numpy_array(G, nonedge=-1.0)
940
+ array([[-1., 2., -1., 1.],
941
+ [ 2., -1., 0., -1.],
942
+ [-1., 0., -1., 0.],
943
+ [ 1., -1., 0., -1.]])
944
+ """
945
+ import numpy as np
946
+
947
+ if nodelist is None:
948
+ nodelist = list(G)
949
+ nlen = len(nodelist)
950
+
951
+ # Input validation
952
+ nodeset = set(nodelist)
953
+ if nodeset - set(G):
954
+ raise nx.NetworkXError(f"Nodes {nodeset - set(G)} in nodelist is not in G")
955
+ if len(nodeset) < nlen:
956
+ raise nx.NetworkXError("nodelist contains duplicates.")
957
+
958
+ A = np.full((nlen, nlen), fill_value=nonedge, dtype=dtype, order=order)
959
+
960
+ # Corner cases: empty nodelist or graph without any edges
961
+ if nlen == 0 or G.number_of_edges() == 0:
962
+ return A
963
+
964
+ # If dtype is structured and weight is None, use dtype field names as
965
+ # edge attributes
966
+ edge_attrs = None # Only single edge attribute by default
967
+ if A.dtype.names:
968
+ if weight is None:
969
+ edge_attrs = dtype.names
970
+ else:
971
+ raise ValueError(
972
+ "Specifying `weight` not supported for structured dtypes\n."
973
+ "To create adjacency matrices from structured dtypes, use `weight=None`."
974
+ )
975
+
976
+ # Map nodes to row/col in matrix
977
+ idx = dict(zip(nodelist, range(nlen)))
978
+ if len(nodelist) < len(G):
979
+ G = G.subgraph(nodelist).copy()
980
+
981
+ # Collect all edge weights and reduce with `multigraph_weights`
982
+ if G.is_multigraph():
983
+ if edge_attrs:
984
+ raise nx.NetworkXError(
985
+ "Structured arrays are not supported for MultiGraphs"
986
+ )
987
+ d = defaultdict(list)
988
+ for u, v, wt in G.edges(data=weight, default=1.0):
989
+ d[(idx[u], idx[v])].append(wt)
990
+ i, j = np.array(list(d.keys())).T # indices
991
+ wts = [multigraph_weight(ws) for ws in d.values()] # reduced weights
992
+ else:
993
+ i, j, wts = [], [], []
994
+
995
+ # Special branch: multi-attr adjacency from structured dtypes
996
+ if edge_attrs:
997
+ # Extract edges with all data
998
+ for u, v, data in G.edges(data=True):
999
+ i.append(idx[u])
1000
+ j.append(idx[v])
1001
+ wts.append(data)
1002
+ # Map each attribute to the appropriate named field in the
1003
+ # structured dtype
1004
+ for attr in edge_attrs:
1005
+ attr_data = [wt.get(attr, 1.0) for wt in wts]
1006
+ A[attr][i, j] = attr_data
1007
+ if not G.is_directed():
1008
+ A[attr][j, i] = attr_data
1009
+ return A
1010
+
1011
+ for u, v, wt in G.edges(data=weight, default=1.0):
1012
+ i.append(idx[u])
1013
+ j.append(idx[v])
1014
+ wts.append(wt)
1015
+
1016
+ # Set array values with advanced indexing
1017
+ A[i, j] = wts
1018
+ if not G.is_directed():
1019
+ A[j, i] = wts
1020
+
1021
+ return A
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr="weight"):
+     """Returns a graph from a 2D NumPy array.
+
+     The 2D NumPy array is interpreted as an adjacency matrix for the graph.
+
+     Parameters
+     ----------
+     A : a 2D numpy.ndarray
+         An adjacency matrix representation of a graph
+
+     parallel_edges : Boolean
+         If this is True, `create_using` is a multigraph, and `A` is an
+         integer array, then entry *(i, j)* in the array is interpreted as the
+         number of parallel edges joining vertices *i* and *j* in the graph.
+         If it is False, then the entries in the array are interpreted as
+         the weight of a single edge joining the vertices.
+
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     edge_attr : String, optional (default="weight")
+         The attribute to which the array values are assigned on each edge. If
+         it is None, edge attributes will not be assigned.
+
+     Notes
+     -----
+     For directed graphs, explicitly pass ``create_using=nx.DiGraph``;
+     entry *(i, j)* of `A` then corresponds to an edge from *i* to *j*.
+
+     If `create_using` is :class:`networkx.MultiGraph` or
+     :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the
+     entries of `A` are of type :class:`int`, then this function returns a
+     multigraph (of the same type as `create_using`) with parallel edges.
+
+     If `create_using` indicates an undirected multigraph, then only the edges
+     indicated by the upper triangle of the array `A` will be added to the
+     graph.
+
+     If `edge_attr` is Falsy (False or None), edge attributes will not be
+     assigned, and the array data will be treated like a binary mask of
+     edge presence or absence. Otherwise, the attributes will be assigned
+     as follows:
+
+     If the NumPy array has a single data type for each array entry it
+     will be converted to an appropriate Python data type.
+
+     If the NumPy array has a user-specified compound data type the names
+     of the data fields will be used as attribute keys in the resulting
+     NetworkX graph.
+
+     See Also
+     --------
+     to_numpy_array
+
+     Examples
+     --------
+     Simple integer weights on edges:
+
+     >>> import numpy as np
+     >>> A = np.array([[1, 1], [2, 1]])
+     >>> G = nx.from_numpy_array(A)
+     >>> G.edges(data=True)
+     EdgeDataView([(0, 0, {'weight': 1}), (0, 1, {'weight': 2}), (1, 1, {'weight': 1})])
+
+     If `create_using` indicates a multigraph and the array has only integer
+     entries and `parallel_edges` is False, then the entries will be treated
+     as weights for edges joining the nodes (without creating parallel edges):
+
+     >>> A = np.array([[1, 1], [1, 2]])
+     >>> G = nx.from_numpy_array(A, create_using=nx.MultiGraph)
+     >>> G[1][1]
+     AtlasView({0: {'weight': 2}})
+
+     If `create_using` indicates a multigraph and the array has only integer
+     entries and `parallel_edges` is True, then the entries will be treated
+     as the number of parallel edges joining those two vertices:
+
+     >>> A = np.array([[1, 1], [1, 2]])
+     >>> temp = nx.MultiGraph()
+     >>> G = nx.from_numpy_array(A, parallel_edges=True, create_using=temp)
+     >>> G[1][1]
+     AtlasView({0: {'weight': 1}, 1: {'weight': 1}})
+
+     User defined compound data type on edges:
+
+     >>> dt = [("weight", float), ("cost", int)]
+     >>> A = np.array([[(1.0, 2)]], dtype=dt)
+     >>> G = nx.from_numpy_array(A)
+     >>> G.edges()
+     EdgeView([(0, 0)])
+     >>> G[0][0]["cost"]
+     2
+     >>> G[0][0]["weight"]
+     1.0
+
+     """
+     kind_to_python_type = {
+         "f": float,
+         "i": int,
+         "u": int,
+         "b": bool,
+         "c": complex,
+         "S": str,
+         "U": str,
+         "V": "void",
+     }
+     G = nx.empty_graph(0, create_using)
+     if A.ndim != 2:
+         raise nx.NetworkXError(f"Input array must be 2D, not {A.ndim}")
+     n, m = A.shape
+     if n != m:
+         raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}")
+     dt = A.dtype
+     try:
+         python_type = kind_to_python_type[dt.kind]
+     except Exception as err:
+         raise TypeError(f"Unknown numpy data type: {dt}") from err
+
+     # Make sure we get even the isolated nodes of the graph.
+     G.add_nodes_from(range(n))
+     # Get a list of all the entries in the array with nonzero entries. These
+     # coordinates become edges in the graph. (convert to int from np.int64)
+     edges = ((int(e[0]), int(e[1])) for e in zip(*A.nonzero()))
+     # handle numpy constructed data type
+     if python_type == "void":
+         # Sort the fields by their offset, then by dtype, then by name.
+         fields = sorted(
+             (offset, dtype, name) for name, (dtype, offset) in A.dtype.fields.items()
+         )
+         triples = (
+             (
+                 u,
+                 v,
+                 {}
+                 if edge_attr in [False, None]
+                 else {
+                     name: kind_to_python_type[dtype.kind](val)
+                     for (_, dtype, name), val in zip(fields, A[u, v])
+                 },
+             )
+             for u, v in edges
+         )
+     # If the entries in the adjacency matrix are integers, the graph is a
+     # multigraph, and parallel_edges is True, then create parallel edges, each
+     # with weight 1, for each entry in the adjacency matrix. Otherwise, create
+     # one edge for each positive entry in the adjacency matrix and set the
+     # weight of that edge to be the entry in the matrix.
+     elif python_type is int and G.is_multigraph() and parallel_edges:
+         chain = itertools.chain.from_iterable
+         # The following line is equivalent to:
+         #
+         #     for (u, v) in edges:
+         #         for d in range(A[u, v]):
+         #             G.add_edge(u, v, weight=1)
+         #
+         if edge_attr in [False, None]:
+             triples = chain(((u, v, {}) for d in range(A[u, v])) for (u, v) in edges)
+         else:
+             triples = chain(
+                 ((u, v, {edge_attr: 1}) for d in range(A[u, v])) for (u, v) in edges
+             )
+     else:  # basic data type
+         if edge_attr in [False, None]:
+             triples = ((u, v, {}) for u, v in edges)
+         else:
+             triples = ((u, v, {edge_attr: python_type(A[u, v])}) for u, v in edges)
+     # If we are creating an undirected multigraph, only add the edges from the
+     # upper triangle of the matrix. Otherwise, add all the edges. This relies
+     # on the fact that the vertices created in the
+     # `_generated_weighted_edges()` function are actually the row/column
+     # indices for the matrix `A`.
+     #
+     # Without this check, we run into a problem where each edge is added twice
+     # when `G.add_edges_from()` is invoked below.
+     if G.is_multigraph() and not G.is_directed():
+         triples = ((u, v, d) for u, v, d in triples if u <= v)
+     G.add_edges_from(triples)
+     return G
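+
+ # A round-trip sanity check for the two converters (illustrative only;
+ # assumes numpy is imported as np):
+ #
+ #     >>> A = np.array([[0, 2], [2, 0]])
+ #     >>> G = nx.from_numpy_array(A)
+ #     >>> G[0][1]["weight"]
+ #     2
+ #     >>> np.array_equal(nx.to_numpy_array(G), A)
+ #     True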
llmeval-env/lib/python3.10/site-packages/networkx/exception.py ADDED
@@ -0,0 +1,125 @@
+ """
+ **********
+ Exceptions
+ **********
+
+ Base exceptions and errors for NetworkX.
+ """
+
+ __all__ = [
+     "HasACycle",
+     "NodeNotFound",
+     "PowerIterationFailedConvergence",
+     "ExceededMaxIterations",
+     "AmbiguousSolution",
+     "NetworkXAlgorithmError",
+     "NetworkXException",
+     "NetworkXError",
+     "NetworkXNoCycle",
+     "NetworkXNoPath",
+     "NetworkXNotImplemented",
+     "NetworkXPointlessConcept",
+     "NetworkXUnbounded",
+     "NetworkXUnfeasible",
+ ]
+
+
+ class NetworkXException(Exception):
+     """Base class for exceptions in NetworkX."""
+
+
+ class NetworkXError(NetworkXException):
+     """Exception for a serious error in NetworkX."""
+
+
+ class NetworkXPointlessConcept(NetworkXException):
+     """Raised when a null graph is provided as input to an algorithm
+     that cannot use it.
+
+     The null graph is sometimes considered a pointless concept [1]_,
+     thus the name of the exception.
+
+     References
+     ----------
+     .. [1] Harary, F. and Read, R. "Is the Null Graph a Pointless
+        Concept?" In Graphs and Combinatorics Conference, George
+        Washington University. New York: Springer-Verlag, 1973.
+
+     """
+
+
+ class NetworkXAlgorithmError(NetworkXException):
+     """Exception for unexpected termination of algorithms."""
+
+
+ class NetworkXUnfeasible(NetworkXAlgorithmError):
+     """Exception raised by algorithms trying to solve a problem
+     instance that has no feasible solution."""
+
+
+ class NetworkXNoPath(NetworkXUnfeasible):
+     """Exception for algorithms that should return a path when running
+     on graphs where such a path does not exist."""
+
+
+ class NetworkXNoCycle(NetworkXUnfeasible):
+     """Exception for algorithms that should return a cycle when running
+     on graphs where such a cycle does not exist."""
+
+
+ class HasACycle(NetworkXException):
+     """Raised if a graph has a cycle when an algorithm expects that it
+     will have no cycles.
+
+     """
+
+
+ class NetworkXUnbounded(NetworkXAlgorithmError):
+     """Exception raised by algorithms trying to solve a maximization
+     or a minimization problem instance that is unbounded."""
+
+
+ class NetworkXNotImplemented(NetworkXException):
+     """Exception raised by algorithms not implemented for a type of graph."""
+
+
+ class NodeNotFound(NetworkXException):
+     """Exception raised if requested node is not present in the graph."""
+
+
+ class AmbiguousSolution(NetworkXException):
+     """Raised if more than one valid solution exists for an intermediary step
+     of an algorithm.
+
+     In the face of ambiguity, refuse the temptation to guess.
+     This may occur, for example, when trying to determine the
+     bipartite node sets in a disconnected bipartite graph when
+     computing bipartite matchings.
+
+     """
+
+
+ class ExceededMaxIterations(NetworkXException):
+     """Raised if a loop iterates too many times without breaking.
+
+     This may occur, for example, in an algorithm that computes
+     progressively better approximations to a value but exceeds an
+     iteration bound specified by the user.
+
+     """
+
+
+ class PowerIterationFailedConvergence(ExceededMaxIterations):
+     """Raised when the power iteration method fails to converge within a
+     specified iteration limit.
+
+     `num_iterations` is the number of iterations that have been
+     completed when this exception was raised.
+
+     """
+
+     def __init__(self, num_iterations, *args, **kw):
+         msg = f"power iteration failed to converge within {num_iterations} iterations"
+         super().__init__(msg, *args, **kw)
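+
+ # These classes are meant to be caught by callers; a small sketch of
+ # typical use (both graphs and exceptions live in the top-level namespace):
+ #
+ #     >>> try:
+ #     ...     nx.shortest_path(nx.empty_graph(2), 0, 1)
+ #     ... except nx.NetworkXNoPath:
+ #     ...     print("no path between 0 and 1")
+ #     no path between 0 and 1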
llmeval-env/lib/python3.10/site-packages/networkx/generators/__init__.py ADDED
@@ -0,0 +1,33 @@
+ """
+ A package for generating various graphs in networkx.
+
+ """
+ from networkx.generators.atlas import *
+ from networkx.generators.classic import *
+ from networkx.generators.cographs import *
+ from networkx.generators.community import *
+ from networkx.generators.degree_seq import *
+ from networkx.generators.directed import *
+ from networkx.generators.duplication import *
+ from networkx.generators.ego import *
+ from networkx.generators.expanders import *
+ from networkx.generators.geometric import *
+ from networkx.generators.harary_graph import *
+ from networkx.generators.internet_as_graphs import *
+ from networkx.generators.intersection import *
+ from networkx.generators.interval_graph import *
+ from networkx.generators.joint_degree_seq import *
+ from networkx.generators.lattice import *
+ from networkx.generators.line import *
+ from networkx.generators.mycielski import *
+ from networkx.generators.nonisomorphic_trees import *
+ from networkx.generators.random_clustered import *
+ from networkx.generators.random_graphs import *
+ from networkx.generators.small import *
+ from networkx.generators.social import *
+ from networkx.generators.spectral_graph_forge import *
+ from networkx.generators.stochastic import *
+ from networkx.generators.sudoku import *
+ from networkx.generators.time_series import *
+ from networkx.generators.trees import *
+ from networkx.generators.triads import *
llmeval-env/lib/python3.10/site-packages/networkx/generators/atlas.py ADDED
@@ -0,0 +1,179 @@
+ """
+ Generators for the small graph atlas.
+ """
+ import gzip
+ import importlib.resources
+ import os
+ import os.path
+ from itertools import islice
+
+ import networkx as nx
+
+ __all__ = ["graph_atlas", "graph_atlas_g"]
+
+ #: The total number of graphs in the atlas.
+ #:
+ #: The graphs are labeled starting from 0 and extending to (but not
+ #: including) this number.
+ NUM_GRAPHS = 1253
+
+ #: The path to the data file containing the graph edge lists.
+ #:
+ #: This is the absolute path of the gzipped text file containing the
+ #: edge list for each graph in the atlas. The file contains one entry
+ #: per graph in the atlas, in sequential order, starting from graph
+ #: number 0 and extending through graph number 1252 (see
+ #: :data:`NUM_GRAPHS`). Each entry looks like
+ #:
+ #: .. sourcecode:: text
+ #:
+ #:     GRAPH 6
+ #:     NODES 3
+ #:     0 1
+ #:     0 2
+ #:
+ #: where the first two lines are the graph's index in the atlas and the
+ #: number of nodes in the graph, and the remaining lines are the edge
+ #: list.
+ #:
+ #: This file was generated from a Python list of graphs via code like
+ #: the following::
+ #:
+ #:     import gzip
+ #:     from networkx.generators.atlas import graph_atlas_g
+ #:     from networkx.readwrite.edgelist import write_edgelist
+ #:
+ #:     with gzip.open('atlas.dat.gz', 'wb') as f:
+ #:         for i, G in enumerate(graph_atlas_g()):
+ #:             f.write(bytes(f'GRAPH {i}\n', encoding='utf-8'))
+ #:             f.write(bytes(f'NODES {len(G)}\n', encoding='utf-8'))
+ #:             write_edgelist(G, f, data=False)
+ #:
+
+ # Path to the atlas file
+ ATLAS_FILE = importlib.resources.files("networkx.generators") / "atlas.dat.gz"
+
+
+ def _generate_graphs():
+     """Sequentially read the file containing the edge list data for the
+     graphs in the atlas and generate the graphs one at a time.
+
+     This function reads the file given in :data:`.ATLAS_FILE`.
+
+     """
+     with gzip.open(ATLAS_FILE, "rb") as f:
+         line = f.readline()
+         while line and line.startswith(b"GRAPH"):
+             # The first two lines of each entry tell us the index of the
+             # graph in the list and the number of nodes in the graph.
+             # They look like this:
+             #
+             #     GRAPH 3
+             #     NODES 2
+             #
+             graph_index = int(line[6:].rstrip())
+             line = f.readline()
+             num_nodes = int(line[6:].rstrip())
+             # The remaining lines contain the edge list, until the next
+             # GRAPH line (or until the end of the file).
+             edgelist = []
+             line = f.readline()
+             while line and not line.startswith(b"GRAPH"):
+                 edgelist.append(line.rstrip())
+                 line = f.readline()
+             G = nx.Graph()
+             G.name = f"G{graph_index}"
+             G.add_nodes_from(range(num_nodes))
+             G.add_edges_from(tuple(map(int, e.split())) for e in edgelist)
+             yield G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def graph_atlas(i):
+     """Returns graph number `i` from the Graph Atlas.
+
+     For more information, see :func:`.graph_atlas_g`.
+
+     Parameters
+     ----------
+     i : int
+         The index of the graph from the atlas to get. The graph at index
+         0 is assumed to be the null graph.
+
+     Returns
+     -------
+     graph : networkx.Graph
+         The single graph whose index in the Graph Atlas is `i`.
+
+     See also
+     --------
+     graph_atlas_g
+
+     Notes
+     -----
+     The time required by this function increases linearly with the
+     argument `i`, since it reads a large file sequentially in order to
+     generate the graph [1]_.
+
+     References
+     ----------
+     .. [1] Ronald C. Read and Robin J. Wilson, *An Atlas of Graphs*.
+        Oxford University Press, 1998.
+
+     """
+     if not (0 <= i < NUM_GRAPHS):
+         raise ValueError(f"index must be between 0 and {NUM_GRAPHS}")
+     return next(islice(_generate_graphs(), i, None))
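+
+ # Index 6 is the entry shown in the ATLAS_FILE format example above
+ # (three nodes, edges 0-1 and 0-2):
+ #
+ #     >>> G = nx.graph_atlas(6)
+ #     >>> sorted(G.edges())
+ #     [(0, 1), (0, 2)]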
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def graph_atlas_g():
+     """Returns the list of all graphs with up to seven nodes named in the
+     Graph Atlas.
+
+     The graphs are listed in increasing order by
+
+     1. number of nodes,
+     2. number of edges,
+     3. degree sequence (for example 111223 < 112222),
+     4. number of automorphisms,
+
+     in that order, with three exceptions as described in the *Notes*
+     section below. This causes the list to correspond with the index of
+     the graphs in the Graph Atlas [atlas]_, with the first graph,
+     ``G[0]``, being the null graph.
+
+     Returns
+     -------
+     list
+         A list of :class:`~networkx.Graph` objects, the one at index *i*
+         corresponding to the graph *i* in the Graph Atlas.
+
+     See also
+     --------
+     graph_atlas
+
+     Notes
+     -----
+     This function may be expensive in both time and space, since it
+     reads a large file sequentially in order to populate the list.
+
+     Although the NetworkX atlas functions match the order of graphs
+     given in the "Atlas of Graphs" book, there are (at least) three
+     errors in the ordering described in the book. The following three
+     pairs of graphs violate the lexicographically nondecreasing sorted
+     degree sequence rule:
+
+     - graphs 55 and 56 with degree sequences 001111 and 000112,
+     - graphs 1007 and 1008 with degree sequences 3333444 and 3333336,
+     - graphs 1012 and 1013 with degree sequences 1244555 and 1244456.
+
+     References
+     ----------
+     .. [atlas] Ronald C. Read and Robin J. Wilson,
+        *An Atlas of Graphs*.
+        Oxford University Press, 1998.
+
+     """
+     return list(_generate_graphs())
llmeval-env/lib/python3.10/site-packages/networkx/generators/classic.py ADDED
@@ -0,0 +1,1054 @@
+ """Generators for some classic graphs.
+
+ The typical graph builder function is called as follows:
+
+ >>> G = nx.complete_graph(100)
+
+ returning the complete graph on n nodes labeled 0, ..., 99
+ as a simple graph. Except for `empty_graph`, all the functions
+ in this module return a Graph class (i.e. a simple, undirected graph).
+
+ """
+
+ import itertools
+ import numbers
+
+ import networkx as nx
+ from networkx.classes import Graph
+ from networkx.exception import NetworkXError
+ from networkx.utils import nodes_or_number, pairwise
+
+ __all__ = [
+     "balanced_tree",
+     "barbell_graph",
+     "binomial_tree",
+     "complete_graph",
+     "complete_multipartite_graph",
+     "circular_ladder_graph",
+     "circulant_graph",
+     "cycle_graph",
+     "dorogovtsev_goltsev_mendes_graph",
+     "empty_graph",
+     "full_rary_tree",
+     "kneser_graph",
+     "ladder_graph",
+     "lollipop_graph",
+     "null_graph",
+     "path_graph",
+     "star_graph",
+     "tadpole_graph",
+     "trivial_graph",
+     "turan_graph",
+     "wheel_graph",
+ ]
+
+
+ # -------------------------------------------------------------------
+ # Some Classic Graphs
+ # -------------------------------------------------------------------
+
+
+ def _tree_edges(n, r):
+     if n == 0:
+         return
+     # helper function for trees
+     # yields edges in rooted tree at 0 with n nodes and branching ratio r
+     nodes = iter(range(n))
+     parents = [next(nodes)]  # stack of max length r
+     while parents:
+         source = parents.pop(0)
+         for i in range(r):
+             try:
+                 target = next(nodes)
+                 parents.append(target)
+                 yield source, target
+             except StopIteration:
+                 break
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def full_rary_tree(r, n, create_using=None):
+     """Creates a full r-ary tree of `n` nodes.
+
+     Sometimes called a k-ary, n-ary, or m-ary tree.
+     "... all non-leaf nodes have exactly r children and all levels
+     are full except for some rightmost position of the bottom level
+     (if a leaf at the bottom level is missing, then so are all of the
+     leaves to its right.)" [1]_
+
+     .. plot::
+
+         >>> nx.draw(nx.full_rary_tree(2, 10))
+
+     Parameters
+     ----------
+     r : int
+         branching factor of the tree
+     n : int
+         Number of nodes in the tree
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     Returns
+     -------
+     G : networkx Graph
+         An r-ary tree with n nodes
+
+     References
+     ----------
+     .. [1] An introduction to data structures and algorithms,
+        James Andrew Storer, Birkhauser Boston 2001, (page 225).
+     """
+     G = empty_graph(n, create_using)
+     G.add_edges_from(_tree_edges(n, r))
+     return G
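+
+ # A quick shape check (illustrative): a full binary tree on 7 nodes has a
+ # degree-2 root, two degree-3 internal nodes, and four leaves.
+ #
+ #     >>> G = nx.full_rary_tree(2, 7)
+ #     >>> sorted(d for _, d in G.degree())
+ #     [1, 1, 1, 1, 2, 3, 3]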
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def kneser_graph(n, k):
+     """Returns the Kneser Graph with parameters `n` and `k`.
+
+     The Kneser Graph has nodes that are k-tuples (subsets) of the integers
+     between 0 and ``n-1``. Nodes are adjacent if their corresponding sets are disjoint.
+
+     Parameters
+     ----------
+     n: int
+         Number of integers from which to make node subsets.
+         Subsets are drawn from ``set(range(n))``.
+     k: int
+         Size of the subsets.
+
+     Returns
+     -------
+     G : NetworkX Graph
+
+     Examples
+     --------
+     >>> G = nx.kneser_graph(5, 2)
+     >>> G.number_of_nodes()
+     10
+     >>> G.number_of_edges()
+     15
+     >>> nx.is_isomorphic(G, nx.petersen_graph())
+     True
+     """
+     if n <= 0:
+         raise NetworkXError("n should be greater than zero")
+     if k <= 0 or k > n:
+         raise NetworkXError("k should be greater than zero and smaller than n")
+
+     G = nx.Graph()
+     # Create all k-subsets of [0, 1, ..., n-1]
+     subsets = list(itertools.combinations(range(n), k))
+
+     if 2 * k > n:
+         G.add_nodes_from(subsets)
+
+     universe = set(range(n))
+     comb = itertools.combinations  # only to make it all fit on one line
+     G.add_edges_from((s, t) for s in subsets for t in comb(universe - set(s), k))
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def balanced_tree(r, h, create_using=None):
+     """Returns the perfectly balanced `r`-ary tree of height `h`.
+
+     .. plot::
+
+         >>> nx.draw(nx.balanced_tree(2, 3))
+
+     Parameters
+     ----------
+     r : int
+         Branching factor of the tree; each node will have `r`
+         children.
+
+     h : int
+         Height of the tree.
+
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     Returns
+     -------
+     G : NetworkX graph
+         A balanced `r`-ary tree of height `h`.
+
+     Notes
+     -----
+     This is the rooted tree where all leaves are at distance `h` from
+     the root. The root has degree `r` and all other internal nodes
+     have degree `r + 1`.
+
+     Node labels are integers, starting from zero.
+
+     A balanced tree is also known as a *complete r-ary tree*.
+
+     """
+     # The number of nodes in the balanced tree is `1 + r + ... + r^h`,
+     # which is computed by using the closed-form formula for a geometric
+     # sum with ratio `r`. In the special case that `r` is 1, the number
+     # of nodes is simply `h + 1` (since the tree is actually a path
+     # graph).
+     if r == 1:
+         n = h + 1
+     else:
+         # This must be an integer if both `r` and `h` are integers. If
+         # they are not, we force integer division anyway.
+         n = (1 - r ** (h + 1)) // (1 - r)
+     return full_rary_tree(r, n, create_using=create_using)
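+
+ # The node count follows the geometric sum used above, 1 + r + ... + r**h:
+ #
+ #     >>> len(nx.balanced_tree(2, 3))
+ #     15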
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def barbell_graph(m1, m2, create_using=None):
+     """Returns the Barbell Graph: two complete graphs connected by a path.
+
+     .. plot::
+
+         >>> nx.draw(nx.barbell_graph(4, 2))
+
+     Parameters
+     ----------
+     m1 : int
+         Size of the left and right barbells, must be at least 2.
+
+     m2 : int
+         Length of the path connecting the barbells.
+
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+         Only undirected Graphs are supported.
+
+     Returns
+     -------
+     G : NetworkX graph
+         A barbell graph.
+
+     Notes
+     -----
+     Two identical complete graphs $K_{m1}$ form the left and right bells,
+     and are connected by a path $P_{m2}$.
+
+     The `2*m1+m2` nodes are numbered
+     `0, ..., m1-1` for the left barbell,
+     `m1, ..., m1+m2-1` for the path,
+     and `m1+m2, ..., 2*m1+m2-1` for the right barbell.
+
+     The 3 subgraphs are joined via the edges `(m1-1, m1)` and
+     `(m1+m2-1, m1+m2)`. If `m2=0`, this is merely two complete
+     graphs joined together.
+
+     This graph is an extremal example in David Aldous
+     and Jim Fill's e-text on Random Walks on Graphs.
+
+     """
+     if m1 < 2:
+         raise NetworkXError("Invalid graph description, m1 should be >=2")
+     if m2 < 0:
+         raise NetworkXError("Invalid graph description, m2 should be >=0")
+
+     # left barbell
+     G = complete_graph(m1, create_using)
+     if G.is_directed():
+         raise NetworkXError("Directed Graph not supported")
+
+     # connecting path
+     G.add_nodes_from(range(m1, m1 + m2 - 1))
+     if m2 > 1:
+         G.add_edges_from(pairwise(range(m1, m1 + m2)))
+
+     # right barbell
+     G.add_edges_from(
+         (u, v) for u in range(m1 + m2, 2 * m1 + m2) for v in range(u + 1, 2 * m1 + m2)
+     )
+
+     # connect it up
+     G.add_edge(m1 - 1, m1)
+     if m2 > 0:
+         G.add_edge(m1 + m2 - 1, m1 + m2)
+
+     return G
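+
+ # Size bookkeeping (illustrative): two K_4 bells contribute 6 edges each,
+ # the 2-node path contributes 1, and 2 edges join the pieces together.
+ #
+ #     >>> G = nx.barbell_graph(4, 2)
+ #     >>> len(G), G.size()
+ #     (10, 15)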
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def binomial_tree(n, create_using=None):
+     """Returns the Binomial Tree of order n.
+
+     The binomial tree of order 0 consists of a single node. A binomial tree of order k
+     is defined recursively by linking two binomial trees of order k-1: the root of one is
+     the leftmost child of the root of the other.
+
+     .. plot::
+
+         >>> nx.draw(nx.binomial_tree(3))
+
+     Parameters
+     ----------
+     n : int
+         Order of the binomial tree.
+
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     Returns
+     -------
+     G : NetworkX graph
+         A binomial tree of $2^n$ nodes and $2^n - 1$ edges.
+
+     """
+     G = nx.empty_graph(1, create_using)
+
+     N = 1
+     for i in range(n):
+         # Use G.edges() to ensure 2-tuples. G.edges is 3-tuple for MultiGraph
+         edges = [(u + N, v + N) for (u, v) in G.edges()]
+         G.add_edges_from(edges)
+         G.add_edge(0, N)
+         N *= 2
+     return G
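+
+ # Order and size match the docstring formulas ($2^n$ nodes, $2^n - 1$ edges):
+ #
+ #     >>> G = nx.binomial_tree(4)
+ #     >>> G.number_of_nodes(), G.number_of_edges()
+ #     (16, 15)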
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ @nodes_or_number(0)
+ def complete_graph(n, create_using=None):
+     """Return the complete graph `K_n` with n nodes.
+
+     A complete graph on `n` nodes means that all pairs
+     of distinct nodes have an edge connecting them.
+
+     .. plot::
+
+         >>> nx.draw(nx.complete_graph(5))
+
+     Parameters
+     ----------
+     n : int or iterable container of nodes
+         If n is an integer, nodes are from range(n).
+         If n is a container of nodes, those nodes appear in the graph.
+         Warning: n is not checked for duplicates and if present the
+         resulting graph may not be as desired. Make sure you have no duplicates.
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     Examples
+     --------
+     >>> G = nx.complete_graph(9)
+     >>> len(G)
+     9
+     >>> G.size()
+     36
+     >>> G = nx.complete_graph(range(11, 14))
+     >>> list(G.nodes())
+     [11, 12, 13]
+     >>> G = nx.complete_graph(4, nx.DiGraph())
+     >>> G.is_directed()
+     True
+
+     """
+     _, nodes = n
+     G = empty_graph(nodes, create_using)
+     if len(nodes) > 1:
+         if G.is_directed():
+             edges = itertools.permutations(nodes, 2)
+         else:
+             edges = itertools.combinations(nodes, 2)
+         G.add_edges_from(edges)
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def circular_ladder_graph(n, create_using=None):
+     """Returns the circular ladder graph $CL_n$ of length n.
+
+     $CL_n$ consists of two concentric n-cycles in which
+     each of the n pairs of concentric nodes are joined by an edge.
+
+     Node labels are the integers 0 to n-1
+
+     .. plot::
+
+         >>> nx.draw(nx.circular_ladder_graph(5))
+
+     """
+     G = ladder_graph(n, create_using)
+     G.add_edge(0, n - 1)
+     G.add_edge(n, 2 * n - 1)
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def circulant_graph(n, offsets, create_using=None):
+     r"""Returns the circulant graph $Ci_n(x_1, x_2, ..., x_m)$ with $n$ nodes.
+
+     The circulant graph $Ci_n(x_1, ..., x_m)$ consists of $n$ nodes $0, ..., n-1$
+     such that node $i$ is connected to nodes $(i + x) \mod n$ and $(i - x) \mod n$
+     for all $x$ in $x_1, ..., x_m$. Thus $Ci_n(1)$ is a cycle graph.
+
+     .. plot::
+
+         >>> nx.draw(nx.circulant_graph(10, [1]))
+
+     Parameters
+     ----------
+     n : integer
+         The number of nodes in the graph.
+     offsets : list of integers
+         A list of node offsets, $x_1$ up to $x_m$, as described above.
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     Returns
+     -------
+     NetworkX Graph of type create_using
+
+     Examples
+     --------
+     Many well-known graph families are subfamilies of the circulant graphs;
+     for example, to create the cycle graph on n points, we connect every
+     node to nodes on either side (with offset plus or minus one). For n = 10,
+
+     >>> G = nx.circulant_graph(10, [1])
+     >>> edges = [
+     ...     (0, 9),
+     ...     (0, 1),
+     ...     (1, 2),
+     ...     (2, 3),
+     ...     (3, 4),
+     ...     (4, 5),
+     ...     (5, 6),
+     ...     (6, 7),
+     ...     (7, 8),
+     ...     (8, 9),
+     ... ]
+     >>> sorted(edges) == sorted(G.edges())
+     True
+
+     Similarly, we can create the complete graph
+     on 5 points with the set of offsets [1, 2]:
+
+     >>> G = nx.circulant_graph(5, [1, 2])
+     >>> edges = [
+     ...     (0, 1),
+     ...     (0, 2),
+     ...     (0, 3),
+     ...     (0, 4),
+     ...     (1, 2),
+     ...     (1, 3),
+     ...     (1, 4),
+     ...     (2, 3),
+     ...     (2, 4),
+     ...     (3, 4),
+     ... ]
+     >>> sorted(edges) == sorted(G.edges())
+     True
+
+     """
+     G = empty_graph(n, create_using)
+     for i in range(n):
+         for j in offsets:
+             G.add_edge(i, (i - j) % n)
+             G.add_edge(i, (i + j) % n)
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ @nodes_or_number(0)
+ def cycle_graph(n, create_using=None):
+     """Returns the cycle graph $C_n$ of cyclically connected nodes.
+
+     $C_n$ is a path with its two end-nodes connected.
+
+     .. plot::
+
+         >>> nx.draw(nx.cycle_graph(5))
+
+     Parameters
+     ----------
+     n : int or iterable container of nodes
+         If n is an integer, nodes are from `range(n)`.
+         If n is a container of nodes, those nodes appear in the graph.
+         Warning: n is not checked for duplicates and if present the
+         resulting graph may not be as desired. Make sure you have no duplicates.
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     Notes
+     -----
+     If create_using is directed, the direction is in increasing order.
+
+     """
+     _, nodes = n
+     G = empty_graph(nodes, create_using)
+     G.add_edges_from(pairwise(nodes, cyclic=True))
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def dorogovtsev_goltsev_mendes_graph(n, create_using=None):
+     """Returns the hierarchically constructed Dorogovtsev-Goltsev-Mendes graph.
+
+     The Dorogovtsev-Goltsev-Mendes [1]_ procedure produces a scale-free graph
+     deterministically with the following properties for a given `n`:
+     - Total number of nodes = ``3 * (3**n + 1) / 2``
+     - Total number of edges = ``3 ** (n + 1)``
+
+     .. plot::
+
+         >>> nx.draw(nx.dorogovtsev_goltsev_mendes_graph(3))
+
+     Parameters
+     ----------
+     n : integer
+         The generation number.
+
+     create_using : NetworkX Graph, optional
+         Graph type to be returned. Directed graphs and multi graphs are not
+         supported.
+
+     Returns
+     -------
+     G : NetworkX Graph
+
+     Examples
+     --------
+     >>> G = nx.dorogovtsev_goltsev_mendes_graph(3)
+     >>> G.number_of_nodes()
+     15
+     >>> G.number_of_edges()
+     27
+     >>> nx.is_planar(G)
+     True
+
+     References
+     ----------
+     .. [1] S. N. Dorogovtsev, A. V. Goltsev and J. F. F. Mendes,
+        "Pseudofractal scale-free web", Physical Review E 65, 066122, 2002.
+        https://arxiv.org/pdf/cond-mat/0112143.pdf
+     """
+     G = empty_graph(0, create_using)
+     if G.is_directed():
+         raise NetworkXError("Directed Graph not supported")
+     if G.is_multigraph():
+         raise NetworkXError("Multigraph not supported")
+
+     G.add_edge(0, 1)
+     if n == 0:
+         return G
+     new_node = 2  # next node to be added
+     for i in range(1, n + 1):  # iterate over number of generations.
+         last_generation_edges = list(G.edges())
+         number_of_edges_in_last_generation = len(last_generation_edges)
+         for j in range(number_of_edges_in_last_generation):
+             G.add_edge(new_node, last_generation_edges[j][0])
+             G.add_edge(new_node, last_generation_edges[j][1])
+             new_node += 1
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ @nodes_or_number(0)
+ def empty_graph(n=0, create_using=None, default=Graph):
+     """Returns the empty graph with n nodes and zero edges.
+
+     .. plot::
+
+         >>> nx.draw(nx.empty_graph(5))
+
+     Parameters
+     ----------
+     n : int or iterable container of nodes (default = 0)
+         If n is an integer, nodes are from `range(n)`.
+         If n is a container of nodes, those nodes appear in the graph.
+     create_using : Graph Instance, Constructor or None
+         Indicator of type of graph to return.
+         If a Graph-type instance, then clear and use it.
+         If None, use the `default` constructor.
+         If a constructor, call it to create an empty graph.
+     default : Graph constructor (optional, default = nx.Graph)
+         The constructor to use if create_using is None.
+         If None, then nx.Graph is used.
+         This is used when passing an unknown `create_using` value
+         through your home-grown function to `empty_graph` and
+         you want a default constructor other than nx.Graph.
+
+     Examples
+     --------
+     >>> G = nx.empty_graph(10)
+     >>> G.number_of_nodes()
+     10
+     >>> G.number_of_edges()
+     0
+     >>> G = nx.empty_graph("ABC")
+     >>> G.number_of_nodes()
+     3
+     >>> sorted(G)
+     ['A', 'B', 'C']
+
+     Notes
+     -----
+     The variable create_using should be a Graph Constructor or a
+     "graph"-like object. Constructors, e.g. `nx.Graph` or `nx.MultiGraph`
+     will be used to create the returned graph. "graph"-like objects
+     will be cleared (nodes and edges will be removed) and refitted as
+     an empty "graph" with nodes specified in n. This capability
+     is useful for specifying the class-nature of the resulting empty
+     "graph" (i.e. Graph, DiGraph, MyWeirdGraphClass, etc.).
+
+     The variable create_using has three main uses:
+     Firstly, the variable create_using can be used to create an
+     empty digraph, multigraph, etc. For example,
+
+     >>> n = 10
+     >>> G = nx.empty_graph(n, create_using=nx.DiGraph)
+
+     will create an empty digraph on n nodes.
+
+     Secondly, one can pass an existing graph (digraph, multigraph,
+     etc.) via create_using. For example, if G is an existing graph
+     (resp. digraph, multigraph, etc.), then empty_graph(n, create_using=G)
+     will empty G (i.e. delete all nodes and edges using G.clear())
+     and then add n nodes and zero edges, and return the modified graph.
+
+     Thirdly, when constructing your home-grown graph creation function
+     you can use empty_graph to construct the graph by passing a user
+     defined create_using to empty_graph. In this case, if you want the
+     default constructor to be other than nx.Graph, specify `default`.
+
+     >>> def mygraph(n, create_using=None):
+     ...     G = nx.empty_graph(n, create_using, nx.MultiGraph)
+     ...     G.add_edges_from([(0, 1), (0, 1)])
+     ...     return G
+     >>> G = mygraph(3)
+     >>> G.is_multigraph()
+     True
+     >>> G = mygraph(3, nx.Graph)
+     >>> G.is_multigraph()
+     False
+
+     See also create_empty_copy(G).
+
+     """
+     if create_using is None:
+         G = default()
+     elif isinstance(create_using, type):
+         G = create_using()
+     elif not hasattr(create_using, "adj"):
+         raise TypeError("create_using is not a valid NetworkX graph type or instance")
+     else:
+         # create_using is a NetworkX style Graph
+         create_using.clear()
+         G = create_using
+
+     _, nodes = n
+     G.add_nodes_from(nodes)
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def ladder_graph(n, create_using=None):
+     """Returns the Ladder graph of length n.
+
+     This is two paths of n nodes, with
+     each pair connected by a single edge.
+
+     Node labels are the integers 0 to 2*n - 1.
+
+     .. plot::
+
+         >>> nx.draw(nx.ladder_graph(5))
+
+     """
+     G = empty_graph(2 * n, create_using)
+     if G.is_directed():
+         raise NetworkXError("Directed Graph not supported")
+     G.add_edges_from(pairwise(range(n)))
+     G.add_edges_from(pairwise(range(n, 2 * n)))
+     G.add_edges_from((v, v + n) for v in range(n))
+     return G
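+
+ # The ladder on 2*n nodes has 3*n - 2 edges: two rails of n - 1 edges
+ # each, plus n rungs.
+ #
+ #     >>> G = nx.ladder_graph(4)
+ #     >>> len(G), G.size()
+ #     (8, 10)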
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ @nodes_or_number([0, 1])
+ def lollipop_graph(m, n, create_using=None):
+     """Returns the Lollipop Graph; ``K_m`` connected to ``P_n``.
+
+     This is the Barbell Graph without the right barbell.
+
+     .. plot::
+
+         >>> nx.draw(nx.lollipop_graph(3, 4))
+
+     Parameters
+     ----------
+     m, n : int or iterable container of nodes
+         If an integer, nodes are from ``range(m)`` and ``range(m, m+n)``.
+         If a container of nodes, those nodes appear in the graph.
+         Warning: `m` and `n` are not checked for duplicates and if present the
+         resulting graph may not be as desired. Make sure you have no duplicates.
+
+         The nodes for `m` appear in the complete graph $K_m$ and the nodes
+         for `n` appear in the path $P_n$
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     Returns
+     -------
+     Networkx graph
+         A complete graph of `m` nodes connected to a path of `n` nodes.
+
+     Notes
+     -----
+     The 2 subgraphs are joined via an edge ``(m-1, m)``.
+     If ``n=0``, this is merely a complete graph.
+
+     (This graph is an extremal example in David Aldous and Jim
+     Fill's etext on Random Walks on Graphs.)
+
+     """
+     m, m_nodes = m
+     M = len(m_nodes)
+     if M < 2:
+         raise NetworkXError("Invalid description: m should indicate at least 2 nodes")
+
+     n, n_nodes = n
+     if isinstance(m, numbers.Integral) and isinstance(n, numbers.Integral):
+         n_nodes = list(range(M, M + n))
+     N = len(n_nodes)
+
+     # the ball
+     G = complete_graph(m_nodes, create_using)
+     if G.is_directed():
+         raise NetworkXError("Directed Graph not supported")
+
+     # the stick
+     G.add_nodes_from(n_nodes)
+     if N > 1:
+         G.add_edges_from(pairwise(n_nodes))
+
+     if len(G) != M + N:
+         raise NetworkXError("Nodes must be distinct in containers m and n")
+
+     # connect ball to stick
+     if M > 0 and N > 0:
+         G.add_edge(m_nodes[-1], n_nodes[0])
+     return G
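+
+ # For integer arguments the counts are easy to verify: C(m, 2) clique
+ # edges, n - 1 path edges, and one joining edge.
+ #
+ #     >>> G = nx.lollipop_graph(3, 4)
+ #     >>> len(G), G.size()
+ #     (7, 7)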
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def null_graph(create_using=None):
+     """Returns the Null graph with no nodes or edges.
+
+     See empty_graph for the use of create_using.
+
+     """
+     G = empty_graph(0, create_using)
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ @nodes_or_number(0)
+ def path_graph(n, create_using=None):
+     """Returns the Path graph `P_n` of linearly connected nodes.
+
+     .. plot::
+
+         >>> nx.draw(nx.path_graph(5))
+
+     Parameters
+     ----------
+     n : int or iterable
+         If an integer, nodes are 0 to n - 1.
+         If an iterable of nodes, in the order they appear in the path.
+         Warning: n is not checked for duplicates and if present the
+         resulting graph may not be as desired. Make sure you have no duplicates.
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     """
+     _, nodes = n
+     G = empty_graph(nodes, create_using)
+     G.add_edges_from(pairwise(nodes))
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ @nodes_or_number(0)
+ def star_graph(n, create_using=None):
+     """Return the star graph
+
+     The star graph consists of one center node connected to n outer nodes.
+
+     .. plot::
+
+         >>> nx.draw(nx.star_graph(6))
+
+     Parameters
+     ----------
+     n : int or iterable
+         If an integer, node labels are 0 to n with center 0.
+         If an iterable of nodes, the center is the first.
+         Warning: n is not checked for duplicates and if present the
+         resulting graph may not be as desired. Make sure you have no duplicates.
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     Notes
+     -----
+     The graph has n+1 nodes for integer n.
+     So star_graph(3) is the same as star_graph(range(4)).
+     """
+     n, nodes = n
+     if isinstance(n, numbers.Integral):
+         nodes.append(int(n))  # there should be n+1 nodes
+     G = empty_graph(nodes, create_using)
+     if G.is_directed():
+         raise NetworkXError("Directed Graph not supported")
+
+     if len(nodes) > 1:
+         hub, *spokes = nodes
+         G.add_edges_from((hub, node) for node in spokes)
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ @nodes_or_number([0, 1])
+ def tadpole_graph(m, n, create_using=None):
+     """Returns the (m,n)-tadpole graph; ``C_m`` connected to ``P_n``.
+
+     This graph on m+n nodes connects a cycle of size `m` to a path of length `n`.
+     It looks like a tadpole. It is also called a kite graph or a dragon graph.
+
+     .. plot::
+
+         >>> nx.draw(nx.tadpole_graph(3, 5))
+
+     Parameters
+     ----------
+     m, n : int or iterable container of nodes
+         If an integer, nodes are from ``range(m)`` and ``range(m,m+n)``.
+         If a container of nodes, those nodes appear in the graph.
+         Warning: `m` and `n` are not checked for duplicates and if present the
+         resulting graph may not be as desired.
+
+         The nodes for `m` appear in the cycle graph $C_m$ and the nodes
+         for `n` appear in the path $P_n$.
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     Returns
+     -------
+     Networkx graph
+         A cycle of size `m` connected to a path of length `n`.
+
+     Raises
+     ------
+     NetworkXError
+         If ``m < 2``. The tadpole graph is undefined for ``m<2``.
+
+     Notes
+     -----
+     The 2 subgraphs are joined via an edge ``(m-1, m)``.
+     If ``n=0``, this is a cycle graph.
+     `m` and/or `n` can be a container of nodes instead of an integer.
+
+     """
+     m, m_nodes = m
+     M = len(m_nodes)
+     if M < 2:
+         raise NetworkXError("Invalid description: m should indicate at least 2 nodes")
+
+     n, n_nodes = n
+     if isinstance(m, numbers.Integral) and isinstance(n, numbers.Integral):
+         n_nodes = list(range(M, M + n))
+
+     # the circle
+     G = cycle_graph(m_nodes, create_using)
+     if G.is_directed():
+         raise NetworkXError("Directed Graph not supported")
+
+     # the stick
+     nx.add_path(G, [m_nodes[-1]] + list(n_nodes))
+
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def trivial_graph(create_using=None):
+     """Return the Trivial graph with one node (with label 0) and no edges.
+
+     .. plot::
+
+         >>> nx.draw(nx.trivial_graph(), with_labels=True)
+
+     """
+     G = empty_graph(1, create_using)
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def turan_graph(n, r):
+     r"""Return the Turan Graph
+
+     The Turan Graph is a complete multipartite graph on $n$ nodes
+     with $r$ disjoint subsets. That is, edges connect each node to
+     every node not in its subset.
+
+     Given $n$ and $r$, we create a complete multipartite graph with
+     $r - (n \bmod r)$ partitions of size $\lfloor n/r \rfloor$ and
+     $n \bmod r$ partitions of size $\lfloor n/r \rfloor + 1$.
+
+     .. plot::
+
+         >>> nx.draw(nx.turan_graph(6, 2))
+
+     Parameters
+     ----------
+     n : int
+         The number of nodes.
+     r : int
+         The number of partitions.
+         Must be less than or equal to n.
+
+     Notes
+     -----
+     Must satisfy $1 <= r <= n$.
+     The graph has $(r-1)(n^2)/(2r)$ edges, rounded down.
+     """
+
+     if not 1 <= r <= n:
+         raise NetworkXError("Must satisfy 1 <= r <= n")
+
+     partitions = [n // r] * (r - (n % r)) + [n // r + 1] * (n % r)
+     G = complete_multipartite_graph(*partitions)
+     return G
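+
+ # With n = 6 and r = 2 both partitions have 3 nodes, so the result is
+ # K_{3,3} and the edge formula (r-1)*n**2/(2*r) gives 9:
+ #
+ #     >>> G = nx.turan_graph(6, 2)
+ #     >>> G.size()
+ #     9
+ #     >>> nx.is_isomorphic(G, nx.complete_bipartite_graph(3, 3))
+ #     True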
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ @nodes_or_number(0)
+ def wheel_graph(n, create_using=None):
+     """Return the wheel graph
+
+     The wheel graph consists of a hub node connected to a cycle of (n-1) nodes.
+
+     .. plot::
+
+         >>> nx.draw(nx.wheel_graph(5))
+
+     Parameters
+     ----------
+     n : int or iterable
+         If an integer, node labels are 0 to n with center 0.
+         If an iterable of nodes, the center is the first.
+         Warning: n is not checked for duplicates and if present the
+         resulting graph may not be as desired. Make sure you have no duplicates.
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+
+     Node labels are the integers 0 to n - 1.
+     """
+     _, nodes = n
+     G = empty_graph(nodes, create_using)
+     if G.is_directed():
+         raise NetworkXError("Directed Graph not supported")
+
+     if len(nodes) > 1:
+         hub, *rim = nodes
+         G.add_edges_from((hub, node) for node in rim)
+         if len(rim) > 1:
+             G.add_edges_from(pairwise(rim, cyclic=True))
+     return G
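+
+ # A wheel on 5 nodes is a hub plus a 4-cycle: 4 spokes and 4 rim edges.
+ #
+ #     >>> G = nx.wheel_graph(5)
+ #     >>> len(G), G.size()
+ #     (5, 8)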
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def complete_multipartite_graph(*subset_sizes):
+     """Returns the complete multipartite graph with the specified subset sizes.
+
+     .. plot::
+
+         >>> nx.draw(nx.complete_multipartite_graph(1, 2, 3))
+
+     Parameters
+     ----------
+     subset_sizes : tuple of integers or tuple of node iterables
+         The arguments can either all be integer number of nodes or they
+         can all be iterables of nodes. If integers, they represent the
+         number of nodes in each subset of the multipartite graph.
+         If iterables, each is used to create the nodes for that subset.
+         The length of subset_sizes is the number of subsets.
+
+     Returns
+     -------
+     G : NetworkX Graph
+         Returns the complete multipartite graph with the specified subsets.
+
+         For each node, the node attribute 'subset' is an integer
+         indicating which subset contains the node.
+
+     Examples
+     --------
+     Creating a complete tripartite graph, with subsets of one, two, and three
+     nodes, respectively.
+
+     >>> G = nx.complete_multipartite_graph(1, 2, 3)
+     >>> [G.nodes[u]["subset"] for u in G]
+     [0, 1, 1, 2, 2, 2]
+     >>> list(G.edges(0))
+     [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]
+     >>> list(G.edges(2))
+     [(2, 0), (2, 3), (2, 4), (2, 5)]
+     >>> list(G.edges(4))
+     [(4, 0), (4, 1), (4, 2)]
+
+     >>> G = nx.complete_multipartite_graph("a", "bc", "def")
+     >>> [G.nodes[u]["subset"] for u in sorted(G)]
+     [0, 1, 1, 2, 2, 2]
+
+     Notes
+     -----
+     This function generalizes several other graph builder functions.
+
+     - If no subset sizes are given, this returns the null graph.
+     - If a single subset size `n` is given, this returns the empty graph on
+       `n` nodes.
+     - If two subset sizes `m` and `n` are given, this returns the complete
+       bipartite graph on `m + n` nodes.
+     - If subset sizes `1` and `n` are given, this returns the star graph on
+       `n + 1` nodes.
+
+     See also
+     --------
+     complete_bipartite_graph
+     """
+     # The complete multipartite graph is an undirected simple graph.
+     G = Graph()
+
+     if len(subset_sizes) == 0:
+         return G
+
+     # set up subsets of nodes
+     try:
+         extents = pairwise(itertools.accumulate((0,) + subset_sizes))
+         subsets = [range(start, end) for start, end in extents]
+     except TypeError:
+         subsets = subset_sizes
+     else:
+         if any(size < 0 for size in subset_sizes):
+             raise NetworkXError(f"Negative number of nodes not valid: {subset_sizes}")
+
+     # add nodes with subset attribute
+     # while checking that ints are not mixed with iterables
+     try:
+         for i, subset in enumerate(subsets):
+             G.add_nodes_from(subset, subset=i)
+     except TypeError as err:
+         raise NetworkXError("Arguments must be all ints or all iterables") from err
+
+     # Across subsets, all nodes should be adjacent.
+     # We can use itertools.combinations() because undirected.
+     for subset1, subset2 in itertools.combinations(subsets, 2):
+         G.add_edges_from(itertools.product(subset1, subset2))
+     return G
llmeval-env/lib/python3.10/site-packages/networkx/generators/cographs.py ADDED
@@ -0,0 +1,67 @@
+ r"""Generators for cographs
+
+ A cograph is a graph containing no induced path on four vertices.
+ Cographs or $P_4$-free graphs can be obtained from a single vertex
+ by disjoint union and complementation operations.
+
+ References
+ ----------
+ .. [0] D.G. Corneil, H. Lerchs, L.Stewart Burlingham,
+    "Complement reducible graphs",
+    Discrete Applied Mathematics, Volume 3, Issue 3, 1981, Pages 163-174,
+    ISSN 0166-218X.
+ """
+ import networkx as nx
+ from networkx.utils import py_random_state
+
+ __all__ = ["random_cograph"]
+
+
+ @py_random_state(1)
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def random_cograph(n, seed=None):
+     r"""Returns a random cograph with $2^n$ nodes.
+
+     A cograph is a graph containing no induced path on four vertices.
+     Cographs or $P_4$-free graphs can be obtained from a single vertex
+     by disjoint union and complementation operations.
+
+     This generator starts off from a single vertex and performs disjoint
+     union and full join operations on itself.
+     The decision on which operation will take place is random.
+
+     Parameters
+     ----------
+     n : int
+         The number of construction steps; the resulting cograph has
+         $2^n$ nodes.
+     seed : integer, random_state, or None (default)
+         Indicator of random number generation state.
+         See :ref:`Randomness<randomness>`.
+
+     Returns
+     -------
+     G : A random graph containing no induced path on four vertices.
+
+     See Also
+     --------
+     full_join
+     union
+
+     References
+     ----------
+     .. [1] D.G. Corneil, H. Lerchs, L.Stewart Burlingham,
+        "Complement reducible graphs",
+        Discrete Applied Mathematics, Volume 3, Issue 3, 1981, Pages 163-174,
+        ISSN 0166-218X.
+     """
+     R = nx.empty_graph(1)
+
+     for i in range(n):
+         RR = nx.relabel_nodes(R.copy(), lambda x: x + len(R))
+
+         if seed.randint(0, 1) == 0:
+             R = nx.full_join(R, RR)
+         else:
+             R = nx.disjoint_union(R, RR)
+
+     return R
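+
+ # Every iteration doubles the vertex set, so the order is exactly 2**n
+ # whatever the random choices were:
+ #
+ #     >>> R = nx.random_cograph(4, seed=42)
+ #     >>> len(R)
+ #     16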
llmeval-env/lib/python3.10/site-packages/networkx/generators/community.py ADDED
@@ -0,0 +1,1069 @@
+ """Generators for classes of graphs used in studying social networks."""
+ import itertools
+ import math
+
+ import networkx as nx
+ from networkx.utils import py_random_state
+
+ __all__ = [
+     "caveman_graph",
+     "connected_caveman_graph",
+     "relaxed_caveman_graph",
+     "random_partition_graph",
+     "planted_partition_graph",
+     "gaussian_random_partition_graph",
+     "ring_of_cliques",
+     "windmill_graph",
+     "stochastic_block_model",
+     "LFR_benchmark_graph",
+ ]
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def caveman_graph(l, k):
+     """Returns a caveman graph of `l` cliques of size `k`.
+
+     Parameters
+     ----------
+     l : int
+         Number of cliques
+     k : int
+         Size of cliques
+
+     Returns
+     -------
+     G : NetworkX Graph
+         caveman graph
+
+     Notes
+     -----
+     This returns an undirected graph; it can be converted to a directed
+     graph using :func:`nx.to_directed`, or a multigraph using
+     ``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
+     described in [1]_ and it is unclear which of the directed
+     generalizations is most useful.
+
+     Examples
+     --------
+     >>> G = nx.caveman_graph(3, 3)
+
+     See also
+     --------
+     connected_caveman_graph
+
+     References
+     ----------
+     .. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
+        Amer. J. Soc. 105, 493-527, 1999.
+     """
+     # l disjoint cliques of size k
+     G = nx.empty_graph(l * k)
+     if k > 1:
+         for start in range(0, l * k, k):
+             edges = itertools.combinations(range(start, start + k), 2)
+             G.add_edges_from(edges)
+     return G
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def connected_caveman_graph(l, k):
+     """Returns a connected caveman graph of `l` cliques of size `k`.
+
+     The connected caveman graph is formed by creating `l` cliques of size
+     `k`, then a single edge in each clique is rewired to a node in an
+     adjacent clique.
+
+     Parameters
+     ----------
+     l : int
+         number of cliques
+     k : int
+         size of cliques (k at least 2 or NetworkXError is raised)
+
+     Returns
+     -------
+     G : NetworkX Graph
+         connected caveman graph
+
+     Raises
+     ------
+     NetworkXError
+         If the size of cliques `k` is smaller than 2.
+
+     Notes
+     -----
+     This returns an undirected graph; it can be converted to a directed
+     graph using :func:`nx.to_directed`, or a multigraph using
+     ``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
+     described in [1]_ and it is unclear which of the directed
+     generalizations is most useful.
+
+     Examples
+     --------
+     >>> G = nx.connected_caveman_graph(3, 3)
+
+     References
+     ----------
+     .. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
+        Amer. J. Soc. 105, 493-527, 1999.
+     """
+     if k < 2:
+         raise nx.NetworkXError(
+             "The size of cliques in a connected caveman graph must be at least 2."
+         )
+
+     G = nx.caveman_graph(l, k)
+     for start in range(0, l * k, k):
+         G.remove_edge(start, start + 1)
+         G.add_edge(start, (start - 1) % (l * k))
+     return G
+
+
+ @py_random_state(3)
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def relaxed_caveman_graph(l, k, p, seed=None):
+     """Returns a relaxed caveman graph.
+
+     A relaxed caveman graph starts with `l` cliques of size `k`. Edges are
+     then randomly rewired with probability `p` to link different cliques.
+
+     Parameters
+     ----------
+     l : int
+         Number of groups
+     k : int
136
+ Size of cliques
137
+ p : float
138
+ Probability of rewiring each edge.
139
+ seed : integer, random_state, or None (default)
140
+ Indicator of random number generation state.
141
+ See :ref:`Randomness<randomness>`.
142
+
143
+ Returns
144
+ -------
145
+ G : NetworkX Graph
146
+ Relaxed Caveman Graph
147
+
148
+ Raises
149
+ ------
150
+ NetworkXError
151
+ If p is not in [0,1]
152
+
153
+ Examples
154
+ --------
155
+ >>> G = nx.relaxed_caveman_graph(2, 3, 0.1, seed=42)
156
+
157
+ References
158
+ ----------
159
+ .. [1] Santo Fortunato, Community Detection in Graphs,
160
+ Physics Reports Volume 486, Issues 3-5, February 2010, Pages 75-174.
161
+ https://arxiv.org/abs/0906.0612
162
+ """
163
+ G = nx.caveman_graph(l, k)
164
+ nodes = list(G)
165
+ for u, v in G.edges():
166
+ if seed.random() < p: # rewire the edge
167
+ x = seed.choice(nodes)
168
+ if G.has_edge(u, x):
169
+ continue
170
+ G.remove_edge(u, v)
171
+ G.add_edge(u, x)
172
+ return G
173
+
174
+
175
+ @py_random_state(3)
176
+ @nx._dispatchable(graphs=None, returns_graph=True)
177
+ def random_partition_graph(sizes, p_in, p_out, seed=None, directed=False):
178
+ """Returns the random partition graph with a partition of sizes.
179
+
180
+ A partition graph is a graph of communities with sizes defined by
181
+ s in sizes. Nodes in the same group are connected with probability
182
+ p_in and nodes of different groups are connected with probability
183
+ p_out.
184
+
185
+ Parameters
186
+ ----------
187
+ sizes : list of ints
188
+ Sizes of groups
189
+ p_in : float
190
+ probability of edges with in groups
191
+ p_out : float
192
+ probability of edges between groups
193
+ directed : boolean optional, default=False
194
+ Whether to create a directed graph
195
+ seed : integer, random_state, or None (default)
196
+ Indicator of random number generation state.
197
+ See :ref:`Randomness<randomness>`.
198
+
199
+ Returns
200
+ -------
201
+ G : NetworkX Graph or DiGraph
202
+ random partition graph of size sum(gs)
203
+
204
+ Raises
205
+ ------
206
+ NetworkXError
207
+ If p_in or p_out is not in [0,1]
208
+
209
+ Examples
210
+ --------
211
+ >>> G = nx.random_partition_graph([10, 10, 10], 0.25, 0.01)
212
+ >>> len(G)
213
+ 30
214
+ >>> partition = G.graph["partition"]
215
+ >>> len(partition)
216
+ 3
217
+
218
+ Notes
219
+ -----
220
+ This is a generalization of the planted-l-partition described in
221
+ [1]_. It allows for the creation of groups of any size.
222
+
223
+ The partition is store as a graph attribute 'partition'.
224
+
225
+ References
226
+ ----------
227
+ .. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports
228
+ Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
229
+ """
230
+ # Use geometric method for O(n+m) complexity algorithm
231
+ # partition = nx.community_sets(nx.get_node_attributes(G, 'affiliation'))
232
+ if not 0.0 <= p_in <= 1.0:
233
+ raise nx.NetworkXError("p_in must be in [0,1]")
234
+ if not 0.0 <= p_out <= 1.0:
235
+ raise nx.NetworkXError("p_out must be in [0,1]")
236
+
237
+ # create connection matrix
238
+ num_blocks = len(sizes)
239
+ p = [[p_out for s in range(num_blocks)] for r in range(num_blocks)]
240
+ for r in range(num_blocks):
241
+ p[r][r] = p_in
242
+
243
+ return stochastic_block_model(
244
+ sizes,
245
+ p,
246
+ nodelist=None,
247
+ seed=seed,
248
+ directed=directed,
249
+ selfloops=False,
250
+ sparse=True,
251
+ )
252
+
253
+
254
+ @py_random_state(4)
255
+ @nx._dispatchable(graphs=None, returns_graph=True)
256
+ def planted_partition_graph(l, k, p_in, p_out, seed=None, directed=False):
257
+ """Returns the planted l-partition graph.
258
+
259
+ This model partitions a graph with n=l*k vertices in
260
+ l groups with k vertices each. Vertices of the same
261
+ group are linked with a probability p_in, and vertices
262
+ of different groups are linked with probability p_out.
263
+
264
+ Parameters
265
+ ----------
266
+ l : int
267
+ Number of groups
268
+ k : int
269
+ Number of vertices in each group
270
+ p_in : float
271
+ probability of connecting vertices within a group
272
+ p_out : float
273
+ probability of connected vertices between groups
274
+ seed : integer, random_state, or None (default)
275
+ Indicator of random number generation state.
276
+ See :ref:`Randomness<randomness>`.
277
+ directed : bool,optional (default=False)
278
+ If True return a directed graph
279
+
280
+ Returns
281
+ -------
282
+ G : NetworkX Graph or DiGraph
283
+ planted l-partition graph
284
+
285
+ Raises
286
+ ------
287
+ NetworkXError
288
+ If `p_in`, `p_out` are not in `[0, 1]`
289
+
290
+ Examples
291
+ --------
292
+ >>> G = nx.planted_partition_graph(4, 3, 0.5, 0.1, seed=42)
293
+
294
+ See Also
295
+ --------
296
+ random_partition_model
297
+
298
+ References
299
+ ----------
300
+ .. [1] A. Condon, R.M. Karp, Algorithms for graph partitioning
301
+ on the planted partition model,
302
+ Random Struct. Algor. 18 (2001) 116-140.
303
+
304
+ .. [2] Santo Fortunato 'Community Detection in Graphs' Physical Reports
305
+ Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
306
+ """
307
+ return random_partition_graph([k] * l, p_in, p_out, seed=seed, directed=directed)
308
+
309
+
310
+ @py_random_state(6)
311
+ @nx._dispatchable(graphs=None, returns_graph=True)
312
+ def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False, seed=None):
313
+ """Generate a Gaussian random partition graph.
314
+
315
+ A Gaussian random partition graph is created by creating k partitions
316
+ each with a size drawn from a normal distribution with mean s and variance
317
+ s/v. Nodes are connected within clusters with probability p_in and
318
+ between clusters with probability p_out[1]
319
+
320
+ Parameters
321
+ ----------
322
+ n : int
323
+ Number of nodes in the graph
324
+ s : float
325
+ Mean cluster size
326
+ v : float
327
+ Shape parameter. The variance of cluster size distribution is s/v.
328
+ p_in : float
329
+ Probability of intra cluster connection.
330
+ p_out : float
331
+ Probability of inter cluster connection.
332
+ directed : boolean, optional default=False
333
+ Whether to create a directed graph or not
334
+ seed : integer, random_state, or None (default)
335
+ Indicator of random number generation state.
336
+ See :ref:`Randomness<randomness>`.
337
+
338
+ Returns
339
+ -------
340
+ G : NetworkX Graph or DiGraph
341
+ gaussian random partition graph
342
+
343
+ Raises
344
+ ------
345
+ NetworkXError
346
+ If s is > n
347
+ If p_in or p_out is not in [0,1]
348
+
349
+ Notes
350
+ -----
351
+ Note the number of partitions is dependent on s,v and n, and that the
352
+ last partition may be considerably smaller, as it is sized to simply
353
+ fill out the nodes [1]
354
+
355
+ See Also
356
+ --------
357
+ random_partition_graph
358
+
359
+ Examples
360
+ --------
361
+ >>> G = nx.gaussian_random_partition_graph(100, 10, 10, 0.25, 0.1)
362
+ >>> len(G)
363
+ 100
364
+
365
+ References
366
+ ----------
367
+ .. [1] Ulrik Brandes, Marco Gaertler, Dorothea Wagner,
368
+ Experiments on Graph Clustering Algorithms,
369
+ In the proceedings of the 11th Europ. Symp. Algorithms, 2003.
370
+ """
371
+ if s > n:
372
+ raise nx.NetworkXError("s must be <= n")
373
+ assigned = 0
374
+ sizes = []
375
+ while True:
376
+ size = int(seed.gauss(s, s / v + 0.5))
377
+ if size < 1: # how to handle 0 or negative sizes?
378
+ continue
379
+ if assigned + size >= n:
380
+ sizes.append(n - assigned)
381
+ break
382
+ assigned += size
383
+ sizes.append(size)
384
+ return random_partition_graph(sizes, p_in, p_out, seed=seed, directed=directed)
385
+
386
+
387
+ @nx._dispatchable(graphs=None, returns_graph=True)
388
+ def ring_of_cliques(num_cliques, clique_size):
389
+ """Defines a "ring of cliques" graph.
390
+
391
+ A ring of cliques graph is consisting of cliques, connected through single
392
+ links. Each clique is a complete graph.
393
+
394
+ Parameters
395
+ ----------
396
+ num_cliques : int
397
+ Number of cliques
398
+ clique_size : int
399
+ Size of cliques
400
+
401
+ Returns
402
+ -------
403
+ G : NetworkX Graph
404
+ ring of cliques graph
405
+
406
+ Raises
407
+ ------
408
+ NetworkXError
409
+ If the number of cliques is lower than 2 or
410
+ if the size of cliques is smaller than 2.
411
+
412
+ Examples
413
+ --------
414
+ >>> G = nx.ring_of_cliques(8, 4)
415
+
416
+ See Also
417
+ --------
418
+ connected_caveman_graph
419
+
420
+ Notes
421
+ -----
422
+ The `connected_caveman_graph` graph removes a link from each clique to
423
+ connect it with the next clique. Instead, the `ring_of_cliques` graph
424
+ simply adds the link without removing any link from the cliques.
425
+ """
426
+ if num_cliques < 2:
427
+ raise nx.NetworkXError("A ring of cliques must have at least two cliques")
428
+ if clique_size < 2:
429
+ raise nx.NetworkXError("The cliques must have at least two nodes")
430
+
431
+ G = nx.Graph()
432
+ for i in range(num_cliques):
433
+ edges = itertools.combinations(
434
+ range(i * clique_size, i * clique_size + clique_size), 2
435
+ )
436
+ G.add_edges_from(edges)
437
+ G.add_edge(
438
+ i * clique_size + 1, (i + 1) * clique_size % (num_cliques * clique_size)
439
+ )
440
+ return G
441
+
442
+
443
+ @nx._dispatchable(graphs=None, returns_graph=True)
444
+ def windmill_graph(n, k):
445
+ """Generate a windmill graph.
446
+ A windmill graph is a graph of `n` cliques each of size `k` that are all
447
+ joined at one node.
448
+ It can be thought of as taking a disjoint union of `n` cliques of size `k`,
449
+ selecting one point from each, and contracting all of the selected points.
450
+ Alternatively, one could generate `n` cliques of size `k-1` and one node
451
+ that is connected to all other nodes in the graph.
452
+
453
+ Parameters
454
+ ----------
455
+ n : int
456
+ Number of cliques
457
+ k : int
458
+ Size of cliques
459
+
460
+ Returns
461
+ -------
462
+ G : NetworkX Graph
463
+ windmill graph with n cliques of size k
464
+
465
+ Raises
466
+ ------
467
+ NetworkXError
468
+ If the number of cliques is less than two
469
+ If the size of the cliques are less than two
470
+
471
+ Examples
472
+ --------
473
+ >>> G = nx.windmill_graph(4, 5)
474
+
475
+ Notes
476
+ -----
477
+ The node labeled `0` will be the node connected to all other nodes.
478
+ Note that windmill graphs are usually denoted `Wd(k,n)`, so the parameters
479
+ are in the opposite order as the parameters of this method.
480
+ """
481
+ if n < 2:
482
+ msg = "A windmill graph must have at least two cliques"
483
+ raise nx.NetworkXError(msg)
484
+ if k < 2:
485
+ raise nx.NetworkXError("The cliques must have at least two nodes")
486
+
487
+ G = nx.disjoint_union_all(
488
+ itertools.chain(
489
+ [nx.complete_graph(k)], (nx.complete_graph(k - 1) for _ in range(n - 1))
490
+ )
491
+ )
492
+ G.add_edges_from((0, i) for i in range(k, G.number_of_nodes()))
493
+ return G
494
+
495
+
496
+ @py_random_state(3)
497
+ @nx._dispatchable(graphs=None, returns_graph=True)
498
+ def stochastic_block_model(
499
+ sizes, p, nodelist=None, seed=None, directed=False, selfloops=False, sparse=True
500
+ ):
501
+ """Returns a stochastic block model graph.
502
+
503
+ This model partitions the nodes in blocks of arbitrary sizes, and places
504
+ edges between pairs of nodes independently, with a probability that depends
505
+ on the blocks.
506
+
507
+ Parameters
508
+ ----------
509
+ sizes : list of ints
510
+ Sizes of blocks
511
+ p : list of list of floats
512
+ Element (r,s) gives the density of edges going from the nodes
513
+ of group r to nodes of group s.
514
+ p must match the number of groups (len(sizes) == len(p)),
515
+ and it must be symmetric if the graph is undirected.
516
+ nodelist : list, optional
517
+ The block tags are assigned according to the node identifiers
518
+ in nodelist. If nodelist is None, then the ordering is the
519
+ range [0,sum(sizes)-1].
520
+ seed : integer, random_state, or None (default)
521
+ Indicator of random number generation state.
522
+ See :ref:`Randomness<randomness>`.
523
+ directed : boolean optional, default=False
524
+ Whether to create a directed graph or not.
525
+ selfloops : boolean optional, default=False
526
+ Whether to include self-loops or not.
527
+ sparse: boolean optional, default=True
528
+ Use the sparse heuristic to speed up the generator.
529
+
530
+ Returns
531
+ -------
532
+ g : NetworkX Graph or DiGraph
533
+ Stochastic block model graph of size sum(sizes)
534
+
535
+ Raises
536
+ ------
537
+ NetworkXError
538
+ If probabilities are not in [0,1].
539
+ If the probability matrix is not square (directed case).
540
+ If the probability matrix is not symmetric (undirected case).
541
+ If the sizes list does not match nodelist or the probability matrix.
542
+ If nodelist contains duplicate.
543
+
544
+ Examples
545
+ --------
546
+ >>> sizes = [75, 75, 300]
547
+ >>> probs = [[0.25, 0.05, 0.02], [0.05, 0.35, 0.07], [0.02, 0.07, 0.40]]
548
+ >>> g = nx.stochastic_block_model(sizes, probs, seed=0)
549
+ >>> len(g)
550
+ 450
551
+ >>> H = nx.quotient_graph(g, g.graph["partition"], relabel=True)
552
+ >>> for v in H.nodes(data=True):
553
+ ... print(round(v[1]["density"], 3))
554
+ 0.245
555
+ 0.348
556
+ 0.405
557
+ >>> for v in H.edges(data=True):
558
+ ... print(round(1.0 * v[2]["weight"] / (sizes[v[0]] * sizes[v[1]]), 3))
559
+ 0.051
560
+ 0.022
561
+ 0.07
562
+
563
+ See Also
564
+ --------
565
+ random_partition_graph
566
+ planted_partition_graph
567
+ gaussian_random_partition_graph
568
+ gnp_random_graph
569
+
570
+ References
571
+ ----------
572
+ .. [1] Holland, P. W., Laskey, K. B., & Leinhardt, S.,
573
+ "Stochastic blockmodels: First steps",
574
+ Social networks, 5(2), 109-137, 1983.
575
+ """
576
+ # Check if dimensions match
577
+ if len(sizes) != len(p):
578
+ raise nx.NetworkXException("'sizes' and 'p' do not match.")
579
+ # Check for probability symmetry (undirected) and shape (directed)
580
+ for row in p:
581
+ if len(p) != len(row):
582
+ raise nx.NetworkXException("'p' must be a square matrix.")
583
+ if not directed:
584
+ p_transpose = [list(i) for i in zip(*p)]
585
+ for i in zip(p, p_transpose):
586
+ for j in zip(i[0], i[1]):
587
+ if abs(j[0] - j[1]) > 1e-08:
588
+ raise nx.NetworkXException("'p' must be symmetric.")
589
+ # Check for probability range
590
+ for row in p:
591
+ for prob in row:
592
+ if prob < 0 or prob > 1:
593
+ raise nx.NetworkXException("Entries of 'p' not in [0,1].")
594
+ # Check for nodelist consistency
595
+ if nodelist is not None:
596
+ if len(nodelist) != sum(sizes):
597
+ raise nx.NetworkXException("'nodelist' and 'sizes' do not match.")
598
+ if len(nodelist) != len(set(nodelist)):
599
+ raise nx.NetworkXException("nodelist contains duplicate.")
600
+ else:
601
+ nodelist = range(sum(sizes))
602
+
603
+ # Setup the graph conditionally to the directed switch.
604
+ block_range = range(len(sizes))
605
+ if directed:
606
+ g = nx.DiGraph()
607
+ block_iter = itertools.product(block_range, block_range)
608
+ else:
609
+ g = nx.Graph()
610
+ block_iter = itertools.combinations_with_replacement(block_range, 2)
611
+ # Split nodelist in a partition (list of sets).
612
+ size_cumsum = [sum(sizes[0:x]) for x in range(len(sizes) + 1)]
613
+ g.graph["partition"] = [
614
+ set(nodelist[size_cumsum[x] : size_cumsum[x + 1]])
615
+ for x in range(len(size_cumsum) - 1)
616
+ ]
617
+ # Setup nodes and graph name
618
+ for block_id, nodes in enumerate(g.graph["partition"]):
619
+ for node in nodes:
620
+ g.add_node(node, block=block_id)
621
+
622
+ g.name = "stochastic_block_model"
623
+
624
+ # Test for edge existence
625
+ parts = g.graph["partition"]
626
+ for i, j in block_iter:
627
+ if i == j:
628
+ if directed:
629
+ if selfloops:
630
+ edges = itertools.product(parts[i], parts[i])
631
+ else:
632
+ edges = itertools.permutations(parts[i], 2)
633
+ else:
634
+ edges = itertools.combinations(parts[i], 2)
635
+ if selfloops:
636
+ edges = itertools.chain(edges, zip(parts[i], parts[i]))
637
+ for e in edges:
638
+ if seed.random() < p[i][j]:
639
+ g.add_edge(*e)
640
+ else:
641
+ edges = itertools.product(parts[i], parts[j])
642
+ if sparse:
643
+ if p[i][j] == 1: # Test edges cases p_ij = 0 or 1
644
+ for e in edges:
645
+ g.add_edge(*e)
646
+ elif p[i][j] > 0:
647
+ while True:
648
+ try:
649
+ logrand = math.log(seed.random())
650
+ skip = math.floor(logrand / math.log(1 - p[i][j]))
651
+ # consume "skip" edges
652
+ next(itertools.islice(edges, skip, skip), None)
653
+ e = next(edges)
654
+ g.add_edge(*e) # __safe
655
+ except StopIteration:
656
+ break
657
+ else:
658
+ for e in edges:
659
+ if seed.random() < p[i][j]:
660
+ g.add_edge(*e) # __safe
661
+ return g
662
+
663
+
664
+ def _zipf_rv_below(gamma, xmin, threshold, seed):
665
+ """Returns a random value chosen from the bounded Zipf distribution.
666
+
667
+ Repeatedly draws values from the Zipf distribution until the
668
+ threshold is met, then returns that value.
669
+ """
670
+ result = nx.utils.zipf_rv(gamma, xmin, seed)
671
+ while result > threshold:
672
+ result = nx.utils.zipf_rv(gamma, xmin, seed)
673
+ return result
674
+
675
+
676
+ def _powerlaw_sequence(gamma, low, high, condition, length, max_iters, seed):
677
+ """Returns a list of numbers obeying a constrained power law distribution.
678
+
679
+ ``gamma`` and ``low`` are the parameters for the Zipf distribution.
680
+
681
+ ``high`` is the maximum allowed value for values draw from the Zipf
682
+ distribution. For more information, see :func:`_zipf_rv_below`.
683
+
684
+ ``condition`` and ``length`` are Boolean-valued functions on
685
+ lists. While generating the list, random values are drawn and
686
+ appended to the list until ``length`` is satisfied by the created
687
+ list. Once ``condition`` is satisfied, the sequence generated in
688
+ this way is returned.
689
+
690
+ ``max_iters`` indicates the number of times to generate a list
691
+ satisfying ``length``. If the number of iterations exceeds this
692
+ value, :exc:`~networkx.exception.ExceededMaxIterations` is raised.
693
+
694
+ seed : integer, random_state, or None (default)
695
+ Indicator of random number generation state.
696
+ See :ref:`Randomness<randomness>`.
697
+ """
698
+ for i in range(max_iters):
699
+ seq = []
700
+ while not length(seq):
701
+ seq.append(_zipf_rv_below(gamma, low, high, seed))
702
+ if condition(seq):
703
+ return seq
704
+ raise nx.ExceededMaxIterations("Could not create power law sequence")
705
+
706
+
707
+ def _hurwitz_zeta(x, q, tolerance):
708
+ """The Hurwitz zeta function, or the Riemann zeta function of two arguments.
709
+
710
+ ``x`` must be greater than one and ``q`` must be positive.
711
+
712
+ This function repeatedly computes subsequent partial sums until
713
+ convergence, as decided by ``tolerance``.
714
+ """
715
+ z = 0
716
+ z_prev = -float("inf")
717
+ k = 0
718
+ while abs(z - z_prev) > tolerance:
719
+ z_prev = z
720
+ z += 1 / ((k + q) ** x)
721
+ k += 1
722
+ return z
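`_hurwitz_zeta` stops once the last added term drops below `tolerance`, which is adequate for the x > 1 regime it is used in here. A quick sanity check against the classical value zeta(2, 1) = pi**2 / 6 (a sketch; run with the function defined above; note the truncation error is larger than the per-term tolerance, so the comparison is deliberately loose):

import math

# zeta(2, 1) is the ordinary Riemann zeta function at 2, i.e. pi**2 / 6.
approx = _hurwitz_zeta(2, 1, tolerance=1e-10)
print(abs(approx - math.pi**2 / 6) < 1e-4)  # True; tail error ~ 1/k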
+
+
+ def _generate_min_degree(gamma, average_degree, max_degree, tolerance, max_iters):
+     """Returns a minimum degree from the given average degree."""
+     # Defines zeta function whether or not Scipy is available
+     try:
+         from scipy.special import zeta
+     except ImportError:
+
+         def zeta(x, q):
+             return _hurwitz_zeta(x, q, tolerance)
+
+     min_deg_top = max_degree
+     min_deg_bot = 1
+     min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
+     itrs = 0
+     mid_avg_deg = 0
+     while abs(mid_avg_deg - average_degree) > tolerance:
+         if itrs > max_iters:
+             raise nx.ExceededMaxIterations("Could not match average_degree")
+         mid_avg_deg = 0
+         for x in range(int(min_deg_mid), max_degree + 1):
+             mid_avg_deg += (x ** (-gamma + 1)) / zeta(gamma, min_deg_mid)
+         if mid_avg_deg > average_degree:
+             min_deg_top = min_deg_mid
+             min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
+         else:
+             min_deg_bot = min_deg_mid
+             min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
+         itrs += 1
+     # return int(min_deg_mid + 0.5)
+     return round(min_deg_mid)
+
+
+ def _generate_communities(degree_seq, community_sizes, mu, max_iters, seed):
+     """Returns a list of sets, each of which represents a community.
+
+     ``degree_seq`` is the degree sequence that must be met by the
+     graph.
+
+     ``community_sizes`` is the community size distribution that must be
+     met by the generated list of sets.
+
+     ``mu`` is a float in the interval [0, 1] indicating the fraction of
+     inter-community edges incident to each node.
+
+     ``max_iters`` is the number of times to try to add a node to a
+     community. This must be greater than the length of
+     ``degree_seq``, otherwise this function will always fail. If
+     the number of iterations exceeds this value,
+     :exc:`~networkx.exception.ExceededMaxIterations` is raised.
+
+     seed : integer, random_state, or None (default)
+         Indicator of random number generation state.
+         See :ref:`Randomness<randomness>`.
+
+     The communities returned by this are sets of integers in the set {0,
+     ..., *n* - 1}, where *n* is the length of ``degree_seq``.
+
+     """
+     # This assumes the nodes in the graph will be natural numbers.
+     result = [set() for _ in community_sizes]
+     n = len(degree_seq)
+     free = list(range(n))
+     for i in range(max_iters):
+         v = free.pop()
+         c = seed.choice(range(len(community_sizes)))
+         # s = int(degree_seq[v] * (1 - mu) + 0.5)
+         s = round(degree_seq[v] * (1 - mu))
+         # If the community is large enough, add the node to the chosen
+         # community. Otherwise, return it to the list of unaffiliated
+         # nodes.
+         if s < community_sizes[c]:
+             result[c].add(v)
+         else:
+             free.append(v)
+         # If the community is too big, remove a node from it.
+         if len(result[c]) > community_sizes[c]:
+             free.append(result[c].pop())
+         if not free:
+             return result
+     msg = "Could not assign communities; try increasing min_community"
+     raise nx.ExceededMaxIterations(msg)
+
+
+ @py_random_state(11)
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def LFR_benchmark_graph(
+     n,
+     tau1,
+     tau2,
+     mu,
+     average_degree=None,
+     min_degree=None,
+     max_degree=None,
+     min_community=None,
+     max_community=None,
+     tol=1.0e-7,
+     max_iters=500,
+     seed=None,
+ ):
+     r"""Returns the LFR benchmark graph.
+
+     This algorithm proceeds as follows:
+
+     1) Find a degree sequence with a power law distribution, and minimum
+        value ``min_degree``, which has approximate average degree
+        ``average_degree``. This is accomplished by either
+
+        a) specifying ``min_degree`` and not ``average_degree``,
+        b) specifying ``average_degree`` and not ``min_degree``, in which
+           case a suitable minimum degree will be found.
+
+        ``max_degree`` can also be specified, otherwise it will be set to
+        ``n``. Each node *u* will have $\mu \mathrm{deg}(u)$ edges
+        joining it to nodes in communities other than its own and $(1 -
+        \mu) \mathrm{deg}(u)$ edges joining it to nodes in its own
+        community.
+     2) Generate community sizes according to a power law distribution
+        with exponent ``tau2``. If ``min_community`` and
+        ``max_community`` are not specified they will be selected to be
+        ``min_degree`` and ``max_degree``, respectively. Community sizes
+        are generated until the sum of their sizes equals ``n``.
+     3) Each node will be randomly assigned a community with the
+        condition that the community is large enough for the node's
+        intra-community degree, $(1 - \mu) \mathrm{deg}(u)$ as
+        described in step 1. If a community grows too large, a random node
+        will be selected for reassignment to a new community, until all
+        nodes have been assigned a community.
+     4) Each node *u* then adds $(1 - \mu) \mathrm{deg}(u)$
+        intra-community edges and $\mu \mathrm{deg}(u)$ inter-community
+        edges.
+
+     Parameters
+     ----------
+     n : int
+         Number of nodes in the created graph.
+
+     tau1 : float
+         Power law exponent for the degree distribution of the created
+         graph. This value must be strictly greater than one.
+
+     tau2 : float
+         Power law exponent for the community size distribution in the
+         created graph. This value must be strictly greater than one.
+
+     mu : float
+         Fraction of inter-community edges incident to each node. This
+         value must be in the interval [0, 1].
+
+     average_degree : float
+         Desired average degree of nodes in the created graph. This value
+         must be in the interval [0, *n*]. Exactly one of this and
+         ``min_degree`` must be specified, otherwise a
+         :exc:`NetworkXError` is raised.
+
+     min_degree : int
+         Minimum degree of nodes in the created graph. This value must be
+         in the interval [0, *n*]. Exactly one of this and
+         ``average_degree`` must be specified, otherwise a
+         :exc:`NetworkXError` is raised.
+
+     max_degree : int
+         Maximum degree of nodes in the created graph. If not specified,
+         this is set to ``n``, the total number of nodes in the graph.
+
+     min_community : int
+         Minimum size of communities in the graph. If not specified, this
+         is set to ``min_degree``.
+
+     max_community : int
+         Maximum size of communities in the graph. If not specified, this
+         is set to ``n``, the total number of nodes in the graph.
+
+     tol : float
+         Tolerance when comparing floats, specifically when comparing
+         average degree values.
+
+     max_iters : int
+         Maximum number of iterations to try to create the community sizes,
+         degree distribution, and community affiliations.
+
+     seed : integer, random_state, or None (default)
+         Indicator of random number generation state.
+         See :ref:`Randomness<randomness>`.
+
+     Returns
+     -------
+     G : NetworkX graph
+         The LFR benchmark graph generated according to the specified
+         parameters.
+
+         Each node in the graph has a node attribute ``'community'`` that
+         stores the community (that is, the set of nodes) that includes
+         it.
+
+     Raises
+     ------
+     NetworkXError
+         If any of the parameters do not meet their upper and lower bounds:
+
+         - ``tau1`` and ``tau2`` must be strictly greater than 1.
+         - ``mu`` must be in [0, 1].
+         - ``max_degree`` must be in {1, ..., *n*}.
+         - ``min_community`` and ``max_community`` must be in {0, ...,
+           *n*}.
+
+         If not exactly one of ``average_degree`` and ``min_degree`` is
+         specified.
+
+         If ``min_degree`` is not specified and a suitable ``min_degree``
+         cannot be found.
+
+     ExceededMaxIterations
+         If a valid degree sequence cannot be created within
+         ``max_iters`` number of iterations.
+
+         If a valid set of community sizes cannot be created within
+         ``max_iters`` number of iterations.
+
+         If a valid community assignment cannot be created within ``10 *
+         n * max_iters`` number of iterations.
+
+     Examples
+     --------
+     Basic usage::
+
+         >>> from networkx.generators.community import LFR_benchmark_graph
+         >>> n = 250
+         >>> tau1 = 3
+         >>> tau2 = 1.5
+         >>> mu = 0.1
+         >>> G = LFR_benchmark_graph(
+         ...     n, tau1, tau2, mu, average_degree=5, min_community=20, seed=10
+         ... )
+
+     Continuing the example above, you can get the communities from the
+     node attributes of the graph::
+
+         >>> communities = {frozenset(G.nodes[v]["community"]) for v in G}
+
+     Notes
+     -----
+     This algorithm differs slightly from the original way it was
+     presented in [1]_.
+
+     1) Rather than connecting the graph via a configuration model then
+        rewiring to match the intra-community and inter-community
+        degrees, we do this wiring explicitly at the end, which should be
+        equivalent.
+     2) The code posted on the author's website [2]_ calculates the random
+        power law distributed variables and their average using
+        continuous approximations, whereas we use the discrete
+        distributions here as both degree and community size are
+        discrete.
+
+     Though the authors describe the algorithm as quite robust, testing
+     during development indicates that a somewhat narrower parameter set
+     is likely to successfully produce a graph. Some suggestions have
+     been provided in the event of exceptions.
+
+     References
+     ----------
+     .. [1] "Benchmark graphs for testing community detection algorithms",
+        Andrea Lancichinetti, Santo Fortunato, and Filippo Radicchi,
+        Phys. Rev. E 78, 046110, 2008
+     .. [2] https://www.santofortunato.net/resources
+
+     """
+     # Perform some basic parameter validation.
+     if not tau1 > 1:
+         raise nx.NetworkXError("tau1 must be greater than one")
+     if not tau2 > 1:
+         raise nx.NetworkXError("tau2 must be greater than one")
+     if not 0 <= mu <= 1:
+         raise nx.NetworkXError("mu must be in the interval [0, 1]")
+
+     # Validate parameters for generating the degree sequence.
+     if max_degree is None:
+         max_degree = n
+     elif not 0 < max_degree <= n:
+         raise nx.NetworkXError("max_degree must be in the interval (0, n]")
+     if not ((min_degree is None) ^ (average_degree is None)):
+         raise nx.NetworkXError(
+             "Must assign exactly one of min_degree and average_degree"
+         )
+     if min_degree is None:
+         min_degree = _generate_min_degree(
+             tau1, average_degree, max_degree, tol, max_iters
+         )
+
+     # Generate a degree sequence with a power law distribution.
+     low, high = min_degree, max_degree
+
+     def condition(seq):
+         return sum(seq) % 2 == 0
+
+     def length(seq):
+         return len(seq) >= n
+
+     deg_seq = _powerlaw_sequence(tau1, low, high, condition, length, max_iters, seed)
+
+     # Validate parameters for generating the community size sequence.
+     if min_community is None:
+         min_community = min(deg_seq)
+     if max_community is None:
+         max_community = max(deg_seq)
+
+     # Generate a community size sequence with a power law distribution.
+     #
+     # TODO The original code incremented the number of iterations each
+     # time a new Zipf random value was drawn from the distribution. This
+     # differed from the way the number of iterations was incremented in
+     # `_powerlaw_sequence`, so this code was changed to match
+     # that one. As a result, this code is allowed many more chances to
+     # generate a valid community size sequence.
+     low, high = min_community, max_community
+
+     def condition(seq):
+         return sum(seq) == n
+
+     def length(seq):
+         return sum(seq) >= n
+
+     comms = _powerlaw_sequence(tau2, low, high, condition, length, max_iters, seed)
+
+     # Generate the communities based on the given degree sequence and
+     # community sizes.
+     max_iters *= 10 * n
+     communities = _generate_communities(deg_seq, comms, mu, max_iters, seed)
+
+     # Finally, generate the benchmark graph based on the given
+     # communities, joining nodes according to the intra- and
+     # inter-community degrees.
+     G = nx.Graph()
+     G.add_nodes_from(range(n))
+     for c in communities:
+         for u in c:
+             while G.degree(u) < round(deg_seq[u] * (1 - mu)):
+                 v = seed.choice(list(c))
+                 G.add_edge(u, v)
+             while G.degree(u) < deg_seq[u]:
+                 v = seed.choice(range(n))
+                 if v not in c:
+                     G.add_edge(u, v)
+             G.nodes[u]["community"] = c
+     return G
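A sketch that checks the generated graph against the mixing parameter: by construction roughly a fraction `mu` of each node's edges should leave its community, so the global fraction of inter-community edges should land near `mu`. This builds on the docstring example above; the exact printed value depends on the seed and NetworkX version, so the comment only claims proximity.

from networkx.generators.community import LFR_benchmark_graph

G = LFR_benchmark_graph(
    250, 3, 1.5, 0.1, average_degree=5, min_community=20, seed=10
)
# Fraction of edges whose endpoints lie in different communities.
inter = sum(1 for u, v in G.edges() if v not in G.nodes[u]["community"])
print(round(inter / G.number_of_edges(), 2))  # close to mu = 0.1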
llmeval-env/lib/python3.10/site-packages/networkx/generators/degree_seq.py ADDED
@@ -0,0 +1,868 @@
+ """Generate graphs with a given degree sequence or expected degree sequence."""
+
+ import heapq
+ import math
+ from itertools import chain, combinations, zip_longest
+ from operator import itemgetter
+
+ import networkx as nx
+ from networkx.utils import py_random_state, random_weighted_sample
+
+ __all__ = [
+     "configuration_model",
+     "directed_configuration_model",
+     "expected_degree_graph",
+     "havel_hakimi_graph",
+     "directed_havel_hakimi_graph",
+     "degree_sequence_tree",
+     "random_degree_sequence_graph",
+ ]
+
+ chaini = chain.from_iterable
+
+
+ def _to_stublist(degree_sequence):
+     """Returns a list of degree-repeated node numbers.
+
+     ``degree_sequence`` is a list of nonnegative integers representing
+     the degrees of nodes in a graph.
+
+     This function returns a list of node numbers with multiplicities
+     according to the given degree sequence. For example, if the first
+     element of ``degree_sequence`` is ``3``, then the first node number,
+     ``0``, will appear at the head of the returned list three times. The
+     node numbers are assumed to be the numbers zero through
+     ``len(degree_sequence) - 1``.
+
+     Examples
+     --------
+
+     >>> degree_sequence = [1, 2, 3]
+     >>> _to_stublist(degree_sequence)
+     [0, 1, 1, 2, 2, 2]
+
+     If a zero appears in the sequence, that means the node exists but
+     has degree zero, so that number will be skipped in the returned
+     list::
+
+     >>> degree_sequence = [2, 0, 1]
+     >>> _to_stublist(degree_sequence)
+     [0, 0, 2]
+
+     """
+     return list(chaini([n] * d for n, d in enumerate(degree_sequence)))
+
+
+ def _configuration_model(
+     deg_sequence, create_using, directed=False, in_deg_sequence=None, seed=None
+ ):
+     """Helper function for generating either undirected or directed
+     configuration model graphs.
+
+     ``deg_sequence`` is a list of nonnegative integers representing the
+     degree of the node whose label is the index of the list element.
+
+     ``create_using`` see :func:`~networkx.empty_graph`.
+
+     ``directed`` and ``in_deg_sequence`` are required if you want the
+     returned graph to be generated using the directed configuration
+     model algorithm. If ``directed`` is ``False``, then ``deg_sequence``
+     is interpreted as the degree sequence of an undirected graph and
+     ``in_deg_sequence`` is ignored. Otherwise, if ``directed`` is
+     ``True``, then ``deg_sequence`` is interpreted as the out-degree
+     sequence and ``in_deg_sequence`` as the in-degree sequence of a
+     directed graph.
+
+     .. note::
+
+        ``deg_sequence`` and ``in_deg_sequence`` need not be the same
+        length.
+
+     ``seed`` is a random.Random or numpy.random.RandomState instance
+
+     This function returns a graph, directed if and only if ``directed``
+     is ``True``, generated according to the configuration model
+     algorithm. For more information on the algorithm, see the
+     :func:`configuration_model` or :func:`directed_configuration_model`
+     functions.
+
+     """
+     n = len(deg_sequence)
+     G = nx.empty_graph(n, create_using)
+     # If empty, return the null graph immediately.
+     if n == 0:
+         return G
+     # Build a list of available degree-repeated nodes. For example,
+     # for degree sequence [3, 2, 1, 1, 1], the "stub list" is
+     # initially [0, 0, 0, 1, 1, 2, 3, 4], that is, node 0 has degree
+     # 3 and thus is repeated 3 times, etc.
+     #
+     # Also, shuffle the stub list in order to get a random sequence of
+     # node pairs.
+     if directed:
+         pairs = zip_longest(deg_sequence, in_deg_sequence, fillvalue=0)
+         # Unzip the list of pairs into a pair of lists.
+         out_deg, in_deg = zip(*pairs)
+
+         out_stublist = _to_stublist(out_deg)
+         in_stublist = _to_stublist(in_deg)
+
+         seed.shuffle(out_stublist)
+         seed.shuffle(in_stublist)
+     else:
+         stublist = _to_stublist(deg_sequence)
+         # Choose a random balanced bipartition of the stublist, which
+         # gives a random pairing of nodes. In this implementation, we
+         # shuffle the list and then split it in half.
+         n = len(stublist)
+         half = n // 2
+         seed.shuffle(stublist)
+         out_stublist, in_stublist = stublist[:half], stublist[half:]
+     G.add_edges_from(zip(out_stublist, in_stublist))
+     return G
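The undirected branch above implements the classic stub-matching step: expand each node into as many "stubs" as its degree, shuffle, and pair the two halves. A self-contained sketch of just that step:

import random

deg = [3, 2, 1, 1, 1]  # degree sequence; the sum (8) must be even
stubs = [n for n, d in enumerate(deg) for _ in range(d)]
random.Random(0).shuffle(stubs)
half = len(stubs) // 2
edges = list(zip(stubs[:half], stubs[half:]))
print(len(edges))  # 4 edges, one per matched pair of stubs; self-loops
# and parallel edges are possible, exactly as the docstrings below note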
+
+
+ @py_random_state(2)
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def configuration_model(deg_sequence, create_using=None, seed=None):
+     """Returns a random graph with the given degree sequence.
+
+     The configuration model generates a random pseudograph (graph with
+     parallel edges and self loops) by randomly assigning edges to
+     match the given degree sequence.
+
+     Parameters
+     ----------
+     deg_sequence : list of nonnegative integers
+         Each list entry corresponds to the degree of a node.
+     create_using : NetworkX graph constructor, optional (default MultiGraph)
+         Graph type to create. If graph instance, then cleared before populated.
+     seed : integer, random_state, or None (default)
+         Indicator of random number generation state.
+         See :ref:`Randomness<randomness>`.
+
+     Returns
+     -------
+     G : MultiGraph
+         A graph with the specified degree sequence.
+         Nodes are labeled starting at 0 with an index
+         corresponding to the position in deg_sequence.
+
+     Raises
+     ------
+     NetworkXError
+         If the degree sequence does not have an even sum.
+
+     See Also
+     --------
+     is_graphical
+
+     Notes
+     -----
+     As described by Newman [1]_.
+
+     A non-graphical degree sequence (not realizable by some simple
+     graph) is allowed since this function returns graphs with self
+     loops and parallel edges. An exception is raised if the degree
+     sequence does not have an even sum.
+
+     This configuration model construction process can lead to
+     duplicate edges and loops. You can remove the self-loops and
+     parallel edges (see below) which will likely result in a graph
+     that doesn't have the exact degree sequence specified.
+
+     The density of self-loops and parallel edges tends to decrease as
+     the number of nodes increases. However, typically the number of
+     self-loops will approach a Poisson distribution with a nonzero mean,
+     and similarly for the number of parallel edges. Consider a node
+     with *k* stubs. The probability of being joined to another stub of
+     the same node is basically (*k* - *1*) / *N*, where *k* is the
+     degree and *N* is the number of nodes. So the probability of a
+     self-loop scales like *c* / *N* for some constant *c*. As *N* grows,
+     this means we expect *c* self-loops. Similarly for parallel edges.
+
+     References
+     ----------
+     .. [1] M.E.J. Newman, "The structure and function of complex networks",
+        SIAM REVIEW 45-2, pp 167-256, 2003.
+
+     Examples
+     --------
+     You can create a degree sequence following a particular distribution
+     by using one of the distribution functions in
+     :mod:`~networkx.utils.random_sequence` (or one of your own). For
+     example, to create an undirected multigraph on one hundred nodes
+     with degree sequence chosen from a power law distribution:
+
+     >>> sequence = nx.random_powerlaw_tree_sequence(100, tries=5000)
+     >>> G = nx.configuration_model(sequence)
+     >>> len(G)
+     100
+     >>> actual_degrees = [d for v, d in G.degree()]
+     >>> actual_degrees == sequence
+     True
+
+     The returned graph is a multigraph, which may have parallel
+     edges. To remove any parallel edges from the returned graph:
+
+     >>> G = nx.Graph(G)
+
+     Similarly, to remove self-loops:
+
+     >>> G.remove_edges_from(nx.selfloop_edges(G))
+
+     """
+     if sum(deg_sequence) % 2 != 0:
+         msg = "Invalid degree sequence: sum of degrees must be even, not odd"
+         raise nx.NetworkXError(msg)
+
+     G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
+     if G.is_directed():
+         raise nx.NetworkXNotImplemented("not implemented for directed graphs")
+
+     G = _configuration_model(deg_sequence, G, seed=seed)
+
+     return G
+
+
+ @py_random_state(3)
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def directed_configuration_model(
+     in_degree_sequence, out_degree_sequence, create_using=None, seed=None
+ ):
+     """Returns a directed random graph with the given degree sequences.
+
+     The configuration model generates a random directed pseudograph
+     (graph with parallel edges and self loops) by randomly assigning
+     edges to match the given degree sequences.
+
+     Parameters
+     ----------
+     in_degree_sequence : list of nonnegative integers
+         Each list entry corresponds to the in-degree of a node.
+     out_degree_sequence : list of nonnegative integers
+         Each list entry corresponds to the out-degree of a node.
+     create_using : NetworkX graph constructor, optional (default MultiDiGraph)
+         Graph type to create. If graph instance, then cleared before populated.
+     seed : integer, random_state, or None (default)
+         Indicator of random number generation state.
+         See :ref:`Randomness<randomness>`.
+
+     Returns
+     -------
+     G : MultiDiGraph
+         A graph with the specified degree sequences.
+         Nodes are labeled starting at 0 with an index
+         corresponding to the position in deg_sequence.
+
+     Raises
+     ------
+     NetworkXError
+         If the degree sequences do not have the same sum.
+
+     See Also
+     --------
+     configuration_model
+
+     Notes
+     -----
+     Algorithm as described by Newman [1]_.
+
+     A non-graphical degree sequence (not realizable by some simple
+     graph) is allowed since this function returns graphs with self
+     loops and parallel edges. An exception is raised if the degree
+     sequences do not have the same sum.
+
+     This configuration model construction process can lead to
+     duplicate edges and loops. You can remove the self-loops and
+     parallel edges (see below) which will likely result in a graph
+     that doesn't have the exact degree sequence specified. This
+     "finite-size effect" decreases as the size of the graph increases.
+
+     References
+     ----------
+     .. [1] Newman, M. E. J. and Strogatz, S. H. and Watts, D. J.
+        Random graphs with arbitrary degree distributions and their applications
+        Phys. Rev. E, 64, 026118 (2001)
+
+     Examples
+     --------
+     One can modify the in- and out-degree sequences from an existing
+     directed graph in order to create a new directed graph. For example,
+     here we modify the directed path graph:
+
+     >>> D = nx.DiGraph([(0, 1), (1, 2), (2, 3)])
+     >>> din = list(d for n, d in D.in_degree())
+     >>> dout = list(d for n, d in D.out_degree())
+     >>> din.append(1)
+     >>> dout[0] = 2
+     >>> # We now expect an edge from node 0 to a new node, node 4.
+     ... D = nx.directed_configuration_model(din, dout)
+
+     The returned graph is a directed multigraph, which may have parallel
+     edges. To remove any parallel edges from the returned graph:
+
+     >>> D = nx.DiGraph(D)
+
+     Similarly, to remove self-loops:
+
+     >>> D.remove_edges_from(nx.selfloop_edges(D))
+
+     """
+     if sum(in_degree_sequence) != sum(out_degree_sequence):
+         msg = "Invalid degree sequences: sequences must have equal sums"
+         raise nx.NetworkXError(msg)
+
+     if create_using is None:
+         create_using = nx.MultiDiGraph
+
+     G = _configuration_model(
+         out_degree_sequence,
+         create_using,
+         directed=True,
+         in_deg_sequence=in_degree_sequence,
+         seed=seed,
+     )
+
+     return G
+
+
+ @py_random_state(1)
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def expected_degree_graph(w, seed=None, selfloops=True):
+     r"""Returns a random graph with given expected degrees.
+
+     Given a sequence of expected degrees $W=(w_0,w_1,\ldots,w_{n-1})$
+     of length $n$ this algorithm assigns an edge between node $u$ and
+     node $v$ with probability
+
+     .. math::
+
+        p_{uv} = \frac{w_u w_v}{\sum_k w_k} .
+
+     Parameters
+     ----------
+     w : list
+         The list of expected degrees.
+     selfloops : bool (default=True)
+         Set to False to remove the possibility of self-loop edges.
+     seed : integer, random_state, or None (default)
+         Indicator of random number generation state.
+         See :ref:`Randomness<randomness>`.
+
+     Returns
+     -------
+     Graph
+
+     Examples
+     --------
+     >>> z = [10 for i in range(100)]
+     >>> G = nx.expected_degree_graph(z)
+
+     Notes
+     -----
+     The nodes have integer labels corresponding to the index of the
+     expected degrees input sequence.
+
+     The complexity of this algorithm is $\mathcal{O}(n+m)$ where $n$ is the
+     number of nodes and $m$ is the expected number of edges.
+
+     The model in [1]_ includes the possibility of self-loop edges.
+     Set selfloops=False to produce a graph without self loops.
+
+     For finite graphs this model doesn't produce exactly the given
+     expected degree sequence. Instead the expected degrees are as
+     follows.
+
+     For the case without self loops (selfloops=False),
+
+     .. math::
+
+        E[deg(u)] = \sum_{v \ne u} p_{uv}
+                  = w_u \left( 1 - \frac{w_u}{\sum_k w_k} \right) .
+
+     NetworkX uses the standard convention that a self-loop edge counts 2
+     in the degree of a node, so with self loops (selfloops=True),
+
+     .. math::
+
+        E[deg(u)] = \sum_{v \ne u} p_{uv} + 2 p_{uu}
+                  = w_u \left( 1 + \frac{w_u}{\sum_k w_k} \right) .
+
+     References
+     ----------
+     .. [1] Fan Chung and L. Lu, Connected components in random graphs with
+        given expected degree sequences, Ann. Combinatorics, 6,
+        pp. 125-145, 2002.
+     .. [2] Joel Miller and Aric Hagberg,
+        Efficient generation of networks with given expected degrees,
+        in Algorithms and Models for the Web-Graph (WAW 2011),
+        Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732,
+        pp. 115-126, 2011.
+     """
+     n = len(w)
+     G = nx.empty_graph(n)
+
+     # If there are no nodes or no edges in the graph, return the empty graph.
+     if n == 0 or max(w) == 0:
+         return G
+
+     rho = 1 / sum(w)
+     # Sort the weights in decreasing order. The original order of the
+     # weights dictates the order of the (integer) node labels, so we
+     # need to remember the permutation applied in the sorting.
+     order = sorted(enumerate(w), key=itemgetter(1), reverse=True)
+     mapping = {c: u for c, (u, v) in enumerate(order)}
+     seq = [v for u, v in order]
+     last = n
+     if not selfloops:
+         last -= 1
+     for u in range(last):
+         v = u
+         if not selfloops:
+             v += 1
+         factor = seq[u] * rho
+         p = min(seq[v] * factor, 1)
+         while v < n and p > 0:
+             if p != 1:
+                 r = seed.random()
+                 v += math.floor(math.log(r, 1 - p))
+             if v < n:
+                 q = min(seq[v] * factor, 1)
+                 if seed.random() < q / p:
+                     G.add_edge(mapping[u], mapping[v])
+                 v += 1
+                 p = q
+     return G
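A quick empirical check of the expected-degree formula above (a sketch; the exact sample mean depends on the seed, so the comment only claims proximity to the target):

import networkx as nx

w = [10] * 500  # every node has expected degree close to 10
G = nx.expected_degree_graph(w, seed=1)
mean_deg = sum(d for _, d in G.degree()) / len(G)
print(round(mean_deg, 1))  # approximately 10, per E[deg(u)] above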
+
+
+ @nx._dispatchable(graphs=None, returns_graph=True)
+ def havel_hakimi_graph(deg_sequence, create_using=None):
+     """Returns a simple graph with given degree sequence constructed
+     using the Havel-Hakimi algorithm.
+
+     Parameters
+     ----------
+     deg_sequence : list of integers
+         Each integer corresponds to the degree of a node (need not be sorted).
+     create_using : NetworkX graph constructor, optional (default=nx.Graph)
+         Graph type to create. If graph instance, then cleared before populated.
+         Directed graphs are not allowed.
+
+     Raises
+     ------
+     NetworkXException
+         For a non-graphical degree sequence (i.e. one
+         not realizable by some simple graph).
+
+     Notes
+     -----
+     The Havel-Hakimi algorithm constructs a simple graph by
+     successively connecting the node of highest degree to other nodes
+     of highest degree, resorting remaining nodes by degree, and
+     repeating the process. The resulting graph has a high
+     degree-assortativity. Nodes are labeled 0, ..., len(deg_sequence) - 1,
+     corresponding to their position in deg_sequence.
+
+     The basic algorithm is from Hakimi [1]_ and was generalized by
+     Kleitman and Wang [2]_.
+
+     References
+     ----------
+     .. [1] Hakimi S., On Realizability of a Set of Integers as
+        Degrees of the Vertices of a Linear Graph. I,
+        Journal of SIAM, 10(3), pp. 496-506 (1962)
+     .. [2] Kleitman D.J. and Wang D.L.
+        Algorithms for Constructing Graphs and Digraphs with Given Valences
+        and Factors Discrete Mathematics, 6(1), pp. 79-88 (1973)
+     """
+     if not nx.is_graphical(deg_sequence):
+         raise nx.NetworkXError("Invalid degree sequence")
+
+     p = len(deg_sequence)
+     G = nx.empty_graph(p, create_using)
+     if G.is_directed():
+         raise nx.NetworkXError("Directed graphs are not supported")
+     num_degs = [[] for i in range(p)]
+     dmax, dsum, n = 0, 0, 0
+     for d in deg_sequence:
+         # Process only the non-zero integers
+         if d > 0:
+             num_degs[d].append(n)
+             dmax, dsum, n = max(dmax, d), dsum + d, n + 1
+     # Return graph if no edges
+     if n == 0:
+         return G
+
+     modstubs = [(0, 0)] * (dmax + 1)
+     # Successively reduce degree sequence by removing the maximum degree
+     while n > 0:
+         # Retrieve the maximum degree in the sequence
+         while len(num_degs[dmax]) == 0:
+             dmax -= 1
+         # If there are not enough stubs to connect to, then the sequence is
+         # not graphical
+         if dmax > n - 1:
+             raise nx.NetworkXError("Non-graphical integer sequence")
+
+         # Remove largest stub in list
+         source = num_degs[dmax].pop()
+         n -= 1
+         # Reduce the next dmax largest stubs
+         mslen = 0
+         k = dmax
+         for i in range(dmax):
+             while len(num_degs[k]) == 0:
+                 k -= 1
+             target = num_degs[k].pop()
+             G.add_edge(source, target)
+             n -= 1
+             if k > 1:
+                 modstubs[mslen] = (k - 1, target)
+                 mslen += 1
+         # Add back to the list any nonzero stubs that were removed
+         for i in range(mslen):
+             (stubval, stubtarget) = modstubs[i]
+             num_degs[stubval].append(stubtarget)
+             n += 1
+
+     return G
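Since Havel-Hakimi realizes a graphical sequence exactly (unlike the configuration model, which may need post-hoc simplification), the output degrees match the input multiset. A small deterministic check:

import networkx as nx

G = nx.havel_hakimi_graph([3, 3, 2, 2, 1, 1])
print(sorted(d for _, d in G.degree()))  # [1, 1, 2, 2, 3, 3]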


@nx._dispatchable(graphs=None, returns_graph=True)
def directed_havel_hakimi_graph(in_deg_sequence, out_deg_sequence, create_using=None):
    """Returns a directed graph with the given degree sequences.

    Parameters
    ----------
    in_deg_sequence : list of integers
        Each list entry corresponds to the in-degree of a node.
    out_deg_sequence : list of integers
        Each list entry corresponds to the out-degree of a node.
    create_using : NetworkX graph constructor, optional (default DiGraph)
        Graph type to create. If graph instance, then cleared before populated.

    Returns
    -------
    G : DiGraph
        A graph with the specified degree sequences.
        Nodes are labeled starting at 0 with an index
        corresponding to the position in the degree sequences.

    Raises
    ------
    NetworkXError
        If the degree sequences are not digraphical.

    See Also
    --------
    configuration_model

    Notes
    -----
    Algorithm as described by Kleitman and Wang [1]_.

    References
    ----------
    .. [1] D.J. Kleitman and D.L. Wang
       Algorithms for Constructing Graphs and Digraphs with Given Valences
       and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
    """
    in_deg_sequence = nx.utils.make_list_of_ints(in_deg_sequence)
    out_deg_sequence = nx.utils.make_list_of_ints(out_deg_sequence)

    # Process the sequences and form two heaps to store degree pairs with
    # either zero or nonzero out degrees
    sumin, sumout = 0, 0
    nin, nout = len(in_deg_sequence), len(out_deg_sequence)
    maxn = max(nin, nout)
    G = nx.empty_graph(maxn, create_using, default=nx.DiGraph)
    if maxn == 0:
        return G
    maxin = 0
    stubheap, zeroheap = [], []
    for n in range(maxn):
        in_deg, out_deg = 0, 0
        if n < nout:
            out_deg = out_deg_sequence[n]
        if n < nin:
            in_deg = in_deg_sequence[n]
        if in_deg < 0 or out_deg < 0:
            raise nx.NetworkXError(
                "Invalid degree sequences. Sequence values must be non-negative."
            )
        sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg)
        if in_deg > 0:
            stubheap.append((-1 * out_deg, -1 * in_deg, n))
        elif out_deg > 0:
            zeroheap.append((-1 * out_deg, n))
    if sumin != sumout:
        raise nx.NetworkXError(
            "Invalid degree sequences. Sequences must have equal sums."
        )
    heapq.heapify(stubheap)
    heapq.heapify(zeroheap)

    modstubs = [(0, 0, 0)] * (maxin + 1)
    # Successively reduce degree sequence by removing the maximum
    while stubheap:
        # Remove first value in the sequence with a non-zero in degree
        (freeout, freein, target) = heapq.heappop(stubheap)
        freein *= -1
        if freein > len(stubheap) + len(zeroheap):
            raise nx.NetworkXError("Non-digraphical integer sequence")

        # Attach arcs from the nodes with the most stubs
        mslen = 0
        for i in range(freein):
            if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0][0]):
                (stubout, stubsource) = heapq.heappop(zeroheap)
                stubin = 0
            else:
                (stubout, stubin, stubsource) = heapq.heappop(stubheap)
            if stubout == 0:
                raise nx.NetworkXError("Non-digraphical integer sequence")
            G.add_edge(stubsource, target)
            # Check if source is now totally connected
            if stubout + 1 < 0 or stubin < 0:
                modstubs[mslen] = (stubout + 1, stubin, stubsource)
                mslen += 1

        # Add the nodes back to the heaps that still have available stubs
        for i in range(mslen):
            stub = modstubs[i]
            if stub[1] < 0:
                heapq.heappush(stubheap, stub)
            else:
                heapq.heappush(zeroheap, (stub[0], stub[2]))
        if freeout < 0:
            heapq.heappush(zeroheap, (freeout, target))

    return G
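
# Quick usage sketch (assuming `import networkx as nx`): when the pair of
# sequences is digraphical, the result matches both sequences position by
# position.
#
#     >>> D = nx.directed_havel_hakimi_graph([1, 2, 0, 1], [1, 1, 1, 1])
#     >>> [D.in_degree(v) for v in range(4)]
#     [1, 2, 0, 1]
#     >>> [D.out_degree(v) for v in range(4)]
#     [1, 1, 1, 1]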


@nx._dispatchable(graphs=None, returns_graph=True)
def degree_sequence_tree(deg_sequence, create_using=None):
    """Make a tree for the given degree sequence.

    A tree has ``#nodes - #edges = 1``, so the degree sequence must satisfy
    ``len(deg_sequence) - sum(deg_sequence) / 2 = 1``.
    """
    # The sum of the degree sequence must be even (for any undirected graph).
    degree_sum = sum(deg_sequence)
    if degree_sum % 2 != 0:
        msg = "Invalid degree sequence: sum of degrees must be even, not odd"
        raise nx.NetworkXError(msg)
    if len(deg_sequence) - degree_sum // 2 != 1:
        msg = (
            "Invalid degree sequence: tree must have number of nodes equal"
            " to one more than the number of edges"
        )
        raise nx.NetworkXError(msg)
    G = nx.empty_graph(0, create_using)
    if G.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")

    # Sort all degrees greater than 1 in decreasing order.
    #
    # TODO Does this need to be sorted in reverse order?
    deg = sorted((s for s in deg_sequence if s > 1), reverse=True)

    # make path graph as backbone
    n = len(deg) + 2
    nx.add_path(G, range(n))
    last = n

    # add the leaves
    for source in range(1, n - 1):
        nedges = deg.pop() - 2
        for target in range(last, last + nedges):
            G.add_edge(source, target)
        last += nedges

    # in case we added one too many
    if len(G) > len(deg_sequence):
        G.remove_node(0)
    return G
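
# Quick usage sketch (assuming `import networkx as nx`): a valid tree
# sequence on 5 nodes must sum to 2 * (5 - 1) = 8.
#
#     >>> T = nx.degree_sequence_tree([2, 1, 1, 3, 1])
#     >>> nx.is_tree(T)
#     True
#     >>> sorted(d for _, d in T.degree())
#     [1, 1, 1, 2, 3]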


@py_random_state(1)
@nx._dispatchable(graphs=None, returns_graph=True)
def random_degree_sequence_graph(sequence, seed=None, tries=10):
    r"""Returns a simple random graph with the given degree sequence.

    If the maximum degree $d_m$ in the sequence is $O(m^{1/4})$ then the
    algorithm produces almost uniform random graphs in $O(m d_m)$ time
    where $m$ is the number of edges.

    Parameters
    ----------
    sequence : list of integers
        Sequence of degrees
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    tries : int, optional
        Maximum number of tries to create a graph

    Returns
    -------
    G : Graph
        A graph with the specified degree sequence.
        Nodes are labeled starting at 0 with an index
        corresponding to the position in the sequence.

    Raises
    ------
    NetworkXUnfeasible
        If the degree sequence is not graphical.
    NetworkXError
        If a graph is not produced in the specified number of tries.

    See Also
    --------
    is_graphical, configuration_model

    Notes
    -----
    The generator algorithm [1]_ is not guaranteed to produce a graph.

    References
    ----------
    .. [1] Mohsen Bayati, Jeong Han Kim, and Amin Saberi,
       A sequential algorithm for generating random graphs.
       Algorithmica, Volume 58, Number 4, 860-910,
       DOI: 10.1007/s00453-009-9340-1

    Examples
    --------
    >>> sequence = [1, 2, 2, 3]
    >>> G = nx.random_degree_sequence_graph(sequence, seed=42)
    >>> sorted(d for n, d in G.degree())
    [1, 2, 2, 3]
    """
    DSRG = DegreeSequenceRandomGraph(sequence, seed)
    for try_n in range(tries):
        try:
            return DSRG.generate()
        except nx.NetworkXUnfeasible:
            pass
    raise nx.NetworkXError(f"failed to generate graph in {tries} tries")


class DegreeSequenceRandomGraph:
    # class to generate random graphs with a given degree sequence
    # use random_degree_sequence_graph()
    def __init__(self, degree, rng):
        if not nx.is_graphical(degree):
            raise nx.NetworkXUnfeasible("degree sequence is not graphical")
        self.rng = rng
        self.degree = list(degree)
        # node labels are integers 0,...,n-1
        self.m = sum(self.degree) / 2.0  # number of edges
        try:
            self.dmax = max(self.degree)  # maximum degree
        except ValueError:
            self.dmax = 0

    def generate(self):
        # remaining_degree is mapping from int->remaining degree
        self.remaining_degree = dict(enumerate(self.degree))
        # add all nodes to make sure we get isolated nodes
        self.graph = nx.Graph()
        self.graph.add_nodes_from(self.remaining_degree)
        # remove zero degree nodes
        for n, d in list(self.remaining_degree.items()):
            if d == 0:
                del self.remaining_degree[n]
        if len(self.remaining_degree) > 0:
            # build graph in three phases according to how many unmatched edges
            self.phase1()
            self.phase2()
            self.phase3()
        return self.graph

    def update_remaining(self, u, v, aux_graph=None):
        # decrement remaining degrees, modify auxiliary graph if in phase3
        if aux_graph is not None:
            # remove edges from auxiliary graph
            aux_graph.remove_edge(u, v)
        if self.remaining_degree[u] == 1:
            del self.remaining_degree[u]
            if aux_graph is not None:
                aux_graph.remove_node(u)
        else:
            self.remaining_degree[u] -= 1
        if self.remaining_degree[v] == 1:
            del self.remaining_degree[v]
            if aux_graph is not None:
                aux_graph.remove_node(v)
        else:
            self.remaining_degree[v] -= 1

    def p(self, u, v):
        # degree probability
        return 1 - self.degree[u] * self.degree[v] / (4.0 * self.m)

    def q(self, u, v):
        # remaining degree probability
        norm = max(self.remaining_degree.values()) ** 2
        return self.remaining_degree[u] * self.remaining_degree[v] / norm

    def suitable_edge(self):
        """Returns True if and only if an arbitrary remaining node can
        potentially be joined with some other remaining node.
        """
        nodes = iter(self.remaining_degree)
        u = next(nodes)
        return any(v not in self.graph[u] for v in nodes)

    def phase1(self):
        # choose node pairs from (degree) weighted distribution
        rem_deg = self.remaining_degree
        while sum(rem_deg.values()) >= 2 * self.dmax**2:
            u, v = sorted(random_weighted_sample(rem_deg, 2, self.rng))
            if self.graph.has_edge(u, v):
                continue
            if self.rng.random() < self.p(u, v):  # accept edge
                self.graph.add_edge(u, v)
                self.update_remaining(u, v)

    def phase2(self):
        # choose remaining nodes uniformly at random and use rejection sampling
        remaining_deg = self.remaining_degree
        rng = self.rng
        while len(remaining_deg) >= 2 * self.dmax:
            while True:
                u, v = sorted(rng.sample(list(remaining_deg.keys()), 2))
                if self.graph.has_edge(u, v):
                    continue
                if rng.random() < self.q(u, v):
                    break
            if rng.random() < self.p(u, v):  # accept edge
                self.graph.add_edge(u, v)
                self.update_remaining(u, v)

    def phase3(self):
        # build potential remaining edges and choose with rejection sampling
        potential_edges = combinations(self.remaining_degree, 2)
        # build auxiliary graph of potential edges not already in graph
        H = nx.Graph(
            [(u, v) for (u, v) in potential_edges if not self.graph.has_edge(u, v)]
        )
        rng = self.rng
        while self.remaining_degree:
            if not self.suitable_edge():
                raise nx.NetworkXUnfeasible("no suitable edges left")
            while True:
                u, v = sorted(rng.choice(list(H.edges())))
                if rng.random() < self.q(u, v):
                    break
            if rng.random() < self.p(u, v):  # accept edge
                self.graph.add_edge(u, v)
                self.update_remaining(u, v, aux_graph=H)
llmeval-env/lib/python3.10/site-packages/networkx/generators/directed.py ADDED
@@ -0,0 +1,501 @@
+ """
2
+ Generators for some directed graphs, including growing network (GN) graphs and
3
+ scale-free graphs.
4
+
5
+ """
6
+
7
+ import numbers
8
+ from collections import Counter
9
+
10
+ import networkx as nx
11
+ from networkx.generators.classic import empty_graph
12
+ from networkx.utils import discrete_sequence, py_random_state, weighted_choice
13
+
14
+ __all__ = [
15
+ "gn_graph",
16
+ "gnc_graph",
17
+ "gnr_graph",
18
+ "random_k_out_graph",
19
+ "scale_free_graph",
20
+ ]


@py_random_state(3)
@nx._dispatchable(graphs=None, returns_graph=True)
def gn_graph(n, kernel=None, create_using=None, seed=None):
    """Returns the growing network (GN) digraph with `n` nodes.

    The GN graph is built by adding nodes one at a time with a link to one
    previously added node. The target node for the link is chosen with
    probability based on degree. The default attachment kernel is a linear
    function of the degree of a node.

    The graph is always a (directed) tree.

    Parameters
    ----------
    n : int
        The number of nodes for the generated graph.
    kernel : function
        The attachment kernel.
    create_using : NetworkX graph constructor, optional (default DiGraph)
        Graph type to create. If graph instance, then cleared before populated.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Examples
    --------
    To create the undirected GN graph, use the :meth:`~DiGraph.to_undirected`
    method::

    >>> D = nx.gn_graph(10)  # the GN graph
    >>> G = D.to_undirected()  # the undirected version

    To specify an attachment kernel, use the `kernel` keyword argument::

    >>> D = nx.gn_graph(10, kernel=lambda x: x**1.5)  # A_k = k^1.5

    References
    ----------
    .. [1] P. L. Krapivsky and S. Redner,
       Organization of Growing Random Networks,
       Phys. Rev. E, 63, 066123, 2001.
    """
    G = empty_graph(1, create_using, default=nx.DiGraph)
    if not G.is_directed():
        raise nx.NetworkXError("create_using must indicate a Directed Graph")

    if kernel is None:

        def kernel(x):
            return x

    if n == 1:
        return G

    G.add_edge(1, 0)  # get started
    ds = [1, 1]  # degree sequence

    for source in range(2, n):
        # compute distribution from kernel and degree
        dist = [kernel(d) for d in ds]
        # choose target from discrete distribution
        target = discrete_sequence(1, distribution=dist, seed=seed)[0]
        G.add_edge(source, target)
        ds.append(1)  # the source has only one link (degree one)
        ds[target] += 1  # add one to the target link degree
    return G


@py_random_state(3)
@nx._dispatchable(graphs=None, returns_graph=True)
def gnr_graph(n, p, create_using=None, seed=None):
    """Returns the growing network with redirection (GNR) digraph with `n`
    nodes and redirection probability `p`.

    The GNR graph is built by adding nodes one at a time with a link to one
    previously added node. The previous target node is chosen uniformly at
    random. With probability `p` the link is instead "redirected" to the
    successor node of the target.

    The graph is always a (directed) tree.

    Parameters
    ----------
    n : int
        The number of nodes for the generated graph.
    p : float
        The redirection probability.
    create_using : NetworkX graph constructor, optional (default DiGraph)
        Graph type to create. If graph instance, then cleared before populated.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Examples
    --------
    To create the undirected GNR graph, use the :meth:`~DiGraph.to_undirected`
    method::

    >>> D = nx.gnr_graph(10, 0.5)  # the GNR graph
    >>> G = D.to_undirected()  # the undirected version

    References
    ----------
    .. [1] P. L. Krapivsky and S. Redner,
       Organization of Growing Random Networks,
       Phys. Rev. E, 63, 066123, 2001.
    """
    G = empty_graph(1, create_using, default=nx.DiGraph)
    if not G.is_directed():
        raise nx.NetworkXError("create_using must indicate a Directed Graph")

    if n == 1:
        return G

    for source in range(1, n):
        target = seed.randrange(0, source)
        if seed.random() < p and target != 0:
            target = next(G.successors(target))
        G.add_edge(source, target)
    return G


@py_random_state(2)
@nx._dispatchable(graphs=None, returns_graph=True)
def gnc_graph(n, create_using=None, seed=None):
    """Returns the growing network with copying (GNC) digraph with `n` nodes.

    The GNC graph is built by adding nodes one at a time with a link to one
    previously added node (chosen uniformly at random) and to all of that
    node's successors.

    Parameters
    ----------
    n : int
        The number of nodes for the generated graph.
    create_using : NetworkX graph constructor, optional (default DiGraph)
        Graph type to create. If graph instance, then cleared before populated.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    References
    ----------
    .. [1] P. L. Krapivsky and S. Redner,
       Network Growth by Copying,
       Phys. Rev. E, 71, 036118, 2005.
    """
    G = empty_graph(1, create_using, default=nx.DiGraph)
    if not G.is_directed():
        raise nx.NetworkXError("create_using must indicate a Directed Graph")

    if n == 1:
        return G

    for source in range(1, n):
        target = seed.randrange(0, source)
        for succ in G.successors(target):
            G.add_edge(source, succ)
        G.add_edge(source, target)
    return G
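
# Quick usage sketch (assuming `import networkx as nx`): every node added
# after the first links to a target plus all of the target's successors, so
# each of them ends up with at least one outgoing edge.
#
#     >>> D = nx.gnc_graph(5, seed=1)
#     >>> D.number_of_nodes()
#     5
#     >>> all(D.out_degree(v) >= 1 for v in range(1, 5))
#     True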


@py_random_state(6)
@nx._dispatchable(graphs=None, returns_graph=True)
def scale_free_graph(
    n,
    alpha=0.41,
    beta=0.54,
    gamma=0.05,
    delta_in=0.2,
    delta_out=0,
    seed=None,
    initial_graph=None,
):
    """Returns a scale-free directed graph.

    Parameters
    ----------
    n : integer
        Number of nodes in graph
    alpha : float
        Probability for adding a new node connected to an existing node
        chosen randomly according to the in-degree distribution.
    beta : float
        Probability for adding an edge between two existing nodes.
        One existing node is chosen randomly according to the in-degree
        distribution and the other chosen randomly according to the out-degree
        distribution.
    gamma : float
        Probability for adding a new node connected to an existing node
        chosen randomly according to the out-degree distribution.
    delta_in : float
        Bias for choosing nodes from in-degree distribution.
    delta_out : float
        Bias for choosing nodes from out-degree distribution.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    initial_graph : MultiDiGraph instance, optional
        Build the scale-free graph starting from this initial MultiDiGraph,
        if provided.

    Returns
    -------
    MultiDiGraph

    Examples
    --------
    Create a scale-free graph on one hundred nodes::

    >>> G = nx.scale_free_graph(100)

    Notes
    -----
    The sum of `alpha`, `beta`, and `gamma` must be 1.

    References
    ----------
    .. [1] B. Bollobás, C. Borgs, J. Chayes, and O. Riordan,
       Directed scale-free graphs,
       Proceedings of the fourteenth annual ACM-SIAM Symposium on
       Discrete Algorithms, 132--139, 2003.
    """

    def _choose_node(candidates, node_list, delta):
        if delta > 0:
            bias_sum = len(node_list) * delta
            p_delta = bias_sum / (bias_sum + len(candidates))
            if seed.random() < p_delta:
                return seed.choice(node_list)
        return seed.choice(candidates)

    if initial_graph is not None and hasattr(initial_graph, "_adj"):
        if not isinstance(initial_graph, nx.MultiDiGraph):
            raise nx.NetworkXError("initial_graph must be a MultiDiGraph.")
        G = initial_graph
    else:
        # Start with 3-cycle
        G = nx.MultiDiGraph([(0, 1), (1, 2), (2, 0)])

    if alpha <= 0:
        raise ValueError("alpha must be > 0.")
    if beta <= 0:
        raise ValueError("beta must be > 0.")
    if gamma <= 0:
        raise ValueError("gamma must be > 0.")

    if abs(alpha + beta + gamma - 1.0) >= 1e-9:
        raise ValueError("alpha+beta+gamma must equal 1.")

    if delta_in < 0:
        raise ValueError("delta_in must be >= 0.")

    if delta_out < 0:
        raise ValueError("delta_out must be >= 0.")

    # pre-populate degree states
    vs = sum((count * [idx] for idx, count in G.out_degree()), [])
    ws = sum((count * [idx] for idx, count in G.in_degree()), [])

    # pre-populate node state
    node_list = list(G.nodes())

    # see if there already are number-based nodes
    numeric_nodes = [n for n in node_list if isinstance(n, numbers.Number)]
    if len(numeric_nodes) > 0:
        # set cursor for new nodes appropriately
        cursor = max(int(n.real) for n in numeric_nodes) + 1
    else:
        # or start at zero
        cursor = 0

    while len(G) < n:
        r = seed.random()

        # random choice in alpha,beta,gamma ranges
        if r < alpha:
            # alpha
            # add new node v
            v = cursor
            cursor += 1
            # also add to node state
            node_list.append(v)
            # choose w according to in-degree and delta_in
            w = _choose_node(ws, node_list, delta_in)

        elif r < alpha + beta:
            # beta
            # choose v according to out-degree and delta_out
            v = _choose_node(vs, node_list, delta_out)
            # choose w according to in-degree and delta_in
            w = _choose_node(ws, node_list, delta_in)

        else:
            # gamma
            # choose v according to out-degree and delta_out
            v = _choose_node(vs, node_list, delta_out)
            # add new node w
            w = cursor
            cursor += 1
            # also add to node state
            node_list.append(w)

        # add edge to graph
        G.add_edge(v, w)

        # update degree states
        vs.append(v)
        ws.append(w)

    return G
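
# Quick usage sketch (assuming `import networkx as nx`): custom mixing
# probabilities are fine as long as they sum to 1 (checked above with a
# 1e-9 tolerance), and growth stops at exactly `n` nodes.
#
#     >>> G = nx.scale_free_graph(50, alpha=0.5, beta=0.4, gamma=0.1, seed=7)
#     >>> G.number_of_nodes()
#     50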


@py_random_state(4)
@nx._dispatchable(graphs=None, returns_graph=True)
def random_uniform_k_out_graph(n, k, self_loops=True, with_replacement=True, seed=None):
    """Returns a random `k`-out graph with uniform attachment.

    A random `k`-out graph with uniform attachment is a multidigraph
    generated by the following algorithm. For each node *u*, choose
    `k` nodes *v* uniformly at random (with replacement). Add a
    directed edge joining *u* to *v*.

    Parameters
    ----------
    n : int
        The number of nodes in the returned graph.

    k : int
        The out-degree of each node in the returned graph.

    self_loops : bool
        If True, self-loops are allowed when generating the graph.

    with_replacement : bool
        If True, neighbors are chosen with replacement and the
        returned graph will be a directed multigraph. Otherwise,
        neighbors are chosen without replacement and the returned graph
        will be a directed graph.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    NetworkX graph
        A `k`-out-regular directed graph generated according to the
        above algorithm. It will be a multigraph if and only if
        `with_replacement` is True.

    Raises
    ------
    ValueError
        If `with_replacement` is False and `k` is greater than `n`.

    See also
    --------
    random_k_out_graph

    Notes
    -----
    The returned digraph or multidigraph may not be strongly connected, or
    even weakly connected.

    If `with_replacement` is True, this function behaves as
    :func:`random_k_out_graph` would if its `alpha` parameter were set to
    positive infinity.

    """
    if with_replacement:
        create_using = nx.MultiDiGraph()

        def sample(v, nodes):
            if not self_loops:
                nodes = nodes - {v}
            return (seed.choice(list(nodes)) for i in range(k))

    else:
        create_using = nx.DiGraph()

        def sample(v, nodes):
            if not self_loops:
                nodes = nodes - {v}
            return seed.sample(list(nodes), k)

    G = nx.empty_graph(n, create_using)
    nodes = set(G)
    for u in G:
        G.add_edges_from((u, v) for v in sample(u, nodes))
    return G
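
# Quick usage sketch. This helper is not listed in `__all__` above, so it is
# shown here called directly in this module's namespace; by construction
# every node ends up with out-degree exactly `k`.
#
#     >>> G = random_uniform_k_out_graph(5, 2, with_replacement=False, seed=3)
#     >>> all(d == 2 for _, d in G.out_degree())
#     True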


@py_random_state(4)
@nx._dispatchable(graphs=None, returns_graph=True)
def random_k_out_graph(n, k, alpha, self_loops=True, seed=None):
    """Returns a random `k`-out graph with preferential attachment.

    A random `k`-out graph with preferential attachment is a
    multidigraph generated by the following algorithm.

    1. Begin with an empty digraph, and initially set each node to have
       weight `alpha`.
    2. Choose a node `u` with out-degree less than `k` uniformly at
       random.
    3. Choose a node `v` with probability proportional to its weight.
    4. Add a directed edge from `u` to `v`, and increase the weight
       of `v` by one.
    5. If each node has out-degree `k`, halt, otherwise repeat from
       step 2.

    For more information on this model of random graph, see [1].

    Parameters
    ----------
    n : int
        The number of nodes in the returned graph.

    k : int
        The out-degree of each node in the returned graph.

    alpha : float
        A positive :class:`float` representing the initial weight of
        each vertex. A higher number means that in step 3 above, nodes
        will be chosen more like a true uniformly random sample, and a
        lower number means that nodes are more likely to be chosen as
        their in-degree increases. If this parameter is not positive, a
        :exc:`ValueError` is raised.

    self_loops : bool
        If True, self-loops are allowed when generating the graph.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    :class:`~networkx.classes.MultiDiGraph`
        A `k`-out-regular multidigraph generated according to the above
        algorithm.

    Raises
    ------
    ValueError
        If `alpha` is not positive.

    Notes
    -----
    The returned multidigraph may not be strongly connected, or even
    weakly connected.

    References
    ----------
    [1]: Peterson, Nicholas R., and Boris Pittel.
         "Distance between two random `k`-out digraphs, with and without
         preferential attachment."
         arXiv preprint arXiv:1311.5961 (2013).
         <https://arxiv.org/abs/1311.5961>

    """
    if alpha <= 0:
        # The docstring promises a strictly positive initial weight; with
        # alpha == 0 every weight would start at zero and the weighted choice
        # in step 3 would be ill-defined on the first iteration.
        raise ValueError("alpha must be positive")
    G = nx.empty_graph(n, create_using=nx.MultiDiGraph)
    weights = Counter({v: alpha for v in G})
    for i in range(k * n):
        u = seed.choice([v for v, d in G.out_degree() if d < k])
        # If self-loops are not allowed, make the source node `u` have
        # weight zero.
        if not self_loops:
            adjustment = Counter({u: weights[u]})
        else:
            adjustment = Counter()
        v = weighted_choice(weights - adjustment, seed=seed)
        G.add_edge(u, v)
        weights[v] += 1
    return G
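
# Quick usage sketch (assuming `import networkx as nx`): the generator stops
# once every node has out-degree `k`, so the result is k-out-regular by
# construction.
#
#     >>> G = nx.random_k_out_graph(10, 3, alpha=1, seed=42)
#     >>> all(d == 3 for _, d in G.out_degree())
#     True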
llmeval-env/lib/python3.10/site-packages/networkx/generators/duplication.py ADDED
@@ -0,0 +1,163 @@
+ """Functions for generating graphs based on the "duplication" method.
2
+
3
+ These graph generators start with a small initial graph then duplicate
4
+ nodes and (partially) duplicate their edges. These functions are
5
+ generally inspired by biological networks.
6
+
7
+ """
8
+ import networkx as nx
9
+ from networkx.exception import NetworkXError
10
+ from networkx.utils import py_random_state
11
+
12
+ __all__ = ["partial_duplication_graph", "duplication_divergence_graph"]
13
+
14
+
15
+ @py_random_state(4)
16
+ @nx._dispatchable(graphs=None, returns_graph=True)
17
+ def partial_duplication_graph(N, n, p, q, seed=None):
18
+ """Returns a random graph using the partial duplication model.
19
+
20
+ Parameters
21
+ ----------
22
+ N : int
23
+ The total number of nodes in the final graph.
24
+
25
+ n : int
26
+ The number of nodes in the initial clique.
27
+
28
+ p : float
29
+ The probability of joining each neighbor of a node to the
30
+ duplicate node. Must be a number in the between zero and one,
31
+ inclusive.
32
+
33
+ q : float
34
+ The probability of joining the source node to the duplicate
35
+ node. Must be a number in the between zero and one, inclusive.
36
+
37
+ seed : integer, random_state, or None (default)
38
+ Indicator of random number generation state.
39
+ See :ref:`Randomness<randomness>`.
40
+
41
+ Notes
42
+ -----
43
+ A graph of nodes is grown by creating a fully connected graph
44
+ of size `n`. The following procedure is then repeated until
45
+ a total of `N` nodes have been reached.
46
+
47
+ 1. A random node, *u*, is picked and a new node, *v*, is created.
48
+ 2. For each neighbor of *u* an edge from the neighbor to *v* is created
49
+ with probability `p`.
50
+ 3. An edge from *u* to *v* is created with probability `q`.
51
+
52
+ This algorithm appears in [1].
53
+
54
+ This implementation allows the possibility of generating
55
+ disconnected graphs.
56
+
57
+ References
58
+ ----------
59
+ .. [1] Knudsen Michael, and Carsten Wiuf. "A Markov chain approach to
60
+ randomly grown graphs." Journal of Applied Mathematics 2008.
61
+ <https://doi.org/10.1155/2008/190836>
62
+
63
+ """
64
+ if p < 0 or p > 1 or q < 0 or q > 1:
65
+ msg = "partial duplication graph must have 0 <= p, q <= 1."
66
+ raise NetworkXError(msg)
67
+ if n > N:
68
+ raise NetworkXError("partial duplication graph must have n <= N.")
69
+
70
+ G = nx.complete_graph(n)
71
+ for new_node in range(n, N):
72
+ # Pick a random vertex, u, already in the graph.
73
+ src_node = seed.randint(0, new_node - 1)
74
+
75
+ # Add a new vertex, v, to the graph.
76
+ G.add_node(new_node)
77
+
78
+ # For each neighbor of u...
79
+ for nbr_node in list(nx.all_neighbors(G, src_node)):
80
+ # Add the neighbor to v with probability p.
81
+ if seed.random() < p:
82
+ G.add_edge(new_node, nbr_node)
83
+
84
+ # Join v and u with probability q.
85
+ if seed.random() < q:
86
+ G.add_edge(new_node, src_node)
87
+ return G
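
# Quick usage sketch (assuming `import networkx as nx`): growth stops at
# exactly `N` nodes, whatever `p` and `q` are.
#
#     >>> G = nx.partial_duplication_graph(20, 3, 0.5, 0.5, seed=1)
#     >>> G.number_of_nodes()
#     20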


@py_random_state(2)
@nx._dispatchable(graphs=None, returns_graph=True)
def duplication_divergence_graph(n, p, seed=None):
    """Returns an undirected graph using the duplication-divergence model.

    A graph of `n` nodes is created by repeatedly duplicating a randomly
    chosen node and retaining each edge incident to the original node with
    retention probability `p`.

    Parameters
    ----------
    n : int
        The desired number of nodes in the graph.
    p : float
        The probability for retaining the edge of the replicated node.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    G : Graph

    Raises
    ------
    NetworkXError
        If `p` is not a valid probability.
        If `n` is less than 2.

    Notes
    -----
    This algorithm appears in [1].

    This implementation disallows the possibility of generating
    disconnected graphs.

    References
    ----------
    .. [1] I. Ispolatov, P. L. Krapivsky, A. Yuryev,
       "Duplication-divergence model of protein interaction network",
       Phys. Rev. E, 71, 061911, 2005.

    """
    if p > 1 or p < 0:
        msg = f"NetworkXError p={p} is not in [0,1]."
        raise nx.NetworkXError(msg)
    if n < 2:
        msg = "n must be greater than or equal to 2"
        raise nx.NetworkXError(msg)

    G = nx.Graph()

    # Initialize the graph with two connected nodes.
    G.add_edge(0, 1)
    i = 2
    while i < n:
        # Choose a random node from current graph to duplicate.
        random_node = seed.choice(list(G))
        # Make the replica.
        G.add_node(i)
        # flag indicates whether at least one edge is connected on the replica.
        flag = False
        for nbr in G.neighbors(random_node):
            if seed.random() < p:
                # Link retention step.
                G.add_edge(i, nbr)
                flag = True
        if not flag:
            # Delete replica if no edges retained.
            G.remove_node(i)
        else:
            # Successful duplication.
            i += 1
    return G
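
# Quick usage sketch (assuming `import networkx as nx`): failed duplications
# are retried, so the result always has exactly `n` nodes.
#
#     >>> G = nx.duplication_divergence_graph(10, 0.4, seed=2)
#     >>> G.number_of_nodes()
#     10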
llmeval-env/lib/python3.10/site-packages/networkx/generators/ego.py ADDED
@@ -0,0 +1,65 @@
+ """
2
+ Ego graph.
3
+ """
4
+ __all__ = ["ego_graph"]
5
+
6
+ import networkx as nx
7
+
8
+
9
+ @nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
10
+ def ego_graph(G, n, radius=1, center=True, undirected=False, distance=None):
11
+ """Returns induced subgraph of neighbors centered at node n within
12
+ a given radius.
13
+
14
+ Parameters
15
+ ----------
16
+ G : graph
17
+ A NetworkX Graph or DiGraph
18
+
19
+ n : node
20
+ A single node
21
+
22
+ radius : number, optional
23
+ Include all neighbors of distance<=radius from n.
24
+
25
+ center : bool, optional
26
+ If False, do not include center node in graph
27
+
28
+ undirected : bool, optional
29
+ If True use both in- and out-neighbors of directed graphs.
30
+
31
+ distance : key, optional
32
+ Use specified edge data key as distance. For example, setting
33
+ distance='weight' will use the edge weight to measure the
34
+ distance from the node n.
35
+
36
+ Notes
37
+ -----
38
+ For directed graphs D this produces the "out" neighborhood
39
+ or successors. If you want the neighborhood of predecessors
40
+ first reverse the graph with D.reverse(). If you want both
41
+ directions use the keyword argument undirected=True.
42
+
43
+ Node, edge, and graph attributes are copied to the returned subgraph.
44
+ """
45
+ if undirected:
46
+ if distance is not None:
47
+ sp, _ = nx.single_source_dijkstra(
48
+ G.to_undirected(), n, cutoff=radius, weight=distance
49
+ )
50
+ else:
51
+ sp = dict(
52
+ nx.single_source_shortest_path_length(
53
+ G.to_undirected(), n, cutoff=radius
54
+ )
55
+ )
56
+ else:
57
+ if distance is not None:
58
+ sp, _ = nx.single_source_dijkstra(G, n, cutoff=radius, weight=distance)
59
+ else:
60
+ sp = dict(nx.single_source_shortest_path_length(G, n, cutoff=radius))
61
+
62
+ H = G.subgraph(sp).copy()
63
+ if not center:
64
+ H.remove_node(n)
65
+ return H
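
# Quick usage sketch (assuming `import networkx as nx`): in a path graph,
# the radius-1 ego graph of node 2 is the node plus its two neighbors.
#
#     >>> G = nx.path_graph(5)
#     >>> H = nx.ego_graph(G, 2, radius=1)
#     >>> sorted(H.nodes())
#     [1, 2, 3]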
llmeval-env/lib/python3.10/site-packages/networkx/generators/expanders.py ADDED
@@ -0,0 +1,475 @@
+ """Provides explicit constructions of expander graphs.
2
+
3
+ """
4
+ import itertools
5
+
6
+ import networkx as nx
7
+
8
+ __all__ = [
9
+ "margulis_gabber_galil_graph",
10
+ "chordal_cycle_graph",
11
+ "paley_graph",
12
+ "maybe_regular_expander",
13
+ "is_regular_expander",
14
+ "random_regular_expander_graph",
15
+ ]
16
+
17
+
18
+ # Other discrete torus expanders can be constructed by using the following edge
19
+ # sets. For more information, see Chapter 4, "Expander Graphs", in
20
+ # "Pseudorandomness", by Salil Vadhan.
21
+ #
22
+ # For a directed expander, add edges from (x, y) to:
23
+ #
24
+ # (x, y),
25
+ # ((x + 1) % n, y),
26
+ # (x, (y + 1) % n),
27
+ # (x, (x + y) % n),
28
+ # (-y % n, x)
29
+ #
30
+ # For an undirected expander, add the reverse edges.
31
+ #
32
+ # Also appearing in the paper of Gabber and Galil:
33
+ #
34
+ # (x, y),
35
+ # (x, (x + y) % n),
36
+ # (x, (x + y + 1) % n),
37
+ # ((x + y) % n, y),
38
+ # ((x + y + 1) % n, y)
39
+ #
40
+ # and:
41
+ #
42
+ # (x, y),
43
+ # ((x + 2*y) % n, y),
44
+ # ((x + (2*y + 1)) % n, y),
45
+ # ((x + (2*y + 2)) % n, y),
46
+ # (x, (y + 2*x) % n),
47
+ # (x, (y + (2*x + 1)) % n),
48
+ # (x, (y + (2*x + 2)) % n),
49
+ #
50
+ @nx._dispatchable(graphs=None, returns_graph=True)
51
+ def margulis_gabber_galil_graph(n, create_using=None):
52
+ r"""Returns the Margulis-Gabber-Galil undirected MultiGraph on `n^2` nodes.
53
+
54
+ The undirected MultiGraph is regular with degree `8`. Nodes are integer
55
+ pairs. The second-largest eigenvalue of the adjacency matrix of the graph
56
+ is at most `5 \sqrt{2}`, regardless of `n`.
57
+
58
+ Parameters
59
+ ----------
60
+ n : int
61
+ Determines the number of nodes in the graph: `n^2`.
62
+ create_using : NetworkX graph constructor, optional (default MultiGraph)
63
+ Graph type to create. If graph instance, then cleared before populated.
64
+
65
+ Returns
66
+ -------
67
+ G : graph
68
+ The constructed undirected multigraph.
69
+
70
+ Raises
71
+ ------
72
+ NetworkXError
73
+ If the graph is directed or not a multigraph.
74
+
75
+ """
76
+ G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
77
+ if G.is_directed() or not G.is_multigraph():
78
+ msg = "`create_using` must be an undirected multigraph."
79
+ raise nx.NetworkXError(msg)
80
+
81
+ for x, y in itertools.product(range(n), repeat=2):
82
+ for u, v in (
83
+ ((x + 2 * y) % n, y),
84
+ ((x + (2 * y + 1)) % n, y),
85
+ (x, (y + 2 * x) % n),
86
+ (x, (y + (2 * x + 1)) % n),
87
+ ):
88
+ G.add_edge((x, y), (u, v))
89
+ G.graph["name"] = f"margulis_gabber_galil_graph({n})"
90
+ return G
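
# Quick usage sketch (assuming `import networkx as nx`): the construction is
# deterministic and 8-regular (in the multigraph, a self-loop contributes 2
# to its node's degree).
#
#     >>> G = nx.margulis_gabber_galil_graph(3)
#     >>> G.number_of_nodes()
#     9
#     >>> all(d == 8 for _, d in G.degree())
#     True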


@nx._dispatchable(graphs=None, returns_graph=True)
def chordal_cycle_graph(p, create_using=None):
    """Returns the chordal cycle graph on `p` nodes.

    The returned graph is a cycle graph on `p` nodes with chords joining each
    vertex `x` to its inverse modulo `p`. This graph is a (mildly explicit)
    3-regular expander [1]_.

    `p` *must* be a prime number.

    Parameters
    ----------
    p : a prime number
        The number of vertices in the graph. This also indicates where the
        chordal edges in the cycle will be created.
    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    Returns
    -------
    G : graph
        The constructed undirected multigraph.

    Raises
    ------
    NetworkXError
        If `create_using` indicates directed or not a multigraph.

    References
    ----------
    .. [1] Theorem 4.4.2 in A. Lubotzky. "Discrete groups, expanding graphs and
       invariant measures", volume 125 of Progress in Mathematics.
       Birkhäuser Verlag, Basel, 1994.

    """
    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
    if G.is_directed() or not G.is_multigraph():
        msg = "`create_using` must be an undirected multigraph."
        raise nx.NetworkXError(msg)

    for x in range(p):
        left = (x - 1) % p
        right = (x + 1) % p
        # Here we apply Fermat's Little Theorem to compute the multiplicative
        # inverse of x in Z/pZ. By Fermat's Little Theorem,
        #
        #     x^p = x (mod p)
        #
        # Therefore,
        #
        #     x * x^(p - 2) = 1 (mod p)
        #
        # The number 0 is a special case: we just let its inverse be itself.
        chord = pow(x, p - 2, p) if x > 0 else 0
        for y in (left, right, chord):
            G.add_edge(x, y)
    G.graph["name"] = f"chordal_cycle_graph({p})"
    return G
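
# Quick usage sketch (assuming `import networkx as nx`): for p = 7, node 2
# is joined by a chord to its multiplicative inverse 4, since
# 2 * 4 = 8 ≡ 1 (mod 7).
#
#     >>> G = nx.chordal_cycle_graph(7)
#     >>> G.number_of_nodes()
#     7
#     >>> G.has_edge(2, 4)
#     True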


@nx._dispatchable(graphs=None, returns_graph=True)
def paley_graph(p, create_using=None):
    r"""Returns the Paley $\frac{(p-1)}{2}$-regular graph on $p$ nodes.

    The returned graph is a graph on $\mathbb{Z}/p\mathbb{Z}$ with edges between $x$ and $y$
    if and only if $x-y$ is a nonzero square in $\mathbb{Z}/p\mathbb{Z}$.

    If $p \equiv 1 \pmod 4$, $-1$ is a square in $\mathbb{Z}/p\mathbb{Z}$ and therefore $x-y$ is a square if and
    only if $y-x$ is also a square, i.e. the edges in the Paley graph are symmetric.

    If $p \equiv 3 \pmod 4$, $-1$ is not a square in $\mathbb{Z}/p\mathbb{Z}$ and therefore either $x-y$ or $y-x$
    is a square in $\mathbb{Z}/p\mathbb{Z}$ but not both.

    Note that a more general definition of Paley graphs extends this construction
    to graphs over $q=p^n$ vertices, by using the finite field $F_q$ instead of $\mathbb{Z}/p\mathbb{Z}$.
    This construction requires computing squares in general finite fields and is
    not what is implemented here (i.e. `paley_graph(25)` does not return the true
    Paley graph associated with $5^2$).

    Parameters
    ----------
    p : int, an odd prime number.

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    Returns
    -------
    G : graph
        The constructed directed graph.

    Raises
    ------
    NetworkXError
        If the graph is a multigraph.

    References
    ----------
    Chapter 13 in B. Bollobas, Random Graphs. Second edition.
    Cambridge Studies in Advanced Mathematics, 73.
    Cambridge University Press, Cambridge (2001).
    """
    G = nx.empty_graph(0, create_using, default=nx.DiGraph)
    if G.is_multigraph():
        msg = "`create_using` cannot be a multigraph."
        raise nx.NetworkXError(msg)

    # Compute the squares in Z/pZ.
    # Make it a set to uniquify (there are exactly (p-1)/2 squares in Z/pZ
    # when p is prime).
    square_set = {(x**2) % p for x in range(1, p) if (x**2) % p != 0}

    for x in range(p):
        for x2 in square_set:
            G.add_edge(x, (x + x2) % p)
    G.graph["name"] = f"paley({p})"
    return G
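
# Quick usage sketch (assuming `import networkx as nx`): there are exactly
# (p - 1) / 2 nonzero squares mod p, so every node of the directed Paley
# graph has that out-degree.
#
#     >>> G = nx.paley_graph(13)
#     >>> all(d == 6 for _, d in G.out_degree())
#     True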


@nx.utils.decorators.np_random_state("seed")
@nx._dispatchable(graphs=None, returns_graph=True)
def maybe_regular_expander(n, d, *, create_using=None, max_tries=100, seed=None):
    r"""Utility for creating a random regular expander.

    Returns a random $d$-regular graph on $n$ nodes which is an expander
    graph with very good probability.

    Parameters
    ----------
    n : int
        The number of nodes.
    d : int
        The degree of each node.
    create_using : Graph Instance or Constructor
        Indicator of type of graph to return.
        If a Graph-type instance, then clear and use it.
        If a constructor, call it to create an empty graph.
        Use the Graph constructor by default.
    max_tries : int. (default: 100)
        The number of allowed loops when generating each independent cycle
    seed : (default: None)
        Seed used to set random number generation state. See :ref:`Randomness<randomness>`.

    Notes
    -----
    The nodes are numbered from $0$ to $n - 1$.

    The graph is generated by taking $d / 2$ random independent cycles.

    Joel Friedman proved that in this model the resulting
    graph is an expander with probability
    $1 - O(n^{-\tau})$ where $\tau = \lceil (\sqrt{d - 1}) / 2 \rceil - 1$. [1]_

    Examples
    --------
    >>> G = nx.maybe_regular_expander(n=200, d=6, seed=8020)

    Returns
    -------
    G : graph
        The constructed undirected graph.

    Raises
    ------
    NetworkXError
        If `d` is odd, since the degree must be even.
        If `n - 1` is less than `d`, since there is not enough room for the
        $d / 2$ independent cycles.
        If `max_tries` is reached.

    See Also
    --------
    is_regular_expander
    random_regular_expander_graph

    References
    ----------
    .. [1] Joel Friedman,
       A Proof of Alon's Second Eigenvalue Conjecture and Related Problems, 2004
       https://arxiv.org/abs/cs/0405020

    """

    import numpy as np

    if n < 1:
        raise nx.NetworkXError("n must be a positive integer")

    if not (d >= 2):
        raise nx.NetworkXError("d must be greater than or equal to 2")

    if not (d % 2 == 0):
        raise nx.NetworkXError("d must be even")

    if not (n - 1 >= d):
        raise nx.NetworkXError(
            f"Need n-1 >= d to have room for {d//2} independent cycles with {n} nodes"
        )

    G = nx.empty_graph(n, create_using)

    if n < 2:
        return G

    cycles = []
    edges = set()

    # Create d / 2 cycles
    for i in range(d // 2):
        iterations = max_tries
        # Make sure the cycles are independent to have a regular graph
        while len(edges) != (i + 1) * n:
            iterations -= 1
            # Faster than random.permutation(n) since there are only
            # (n-1)! distinct cycles against n! permutations of size n
            cycle = seed.permutation(n - 1).tolist()
            cycle.append(n - 1)

            new_edges = {
                (u, v)
                for u, v in nx.utils.pairwise(cycle, cyclic=True)
                if (u, v) not in edges and (v, u) not in edges
            }
            # If the new cycle has no edges in common with previous cycles
            # then add it to the list otherwise try again
            if len(new_edges) == n:
                cycles.append(cycle)
                edges.update(new_edges)

            if iterations == 0:
                raise nx.NetworkXError("Too many iterations in maybe_regular_expander")

    G.add_edges_from(edges)

    return G


@nx.utils.not_implemented_for("directed")
@nx.utils.not_implemented_for("multigraph")
@nx._dispatchable(preserve_edge_attrs={"G": {"weight": 1}})
def is_regular_expander(G, *, epsilon=0):
    r"""Determines whether the graph G is a regular expander. [1]_

    An expander graph is a sparse graph with strong connectivity properties.

    More precisely, this helper checks whether the graph is a
    regular $(n, d, \lambda)$-expander with $\lambda$ close to
    the Alon-Boppana bound and given by
    $\lambda = 2 \sqrt{d - 1} + \epsilon$. [2]_

    In the case where $\epsilon = 0$, if the graph successfully passes the
    test it is a Ramanujan graph. [3]_

    A Ramanujan graph has spectral gap almost as large as possible, which
    makes it an excellent expander.

    Parameters
    ----------
    G : NetworkX graph
    epsilon : int, float, default=0

    Returns
    -------
    bool
        Whether the given graph is a regular $(n, d, \lambda)$-expander
        where $\lambda = 2 \sqrt{d - 1} + \epsilon$.

    Examples
    --------
    >>> G = nx.random_regular_expander_graph(20, 4)
    >>> nx.is_regular_expander(G)
    True

    See Also
    --------
    maybe_regular_expander
    random_regular_expander_graph

    References
    ----------
    .. [1] Expander graph, https://en.wikipedia.org/wiki/Expander_graph
    .. [2] Alon-Boppana bound, https://en.wikipedia.org/wiki/Alon%E2%80%93Boppana_bound
    .. [3] Ramanujan graphs, https://en.wikipedia.org/wiki/Ramanujan_graph

    """

    import numpy as np
    from scipy.sparse.linalg import eigsh

    if epsilon < 0:
        raise nx.NetworkXError("epsilon must be non-negative")

    if not nx.is_regular(G):
        return False

    _, d = nx.utils.arbitrary_element(G.degree)

    A = nx.adjacency_matrix(G, dtype=float)
    lams = eigsh(A, which="LM", k=2, return_eigenvectors=False)

    # lambda2 is the second biggest eigenvalue
    lambda2 = min(lams)

    # Use bool() to convert numpy scalar to Python Boolean. Note the bound is
    # 2 * sqrt(d - 1) + epsilon: the original line computed 2 ** sqrt(d - 1),
    # which does not match the lambda defined in the docstring.
    return bool(abs(lambda2) < 2 * np.sqrt(d - 1) + epsilon)


@nx.utils.decorators.np_random_state("seed")
@nx._dispatchable(graphs=None, returns_graph=True)
def random_regular_expander_graph(
    n, d, *, epsilon=0, create_using=None, max_tries=100, seed=None
):
    r"""Returns a random regular expander graph on $n$ nodes with degree $d$.

    An expander graph is a sparse graph with strong connectivity properties. [1]_

    More precisely the returned graph is a $(n, d, \lambda)$-expander with
    $\lambda = 2 \sqrt{d - 1} + \epsilon$, close to the Alon-Boppana bound. [2]_

    In the case where $\epsilon = 0$ it returns a Ramanujan graph.
    A Ramanujan graph has spectral gap almost as large as possible,
    which makes it an excellent expander. [3]_

    Parameters
    ----------
    n : int
        The number of nodes.
    d : int
        The degree of each node.
    epsilon : int, float, default=0
    max_tries : int, (default: 100)
        The number of allowed loops, also used in the maybe_regular_expander utility
    seed : (default: None)
        Seed used to set random number generation state. See :ref:`Randomness<randomness>`.

    Raises
    ------
    NetworkXError
        If max_tries is reached

    Examples
    --------
    >>> G = nx.random_regular_expander_graph(20, 4)
    >>> nx.is_regular_expander(G)
    True

    Notes
    -----
    This loops over `maybe_regular_expander` and can be slow when
    $n$ is too big or $\epsilon$ too small.

    See Also
    --------
    maybe_regular_expander
    is_regular_expander

    References
    ----------
    .. [1] Expander graph, https://en.wikipedia.org/wiki/Expander_graph
    .. [2] Alon-Boppana bound, https://en.wikipedia.org/wiki/Alon%E2%80%93Boppana_bound
    .. [3] Ramanujan graphs, https://en.wikipedia.org/wiki/Ramanujan_graph

    """
    G = maybe_regular_expander(
        n, d, create_using=create_using, max_tries=max_tries, seed=seed
    )
    iterations = max_tries

    while not is_regular_expander(G, epsilon=epsilon):
        iterations -= 1
        G = maybe_regular_expander(
            n=n, d=d, create_using=create_using, max_tries=max_tries, seed=seed
        )

        if iterations == 0:
            raise nx.NetworkXError(
                "Too many iterations in random_regular_expander_graph"
            )

    return G
llmeval-env/lib/python3.10/site-packages/networkx/generators/geometric.py ADDED
@@ -0,0 +1,1047 @@
+ """Generators for geometric graphs.
2
+ """
3
+
4
+ import math
5
+ from bisect import bisect_left
6
+ from itertools import accumulate, combinations, product
7
+
8
+ import networkx as nx
9
+ from networkx.utils import py_random_state
10
+
11
+ __all__ = [
12
+ "geometric_edges",
13
+ "geographical_threshold_graph",
14
+ "navigable_small_world_graph",
15
+ "random_geometric_graph",
16
+ "soft_random_geometric_graph",
17
+ "thresholded_random_geometric_graph",
18
+ "waxman_graph",
19
+ "geometric_soft_configuration_graph",
20
+ ]
21
+
22
+
23
+ @nx._dispatchable(node_attrs="pos_name")
24
+ def geometric_edges(G, radius, p=2, *, pos_name="pos"):
25
+ """Returns edge list of node pairs within `radius` of each other.
26
+
27
+ Parameters
28
+ ----------
29
+ G : networkx graph
30
+ The graph from which to generate the edge list. The nodes in `G` should
31
+ have an attribute ``pos`` corresponding to the node position, which is
32
+ used to compute the distance to other nodes.
33
+ radius : scalar
34
+ The distance threshold. Edges are included in the edge list if the
35
+ distance between the two nodes is less than `radius`.
36
+ pos_name : string, default="pos"
37
+ The name of the node attribute which represents the position of each
38
+ node in 2D coordinates. Every node in the Graph must have this attribute.
39
+ p : scalar, default=2
40
+ The `Minkowski distance metric
41
+ <https://en.wikipedia.org/wiki/Minkowski_distance>`_ used to compute
42
+ distances. The default value is 2, i.e. Euclidean distance.
43
+
44
+ Returns
45
+ -------
46
+ edges : list
47
+ List of edges whose distances are less than `radius`
48
+
49
+ Notes
50
+ -----
51
+ Radius uses Minkowski distance metric `p`.
52
+ If scipy is available, `scipy.spatial.cKDTree` is used to speed computation.
53
+
54
+ Examples
55
+ --------
56
+ Create a graph with nodes that have a "pos" attribute representing 2D
57
+ coordinates.
58
+
59
+ >>> G = nx.Graph()
60
+ >>> G.add_nodes_from(
61
+ ... [
62
+ ... (0, {"pos": (0, 0)}),
63
+ ... (1, {"pos": (3, 0)}),
64
+ ... (2, {"pos": (8, 0)}),
65
+ ... ]
66
+ ... )
67
+ >>> nx.geometric_edges(G, radius=1)
68
+ []
69
+ >>> nx.geometric_edges(G, radius=4)
70
+ [(0, 1)]
71
+ >>> nx.geometric_edges(G, radius=6)
72
+ [(0, 1), (1, 2)]
73
+ >>> nx.geometric_edges(G, radius=9)
74
+ [(0, 1), (0, 2), (1, 2)]
75
+ """
76
+ # Input validation - every node must have a "pos" attribute
77
+ for n, pos in G.nodes(data=pos_name):
78
+ if pos is None:
79
+ raise nx.NetworkXError(
80
+ f"Node {n} (and all nodes) must have a '{pos_name}' attribute."
81
+ )
82
+
83
+ # NOTE: See _geometric_edges for the actual implementation. The reason this
84
+ # is split into two functions is to avoid the overhead of input validation
85
+ # every time the function is called internally in one of the other
86
+ # geometric generators
87
+ return _geometric_edges(G, radius, p, pos_name)
88
+
89
+
90
+ def _geometric_edges(G, radius, p, pos_name):
91
+ """
92
+ Implements `geometric_edges` without input validation. See `geometric_edges`
93
+ for complete docstring.
94
+ """
95
+ nodes_pos = G.nodes(data=pos_name)
96
+ try:
97
+ import scipy as sp
98
+ except ImportError:
99
+ # no scipy KDTree so compute by for-loop
100
+ radius_p = radius**p
101
+ edges = [
102
+ (u, v)
103
+ for (u, pu), (v, pv) in combinations(nodes_pos, 2)
104
+ if sum(abs(a - b) ** p for a, b in zip(pu, pv)) <= radius_p
105
+ ]
106
+ return edges
107
+ # scipy KDTree is available
108
+ nodes, coords = list(zip(*nodes_pos))
109
+ kdtree = sp.spatial.cKDTree(coords) # Cannot provide generator.
110
+ edge_indexes = kdtree.query_pairs(radius, p)
111
+ edges = [(nodes[u], nodes[v]) for u, v in sorted(edge_indexes)]
112
+ return edges
113
+
114
+
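A quick doctest-style sketch of the boundary behavior above (illustrative only, using just the public `nx.geometric_edges` API from this file; both the pure-Python fallback and the KDTree path treat `radius` as inclusive):

>>> import networkx as nx
>>> G = nx.Graph()
>>> G.add_nodes_from([(0, {"pos": (0, 0)}), (1, {"pos": (1, 1)})])
>>> nx.geometric_edges(G, radius=2, p=1)  # Manhattan distance is exactly 2
[(0, 1)]
>>> nx.geometric_edges(G, radius=1.9, p=1)  # 2 > 1.9, so no edge
[]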
115
+ @py_random_state(5)
116
+ @nx._dispatchable(graphs=None, returns_graph=True)
117
+ def random_geometric_graph(
118
+ n, radius, dim=2, pos=None, p=2, seed=None, *, pos_name="pos"
119
+ ):
120
+ """Returns a random geometric graph in the unit cube of dimensions `dim`.
121
+
122
+ The random geometric graph model places `n` nodes uniformly at
123
+ random in the unit cube. Two nodes are joined by an edge if the
124
+ distance between the nodes is at most `radius`.
125
+
126
+ Edges are determined using a KDTree when SciPy is available.
127
+ This reduces the time complexity from $O(n^2)$ to roughly $O(n \log n)$.
128
+
129
+ Parameters
130
+ ----------
131
+ n : int or iterable
132
+ Number of nodes or iterable of nodes
133
+ radius: float
134
+ Distance threshold value
135
+ dim : int, optional
136
+ Dimension of graph
137
+ pos : dict, optional
138
+ A dictionary keyed by node with node positions as values.
139
+ p : float, optional
140
+ Which Minkowski distance metric to use. `p` has to meet the condition
141
+ ``1 <= p <= infinity``.
142
+
143
+ If this argument is not specified, the :math:`L^2` metric
144
+ (the Euclidean distance metric), p = 2 is used.
145
+ This should not be confused with the `p` of an Erdős-Rényi random
146
+ graph, which represents probability.
147
+ seed : integer, random_state, or None (default)
148
+ Indicator of random number generation state.
149
+ See :ref:`Randomness<randomness>`.
150
+ pos_name : string, default="pos"
151
+ The name of the node attribute which represents the position
152
+ in 2D coordinates of the node in the returned graph.
153
+
154
+ Returns
155
+ -------
156
+ Graph
157
+ A random geometric graph, undirected and without self-loops.
158
+ Each node has a node attribute ``'pos'`` that stores the
159
+ position of that node in Euclidean space as provided by the
160
+ ``pos`` keyword argument or, if ``pos`` was not provided, as
161
+ generated by this function.
162
+
163
+ Examples
164
+ --------
165
+ Create a random geometric graph on twenty nodes where nodes are joined by
166
+ an edge if their distance is at most 0.1::
167
+
168
+ >>> G = nx.random_geometric_graph(20, 0.1)
169
+
170
+ Notes
171
+ -----
172
+ This uses a *k*-d tree to build the graph.
173
+
174
+ The `pos` keyword argument can be used to specify node positions so you
175
+ can create an arbitrary distribution and domain for positions.
176
+
177
+ For example, to use a 2D Gaussian distribution of node positions with mean
178
+ (0, 0) and standard deviation 2::
179
+
180
+ >>> import random
181
+ >>> n = 20
182
+ >>> pos = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)}
183
+ >>> G = nx.random_geometric_graph(n, 0.2, pos=pos)
184
+
185
+ References
186
+ ----------
187
+ .. [1] Penrose, Mathew, *Random Geometric Graphs*,
188
+ Oxford Studies in Probability, 5, 2003.
189
+
190
+ """
191
+ # TODO Is this function just a special case of the geographical
192
+ # threshold graph?
193
+ #
194
+ # half_radius = {v: radius / 2 for v in n}
195
+ # return geographical_threshold_graph(nodes, theta=1, alpha=1,
196
+ # weight=half_radius)
197
+ #
198
+ G = nx.empty_graph(n)
199
+ # If no positions are provided, choose uniformly random vectors in
200
+ # Euclidean space of the specified dimension.
201
+ if pos is None:
202
+ pos = {v: [seed.random() for i in range(dim)] for v in G}
203
+ nx.set_node_attributes(G, pos, pos_name)
204
+
205
+ G.add_edges_from(_geometric_edges(G, radius, p, pos_name))
206
+ return G
207
+
208
+
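A short usage sketch for the generator above (the edge set depends on the seed, so only seed-independent invariants are checked):

>>> import networkx as nx
>>> G = nx.random_geometric_graph(50, 0.25, seed=42)
>>> len(G)
50
>>> all("pos" in data for _, data in G.nodes(data=True))
True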
209
+ @py_random_state(6)
210
+ @nx._dispatchable(graphs=None, returns_graph=True)
211
+ def soft_random_geometric_graph(
212
+ n, radius, dim=2, pos=None, p=2, p_dist=None, seed=None, *, pos_name="pos"
213
+ ):
214
+ r"""Returns a soft random geometric graph in the unit cube.
215
+
216
+ The soft random geometric graph [1] model places `n` nodes uniformly at
217
+ random in the unit cube in dimension `dim`. Two nodes at distance `dist`,
218
+ computed by the `p`-Minkowski distance metric, are joined by an edge with
219
+ probability `p_dist(dist)` if `dist` is at most `radius`; otherwise
220
+ they are not joined.
221
+
222
+ Edges within `radius` of each other are determined using a KDTree when
223
+ SciPy is available. This reduces the time complexity from :math:`O(n^2)`
224
+ to roughly :math:`O(n \log n)`.
225
+
226
+ Parameters
227
+ ----------
228
+ n : int or iterable
229
+ Number of nodes or iterable of nodes
230
+ radius: float
231
+ Distance threshold value
232
+ dim : int, optional
233
+ Dimension of graph
234
+ pos : dict, optional
235
+ A dictionary keyed by node with node positions as values.
236
+ p : float, optional
237
+ Which Minkowski distance metric to use.
238
+ `p` has to meet the condition ``1 <= p <= infinity``.
239
+
240
+ If this argument is not specified, the :math:`L^2` metric
241
+ (the Euclidean distance metric), p = 2 is used.
242
+
243
+ This should not be confused with the `p` of an Erdős-Rényi random
244
+ graph, which represents probability.
245
+ p_dist : function, optional
246
+ A probability density function computing the probability of
247
+ connecting two nodes at distance ``dist``, computed by the
248
+ Minkowski distance metric. The probability density function, `p_dist`,
249
+ must be any function that takes the metric value as input
250
+ and outputs a single probability value between 0 and 1. The scipy.stats
251
+ package has many probability distribution functions implemented and
252
+ tools for custom probability distribution definitions [2], and passing
253
+ the .pdf method of scipy.stats distributions can be used here. If the
254
+ probability function, `p_dist`, is not supplied, the default function
255
+ is an exponential distribution with rate parameter :math:`\lambda=1`.
256
+ seed : integer, random_state, or None (default)
257
+ Indicator of random number generation state.
258
+ See :ref:`Randomness<randomness>`.
259
+ pos_name : string, default="pos"
260
+ The name of the node attribute which represents the position
261
+ in 2D coordinates of the node in the returned graph.
262
+
263
+ Returns
264
+ -------
265
+ Graph
266
+ A soft random geometric graph, undirected and without self-loops.
267
+ Each node has a node attribute ``'pos'`` that stores the
268
+ position of that node in Euclidean space as provided by the
269
+ ``pos`` keyword argument or, if ``pos`` was not provided, as
270
+ generated by this function.
271
+
272
+ Examples
273
+ --------
274
+ Default Graph:
275
+
276
+ G = nx.soft_random_geometric_graph(50, 0.2)
277
+
278
+ Custom Graph:
279
+
280
+ Create a soft random geometric graph on 100 uniformly distributed nodes
281
+ where nodes are joined by an edge with probability computed from an
282
+ exponential distribution with rate parameter :math:`\lambda=1` if their
283
+ Euclidean distance is at most 0.2.
284
+
285
+ Notes
286
+ -----
287
+ This uses a *k*-d tree to build the graph.
288
+
289
+ The `pos` keyword argument can be used to specify node positions so you
290
+ can create an arbitrary distribution and domain for positions.
291
+
292
+ For example, to use a 2D Gaussian distribution of node positions with mean
293
+ (0, 0) and standard deviation 2 (see the example below).
294
+
295
+ The scipy.stats package can be used to define the probability distribution
296
+ with the .pdf method used as `p_dist`.
297
+
298
+ ::
299
+
300
+ >>> import random
301
+ >>> import math
302
+ >>> n = 100
303
+ >>> pos = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)}
304
+ >>> p_dist = lambda dist: math.exp(-dist)
305
+ >>> G = nx.soft_random_geometric_graph(n, 0.2, pos=pos, p_dist=p_dist)
306
+
307
+ References
308
+ ----------
309
+ .. [1] Penrose, Mathew D. "Connectivity of soft random geometric graphs."
310
+ The Annals of Applied Probability 26.2 (2016): 986-1028.
311
+ .. [2] scipy.stats -
312
+ https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html
313
+
314
+ """
315
+ G = nx.empty_graph(n)
316
+ G.name = f"soft_random_geometric_graph({n}, {radius}, {dim})"
317
+ # If no positions are provided, choose uniformly random vectors in
318
+ # Euclidean space of the specified dimension.
319
+ if pos is None:
320
+ pos = {v: [seed.random() for i in range(dim)] for v in G}
321
+ nx.set_node_attributes(G, pos, pos_name)
322
+
323
+ # if p_dist function not supplied the default function is an exponential
324
+ # distribution with rate parameter :math:`\lambda=1`.
325
+ if p_dist is None:
326
+
327
+ def p_dist(dist):
328
+ return math.exp(-dist)
329
+
330
+ def should_join(edge):
331
+ u, v = edge
332
+ dist = (sum(abs(a - b) ** p for a, b in zip(pos[u], pos[v]))) ** (1 / p)
333
+ return seed.random() < p_dist(dist)
334
+
335
+ G.add_edges_from(filter(should_join, _geometric_edges(G, radius, p, pos_name)))
336
+ return G
337
+
338
+
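One way to sanity-check the model above: a `p_dist` that always returns 1 makes every candidate edge pass the `seed.random() < p_dist(dist)` test, so the soft graph collapses to the hard random geometric graph on the same positions. An illustrative sketch (not part of the library):

>>> import networkx as nx
>>> G_soft = nx.soft_random_geometric_graph(30, 0.2, p_dist=lambda dist: 1.0, seed=1)
>>> pos = nx.get_node_attributes(G_soft, "pos")
>>> G_hard = nx.random_geometric_graph(30, 0.2, pos=pos)
>>> set(G_soft.edges()) == set(G_hard.edges())
True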
339
+ @py_random_state(7)
340
+ @nx._dispatchable(graphs=None, returns_graph=True)
341
+ def geographical_threshold_graph(
342
+ n,
343
+ theta,
344
+ dim=2,
345
+ pos=None,
346
+ weight=None,
347
+ metric=None,
348
+ p_dist=None,
349
+ seed=None,
350
+ *,
351
+ pos_name="pos",
352
+ weight_name="weight",
353
+ ):
354
+ r"""Returns a geographical threshold graph.
355
+
356
+ The geographical threshold graph model places $n$ nodes uniformly at
357
+ random in a rectangular domain. Each node $u$ is assigned a weight
358
+ $w_u$. Two nodes $u$ and $v$ are joined by an edge if
359
+
360
+ .. math::
361
+
362
+ (w_u + w_v)p_{dist}(r) \ge \theta
363
+
364
+ where `r` is the distance between `u` and `v`, `p_dist` is any function of
365
+ `r`, and :math:`\theta` is the threshold parameter. `p_dist` is used to
366
+ give weight to the distance between nodes when deciding whether or not
367
+ they should be connected. The larger `p_dist` is, the more prone nodes
368
+ separated by `r` are to be connected, and vice versa.
369
+
370
+ Parameters
371
+ ----------
372
+ n : int or iterable
373
+ Number of nodes or iterable of nodes
374
+ theta: float
375
+ Threshold value
376
+ dim : int, optional
377
+ Dimension of graph
378
+ pos : dict
379
+ Node positions as a dictionary of tuples keyed by node.
380
+ weight : dict
381
+ Node weights as a dictionary of numbers keyed by node.
382
+ metric : function
383
+ A metric on vectors of numbers (represented as lists or
384
+ tuples). This must be a function that accepts two lists (or
385
+ tuples) as input and yields a number as output. The function
386
+ must also satisfy the four requirements of a `metric`_.
387
+ Specifically, if $d$ is the function and $x$, $y$,
388
+ and $z$ are vectors in the graph, then $d$ must satisfy
389
+
390
+ 1. $d(x, y) \ge 0$,
391
+ 2. $d(x, y) = 0$ if and only if $x = y$,
392
+ 3. $d(x, y) = d(y, x)$,
393
+ 4. $d(x, z) \le d(x, y) + d(y, z)$.
394
+
395
+ If this argument is not specified, the Euclidean distance metric is
396
+ used.
397
+
398
+ .. _metric: https://en.wikipedia.org/wiki/Metric_%28mathematics%29
399
+ p_dist : function, optional
400
+ Any function used to give weight to the distance between nodes when
401
+ deciding whether or not they should be connected. `p_dist` was
402
+ originally conceived as a probability density function giving the
403
+ probability of connecting two nodes that are of metric distance `r`
404
+ apart. The implementation here allows for more arbitrary definitions
405
+ of `p_dist` that do not need to correspond to valid probability
406
+ density functions. The :mod:`scipy.stats` package has many
407
+ probability density functions implemented and tools for custom
408
+ probability density definitions, and passing the ``.pdf`` method of
409
+ scipy.stats distributions can be used here. If ``p_dist=None``
410
+ (the default), the inverse-square function :math:`r^{-2}` is used.
411
+ seed : integer, random_state, or None (default)
412
+ Indicator of random number generation state.
413
+ See :ref:`Randomness<randomness>`.
414
+ pos_name : string, default="pos"
415
+ The name of the node attribute which represents the position
416
+ in 2D coordinates of the node in the returned graph.
417
+ weight_name : string, default="weight"
418
+ The name of the node attribute which represents the weight
419
+ of the node in the returned graph.
420
+
421
+ Returns
422
+ -------
423
+ Graph
424
+ A random geographic threshold graph, undirected and without
425
+ self-loops.
426
+
427
+ Each node has a node attribute ``pos`` that stores the
428
+ position of that node in Euclidean space as provided by the
429
+ ``pos`` keyword argument or, if ``pos`` was not provided, as
430
+ generated by this function. Similarly, each node has a node
431
+ attribute ``weight`` that stores the weight of that node as
432
+ provided or as generated.
433
+
434
+ Examples
435
+ --------
436
+ Specify an alternate distance metric using the ``metric`` keyword
437
+ argument. For example, to use the `taxicab metric`_ instead of the
438
+ default `Euclidean metric`_::
439
+
440
+ >>> dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y))
441
+ >>> G = nx.geographical_threshold_graph(10, 0.1, metric=dist)
442
+
443
+ .. _taxicab metric: https://en.wikipedia.org/wiki/Taxicab_geometry
444
+ .. _Euclidean metric: https://en.wikipedia.org/wiki/Euclidean_distance
445
+
446
+ Notes
447
+ -----
448
+ If weights are not specified they are assigned to nodes by drawing randomly
449
+ from the exponential distribution with rate parameter $\lambda=1$.
450
+ To specify weights from a different distribution, use the `weight` keyword
451
+ argument::
452
+
453
+ >>> import random
454
+ >>> n = 20
455
+ >>> w = {i: random.expovariate(5.0) for i in range(n)}
456
+ >>> G = nx.geographical_threshold_graph(20, 50, weight=w)
457
+
458
+ If node positions are not specified they are randomly assigned from the
459
+ uniform distribution.
460
+
461
+ References
462
+ ----------
463
+ .. [1] Masuda, N., Miwa, H., Konno, N.:
464
+ Geographical threshold graphs with small-world and scale-free
465
+ properties.
466
+ Physical Review E 71, 036108 (2005)
467
+ .. [2] Milan Bradonjić, Aric Hagberg and Allon G. Percus,
468
+ Giant component and connectivity in geographical threshold graphs,
469
+ in Algorithms and Models for the Web-Graph (WAW 2007),
470
+ Antony Bonato and Fan Chung (Eds), pp. 209--216, 2007
471
+ """
472
+ G = nx.empty_graph(n)
473
+ # If no weights are provided, choose them from an exponential
474
+ # distribution.
475
+ if weight is None:
476
+ weight = {v: seed.expovariate(1) for v in G}
477
+ # If no positions are provided, choose uniformly random vectors in
478
+ # Euclidean space of the specified dimension.
479
+ if pos is None:
480
+ pos = {v: [seed.random() for i in range(dim)] for v in G}
481
+ # If no distance metric is provided, use Euclidean distance.
482
+ if metric is None:
483
+ metric = math.dist
484
+ nx.set_node_attributes(G, weight, weight_name)
485
+ nx.set_node_attributes(G, pos, pos_name)
486
+
487
+ # if p_dist is not supplied, use default r^-2
488
+ if p_dist is None:
489
+
490
+ def p_dist(r):
491
+ return r**-2
492
+
493
+ # Returns ``True`` if and only if the nodes whose attributes are
494
+ # ``du`` and ``dv`` should be joined, according to the threshold
495
+ # condition.
496
+ def should_join(pair):
497
+ u, v = pair
498
+ u_pos, v_pos = pos[u], pos[v]
499
+ u_weight, v_weight = weight[u], weight[v]
500
+ return (u_weight + v_weight) * p_dist(metric(u_pos, v_pos)) >= theta
501
+
502
+ G.add_edges_from(filter(should_join, combinations(G, 2)))
503
+ return G
504
+
505
+
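Because the test `(w_u + w_v) * p_dist(r) >= theta` is deterministic once positions and weights are fixed, the generator can be exercised without randomness. With unit weights and the default `p_dist(r) = r**-2`, the condition `2 * r**-2 >= 0.5` keeps exactly the pairs with `r <= 2` (a worked sketch with hand-picked positions):

>>> import networkx as nx
>>> pos = {0: (0, 0), 1: (0, 1), 2: (0, 2), 3: (0, 4)}
>>> w = {i: 1.0 for i in range(4)}
>>> G = nx.geographical_threshold_graph(4, theta=0.5, pos=pos, weight=w)
>>> sorted(G.edges())
[(0, 1), (0, 2), (1, 2), (2, 3)]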
506
+ @py_random_state(6)
507
+ @nx._dispatchable(graphs=None, returns_graph=True)
508
+ def waxman_graph(
509
+ n,
510
+ beta=0.4,
511
+ alpha=0.1,
512
+ L=None,
513
+ domain=(0, 0, 1, 1),
514
+ metric=None,
515
+ seed=None,
516
+ *,
517
+ pos_name="pos",
518
+ ):
519
+ r"""Returns a Waxman random graph.
520
+
521
+ The Waxman random graph model places `n` nodes uniformly at random
522
+ in a rectangular domain. Each pair of nodes at distance `d` is
523
+ joined by an edge with probability
524
+
525
+ .. math::
526
+ p = \beta \exp(-d / \alpha L).
527
+
528
+ This function implements both Waxman models, using the `L` keyword
529
+ argument.
530
+
531
+ * Waxman-1: if `L` is not specified, it is set to be the maximum distance
532
+ between any pair of nodes.
533
+ * Waxman-2: if `L` is specified, the distance between a pair of nodes is
534
+ chosen uniformly at random from the interval `[0, L]`.
535
+
536
+ Parameters
537
+ ----------
538
+ n : int or iterable
539
+ Number of nodes or iterable of nodes
540
+ beta: float
541
+ Model parameter
542
+ alpha: float
543
+ Model parameter
544
+ L : float, optional
545
+ Maximum distance between nodes. If not specified, the actual distance
546
+ is calculated.
547
+ domain : four-tuple of numbers, optional
548
+ Domain size, given as a tuple of the form `(x_min, y_min, x_max,
549
+ y_max)`.
550
+ metric : function
551
+ A metric on vectors of numbers (represented as lists or
552
+ tuples). This must be a function that accepts two lists (or
553
+ tuples) as input and yields a number as output. The function
554
+ must also satisfy the four requirements of a `metric`_.
555
+ Specifically, if $d$ is the function and $x$, $y$,
556
+ and $z$ are vectors in the graph, then $d$ must satisfy
557
+
558
+ 1. $d(x, y) \ge 0$,
559
+ 2. $d(x, y) = 0$ if and only if $x = y$,
560
+ 3. $d(x, y) = d(y, x)$,
561
+ 4. $d(x, z) \le d(x, y) + d(y, z)$.
562
+
563
+ If this argument is not specified, the Euclidean distance metric is
564
+ used.
565
+
566
+ .. _metric: https://en.wikipedia.org/wiki/Metric_%28mathematics%29
567
+
568
+ seed : integer, random_state, or None (default)
569
+ Indicator of random number generation state.
570
+ See :ref:`Randomness<randomness>`.
571
+ pos_name : string, default="pos"
572
+ The name of the node attribute which represents the position
573
+ in 2D coordinates of the node in the returned graph.
574
+
575
+ Returns
576
+ -------
577
+ Graph
578
+ A random Waxman graph, undirected and without self-loops. Each
579
+ node has a node attribute ``'pos'`` that stores the position of
580
+ that node in Euclidean space as generated by this function.
581
+
582
+ Examples
583
+ --------
584
+ Specify an alternate distance metric using the ``metric`` keyword
585
+ argument. For example, to use the "`taxicab metric`_" instead of the
586
+ default `Euclidean metric`_::
587
+
588
+ >>> dist = lambda x, y: sum(abs(a - b) for a, b in zip(x, y))
589
+ >>> G = nx.waxman_graph(10, 0.5, 0.1, metric=dist)
590
+
591
+ .. _taxicab metric: https://en.wikipedia.org/wiki/Taxicab_geometry
592
+ .. _Euclidean metric: https://en.wikipedia.org/wiki/Euclidean_distance
593
+
594
+ Notes
595
+ -----
596
+ Starting in NetworkX 2.0 the parameters alpha and beta align with their
597
+ usual roles in the probability distribution. In earlier versions their
598
+ positions in the expression were reversed. Their positions in the calling
599
+ sequence were reversed as well to minimize backward incompatibility.
600
+
601
+ References
602
+ ----------
603
+ .. [1] B. M. Waxman, *Routing of multipoint connections*.
604
+ IEEE J. Select. Areas Commun. 6(9),(1988) 1617--1622.
605
+ """
606
+ G = nx.empty_graph(n)
607
+ (xmin, ymin, xmax, ymax) = domain
608
+ # Each node gets a uniformly random position in the given rectangle.
609
+ pos = {v: (seed.uniform(xmin, xmax), seed.uniform(ymin, ymax)) for v in G}
610
+ nx.set_node_attributes(G, pos, pos_name)
611
+ # If no distance metric is provided, use Euclidean distance.
612
+ if metric is None:
613
+ metric = math.dist
614
+ # If the maximum distance L is not specified (that is, we are in the
615
+ # Waxman-1 model), then find the maximum distance between any pair
616
+ # of nodes.
617
+ #
618
+ # In the Waxman-1 model, join nodes randomly based on distance. In
619
+ # the Waxman-2 model, join randomly based on random l.
620
+ if L is None:
621
+ L = max(metric(x, y) for x, y in combinations(pos.values(), 2))
622
+
623
+ def dist(u, v):
624
+ return metric(pos[u], pos[v])
625
+
626
+ else:
627
+
628
+ def dist(u, v):
629
+ return seed.random() * L
630
+
631
+ # `pair` is the pair of nodes to decide whether to join.
632
+ def should_join(pair):
633
+ return seed.random() < beta * math.exp(-dist(*pair) / (alpha * L))
634
+
635
+ G.add_edges_from(filter(should_join, combinations(G, 2)))
636
+ return G
637
+
638
+
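A boundary-case sketch for the model above: with `beta=0` the connection probability $\beta \exp(-d / \alpha L)$ is identically zero, so the generator returns the empty graph on `n` nodes (illustrative only; the `beta` and `alpha` values here are arbitrary):

>>> import networkx as nx
>>> G = nx.waxman_graph(10, beta=0, alpha=0.1, seed=7)
>>> len(G), G.number_of_edges()
(10, 0)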
639
+ @py_random_state(5)
640
+ @nx._dispatchable(graphs=None, returns_graph=True)
641
+ def navigable_small_world_graph(n, p=1, q=1, r=2, dim=2, seed=None):
642
+ r"""Returns a navigable small-world graph.
643
+
644
+ A navigable small-world graph is a directed grid with additional long-range
645
+ connections that are chosen randomly.
646
+
647
+ [...] we begin with a set of nodes [...] that are identified with the set
648
+ of lattice points in an $n \times n$ square,
649
+ $\{(i, j): i \in \{1, 2, \ldots, n\}, j \in \{1, 2, \ldots, n\}\}$,
650
+ and we define the *lattice distance* between two nodes $(i, j)$ and
651
+ $(k, l)$ to be the number of "lattice steps" separating them:
652
+ $d((i, j), (k, l)) = |k - i| + |l - j|$.
653
+
654
+ For a universal constant $p \ge 1$, the node $u$ has a directed edge to
655
+ every other node within lattice distance $p$---these are its *local
656
+ contacts*. For universal constants $q \ge 0$ and $r \ge 0$ we also
657
+ construct directed edges from $u$ to $q$ other nodes (the *long-range
658
+ contacts*) using independent random trials; the $i$th directed edge from
659
+ $u$ has endpoint $v$ with probability proportional to $[d(u,v)]^{-r}$.
660
+
661
+ -- [1]_
662
+
663
+ Parameters
664
+ ----------
665
+ n : int
666
+ The length of one side of the lattice; the number of nodes in
667
+ the graph is therefore $n^2$.
668
+ p : int
669
+ The diameter of short range connections. Each node is joined with every
670
+ other node within this lattice distance.
671
+ q : int
672
+ The number of long-range connections for each node.
673
+ r : float
674
+ Exponent for decaying probability of connections. The probability of
675
+ connecting to a node at lattice distance $d$ is $1/d^r$.
676
+ dim : int
677
+ Dimension of grid
678
+ seed : integer, random_state, or None (default)
679
+ Indicator of random number generation state.
680
+ See :ref:`Randomness<randomness>`.
681
+
682
+ References
683
+ ----------
684
+ .. [1] J. Kleinberg. The small-world phenomenon: An algorithmic
685
+ perspective. Proc. 32nd ACM Symposium on Theory of Computing, 2000.
686
+ """
687
+ if p < 1:
688
+ raise nx.NetworkXException("p must be >= 1")
689
+ if q < 0:
690
+ raise nx.NetworkXException("q must be >= 0")
691
+ if r < 0:
692
+ raise nx.NetworkXException("r must be >= 0")
693
+
694
+ G = nx.DiGraph()
695
+ nodes = list(product(range(n), repeat=dim))
696
+ for p1 in nodes:
697
+ probs = [0]
698
+ for p2 in nodes:
699
+ if p1 == p2:
700
+ continue
701
+ d = sum((abs(b - a) for a, b in zip(p1, p2)))
702
+ if d <= p:
703
+ G.add_edge(p1, p2)
704
+ probs.append(d**-r)
705
+ cdf = list(accumulate(probs))
706
+ for _ in range(q):
707
+ target = nodes[bisect_left(cdf, seed.uniform(0, cdf[-1]))]
708
+ G.add_edge(p1, target)
709
+ return G
710
+
711
+
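A small structural sketch: with `q=0` there are no long-range contacts, so the result is just the directed lattice on $n^2$ tuple-labelled nodes, independent of the seed:

>>> import networkx as nx
>>> G = nx.navigable_small_world_graph(3, p=1, q=0, seed=4)
>>> len(G)
9
>>> G.has_edge((0, 0), (0, 1)) and G.has_edge((0, 1), (0, 0))
True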
712
+ @py_random_state(7)
713
+ @nx._dispatchable(graphs=None, returns_graph=True)
714
+ def thresholded_random_geometric_graph(
715
+ n,
716
+ radius,
717
+ theta,
718
+ dim=2,
719
+ pos=None,
720
+ weight=None,
721
+ p=2,
722
+ seed=None,
723
+ *,
724
+ pos_name="pos",
725
+ weight_name="weight",
726
+ ):
727
+ r"""Returns a thresholded random geometric graph in the unit cube.
728
+
729
+ The thresholded random geometric graph [1] model places `n` nodes
730
+ uniformly at random in the unit cube of dimensions `dim`. Each node
731
+ `u` is assigned a weight :math:`w_u`. Two nodes `u` and `v` are
732
+ joined by an edge if they are within the maximum connection distance
733
+ `radius`, computed by the `p`-Minkowski distance metric, and the sum of
734
+ their weights :math:`w_u + w_v` is greater than or equal
735
+ to the threshold parameter `theta`.
736
+
737
+ Edges within `radius` of each other are determined using a KDTree when
738
+ SciPy is available. This reduces the time complexity from :math:`O(n^2)`
739
+ to roughly :math:`O(n \log n)`.
740
+
741
+ Parameters
742
+ ----------
743
+ n : int or iterable
744
+ Number of nodes or iterable of nodes
745
+ radius: float
746
+ Distance threshold value
747
+ theta: float
748
+ Threshold value
749
+ dim : int, optional
750
+ Dimension of graph
751
+ pos : dict, optional
752
+ A dictionary keyed by node with node positions as values.
753
+ weight : dict, optional
754
+ Node weights as a dictionary of numbers keyed by node.
755
+ p : float, optional (default 2)
756
+ Which Minkowski distance metric to use. `p` has to meet the condition
757
+ ``1 <= p <= infinity``.
758
+
759
+ If this argument is not specified, the :math:`L^2` metric
760
+ (the Euclidean distance metric), p = 2 is used.
761
+
762
+ This should not be confused with the `p` of an Erdős-Rényi random
763
+ graph, which represents probability.
764
+ seed : integer, random_state, or None (default)
765
+ Indicator of random number generation state.
766
+ See :ref:`Randomness<randomness>`.
767
+ pos_name : string, default="pos"
768
+ The name of the node attribute which represents the position
769
+ in 2D coordinates of the node in the returned graph.
770
+ weight_name : string, default="weight"
771
+ The name of the node attribute which represents the weight
772
+ of the node in the returned graph.
773
+
774
+ Returns
775
+ -------
776
+ Graph
777
+ A thresholded random geographic graph, undirected and without
778
+ self-loops.
779
+
780
+ Each node has a node attribute ``'pos'`` that stores the
781
+ position of that node in Euclidean space as provided by the
782
+ ``pos`` keyword argument or, if ``pos`` was not provided, as
783
+ generated by this function. Similarly, each node has a node
784
+ attribute ``'weight'`` that stores the weight of that node as
785
+ provided or as generated.
786
+
787
+ Examples
788
+ --------
789
+ Default Graph:
790
+
791
+ G = nx.thresholded_random_geometric_graph(50, 0.2, 0.1)
792
+
793
+ Custom Graph:
794
+
795
+ Create a thresholded random geometric graph on 50 uniformly distributed
796
+ nodes where nodes are joined by an edge if the sum of their weights, drawn
797
+ from an exponential distribution with rate = 5, is >= theta = 0.1 and their
798
+ Euclidean distance is at most 0.2.
799
+
800
+ Notes
801
+ -----
802
+ This uses a *k*-d tree to build the graph.
803
+
804
+ The `pos` keyword argument can be used to specify node positions so you
805
+ can create an arbitrary distribution and domain for positions.
806
+
807
+ For example, to use a 2D Gaussian distribution of node positions with mean
808
+ (0, 0) and standard deviation 2 (see the example below).
809
+
810
+ If weights are not specified they are assigned to nodes by drawing randomly
811
+ from the exponential distribution with rate parameter :math:`\lambda=1`.
812
+ To specify weights from a different distribution, use the `weight` keyword
813
+ argument::
814
+
817
+ >>> import random
818
+ >>> import math
819
+ >>> n = 50
820
+ >>> pos = {i: (random.gauss(0, 2), random.gauss(0, 2)) for i in range(n)}
821
+ >>> w = {i: random.expovariate(5.0) for i in range(n)}
822
+ >>> G = nx.thresholded_random_geometric_graph(n, 0.2, 0.1, 2, pos, w)
823
+
824
+ References
825
+ ----------
826
+ .. [1] http://cole-maclean.github.io/blog/files/thesis.pdf
827
+
828
+ """
829
+ G = nx.empty_graph(n)
830
+ G.name = f"thresholded_random_geometric_graph({n}, {radius}, {theta}, {dim})"
831
+ # If no weights are provided, choose them from an exponential
832
+ # distribution.
833
+ if weight is None:
834
+ weight = {v: seed.expovariate(1) for v in G}
835
+ # If no positions are provided, choose uniformly random vectors in
836
+ # Euclidean space of the specified dimension.
837
+ if pos is None:
838
+ pos = {v: [seed.random() for i in range(dim)] for v in G}
839
+ # If no distance metric is provided, use Euclidean distance.
840
+ nx.set_node_attributes(G, weight, weight_name)
841
+ nx.set_node_attributes(G, pos, pos_name)
842
+
843
+ edges = (
844
+ (u, v)
845
+ for u, v in _geometric_edges(G, radius, p, pos_name)
846
+ if weight[u] + weight[v] >= theta
847
+ )
848
+ G.add_edges_from(edges)
849
+ return G
850
+
851
+
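As with the geographical threshold model, fixing positions and weights makes the output deterministic, which gives a compact worked example of the two-part condition (within `radius` and weight sum at least `theta`); the values below are hand-picked for illustration:

>>> import networkx as nx
>>> pos = {0: (0.0, 0.0), 1: (0.1, 0.0), 2: (0.2, 0.0)}
>>> w = {0: 0.05, 1: 0.05, 2: 0.2}
>>> G = nx.thresholded_random_geometric_graph(3, 0.15, 0.2, pos=pos, weight=w)
>>> sorted(G.edges())  # (0, 1) is close enough, but its weight sum 0.1 < 0.2
[(1, 2)]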
852
+ @py_random_state(5)
853
+ @nx._dispatchable(graphs=None, returns_graph=True)
854
+ def geometric_soft_configuration_graph(
855
+ *, beta, n=None, gamma=None, mean_degree=None, kappas=None, seed=None
856
+ ):
857
+ r"""Returns a random graph from the geometric soft configuration model.
858
+
859
+ The $\mathbb{S}^1$ model [1]_ is the geometric soft configuration model
860
+ which is able to explain many fundamental features of real networks such as
861
+ the small-world property, heterogeneous degree distributions, high levels of
862
+ clustering, and self-similarity.
863
+
864
+ In the geometric soft configuration model, a node $i$ is assigned two hidden
865
+ variables: a hidden degree $\kappa_i$, quantifying its popularity, influence,
866
+ or importance, and an angular position $\theta_i$ in a circle abstracting the
867
+ similarity space, where angular distances between nodes are a proxy for their
868
+ similarity. Focusing on the angular position, this model is often called
869
+ the $\mathbb{S}^1$ model (a one-dimensional sphere). The circle's radius is
870
+ adjusted to $R = N/2\pi$, where $N$ is the number of nodes, so that the density
871
+ is set to 1 without loss of generality.
872
+
873
+ The connection probability between any pair of nodes increases with
874
+ the product of their hidden degrees (i.e., their combined popularities),
875
+ and decreases with the angular distance between the two nodes.
876
+ Specifically, nodes $i$ and $j$ are connected with the probability
877
+
878
+ $p_{ij} = \frac{1}{1 + \frac{d_{ij}^\beta}{\left(\mu \kappa_i \kappa_j\right)^{\max(1, \beta)}}}$
879
+
880
+ where $d_{ij} = R\Delta\theta_{ij}$ is the arc length of the circle between
881
+ nodes $i$ and $j$ separated by an angular distance $\Delta\theta_{ij}$.
882
+ Parameters $\mu$ and $\beta$ (also called inverse temperature) control the
883
+ average degree and the clustering coefficient, respectively.
884
+
885
+ It can be shown [2]_ that the model undergoes a structural phase transition
886
+ at $\beta=1$ so that for $\beta<1$ networks are unclustered in the thermodynamic
887
+ limit (when $N\to \infty$) whereas for $\beta>1$ the ensemble generates
888
+ networks with finite clustering coefficient.
889
+
890
+ The $\mathbb{S}^1$ model can be expressed as a purely geometric model
891
+ $\mathbb{H}^2$ in the hyperbolic plane [3]_ by mapping the hidden degree of
892
+ each node into a radial coordinate as
893
+
894
+ $r_i = \hat{R} - \frac{2 \max(1, \beta)}{\beta \zeta} \ln \left(\frac{\kappa_i}{\kappa_0}\right)$
895
+
896
+ where $\hat{R}$ is the radius of the hyperbolic disk and $\zeta$ is the curvature,
897
+
898
+ $\hat{R} = \frac{2}{\zeta} \ln \left(\frac{N}{\pi}\right)
899
+ - \frac{2\max(1, \beta)}{\beta \zeta} \ln (\mu \kappa_0^2)$
900
+
901
+ The connection probability then reads
902
+
903
+ $p_{ij} = \frac{1}{1 + \exp\left({\frac{\beta\zeta}{2} (x_{ij} - \hat{R})}\right)}$
904
+
905
+ where
906
+
907
+ $x_{ij} = r_i + r_j + \frac{2}{\zeta} \ln \frac{\Delta\theta_{ij}}{2}$
908
+
909
+ is a good approximation of the hyperbolic distance between two nodes separated
910
+ by an angular distance $\Delta\theta_{ij}$ with radial coordinates $r_i$ and $r_j$.
911
+ For $\beta > 1$, the curvature $\zeta = 1$, for $\beta < 1$, $\zeta = \beta^{-1}$.
912
+
913
+
914
+ Parameters
915
+ ----------
916
+ Either `n`, `gamma`, `mean_degree` are provided or `kappas`. The values of
917
+ `n`, `gamma`, `mean_degree` (if provided) are used to construct a random
918
+ kappa-dict keyed by node with values sampled from a power-law distribution.
919
+
920
+ beta : positive number
921
+ Inverse temperature, controlling the clustering coefficient.
922
+ n : int (default: None)
923
+ Size of the network (number of nodes).
924
+ If not provided, `kappas` must be provided and holds the nodes.
925
+ gamma : float (default: None)
926
+ Exponent of the power-law distribution for hidden degrees `kappas`.
927
+ If not provided, `kappas` must be provided directly.
928
+ mean_degree : float (default: None)
929
+ The mean degree in the network.
930
+ If not provided, `kappas` must be provided directly.
931
+ kappas : dict (default: None)
932
+ A dict keyed by node to its hidden degree value.
933
+ If not provided, random values are computed based on a power-law
934
+ distribution using `n`, `gamma` and `mean_degree`.
935
+ seed : int, random_state, or None (default)
936
+ Indicator of random number generation state.
937
+ See :ref:`Randomness<randomness>`.
938
+
939
+ Returns
940
+ -------
941
+ Graph
942
+ A random geometric soft configuration graph (undirected with no self-loops).
943
+ Each node has three node-attributes:
944
+
945
+ - ``kappa`` that represents the hidden degree.
946
+
947
+ - ``theta`` the position in the similarity space ($\mathbb{S}^1$) which is
948
+ also the angular position in the hyperbolic plane.
949
+
950
+ - ``radius`` the radial position in the hyperbolic plane
951
+ (based on the hidden degree).
952
+
953
+
954
+ Examples
955
+ --------
956
+ Generate a network with specified parameters:
957
+
958
+ >>> G = nx.geometric_soft_configuration_graph(beta=1.5, n=100, gamma=2.7, mean_degree=5)
959
+
960
+ Create a geometric soft configuration graph with 100 nodes. The $\beta$ parameter
961
+ is set to 1.5 and the exponent of the powerlaw distribution of the hidden
962
+ degrees is 2.7 with mean value of 5.
963
+
964
+ Generate a network with predefined hidden degrees:
965
+
966
+ >>> kappas = {i: 10 for i in range(100)}
967
+ >>> G = nx.geometric_soft_configuration_graph(beta=2.5, kappas=kappas)
968
+
969
+ Create a geometric soft configuration graph with 100 nodes. The $\beta$ parameter
970
+ is set to 2.5 and all nodes with hidden degree $\kappa=10$.
971
+
972
+
973
+ References
974
+ ----------
975
+ .. [1] Serrano, M. Á., Krioukov, D., & Boguñá, M. (2008). Self-similarity
976
+ of complex networks and hidden metric spaces. Physical review letters, 100(7), 078701.
977
+
978
+ .. [2] van der Kolk, J., Serrano, M. Á., & Boguñá, M. (2022). An anomalous
979
+ topological phase transition in spatial random graphs. Communications Physics, 5(1), 245.
980
+
981
+ .. [3] Krioukov, D., Papadopoulos, F., Kitsak, M., Vahdat, A., & Boguná, M. (2010).
982
+ Hyperbolic geometry of complex networks. Physical Review E, 82(3), 036106.
983
+
984
+ """
985
+ if beta <= 0:
986
+ raise nx.NetworkXError("The parameter beta cannot be smaller than or equal to 0.")
987
+
988
+ if kappas is not None:
989
+ if not all((n is None, gamma is None, mean_degree is None)):
990
+ raise nx.NetworkXError(
991
+ "When kappas is input, n, gamma and mean_degree must not be."
992
+ )
993
+
994
+ n = len(kappas)
995
+ mean_degree = sum(kappas.values()) / len(kappas)  # average the values, not the dict keys
996
+ else:
997
+ if any((n is None, gamma is None, mean_degree is None)):
998
+ raise nx.NetworkXError(
999
+ "Please provide either kappas, or all 3 of: n, gamma and mean_degree."
1000
+ )
1001
+
1002
+ # Generate `n` hidden degrees from a powerlaw distribution
1003
+ # with given exponent `gamma` and mean value `mean_degree`
1004
+ gam_ratio = (gamma - 2) / (gamma - 1)
1005
+ kappa_0 = mean_degree * gam_ratio * (1 - 1 / n) / (1 - 1 / n**gam_ratio)
1006
+ base = 1 - 1 / n
1007
+ power = 1 / (1 - gamma)
1008
+ kappas = {i: kappa_0 * (1 - seed.random() * base) ** power for i in range(n)}
1009
+
1010
+ G = nx.Graph()
1011
+ R = n / (2 * math.pi)
1012
+
1013
+ # Approximate values for mu in the thermodynamic limit (when n -> infinity)
1014
+ if beta > 1:
1015
+ mu = beta * math.sin(math.pi / beta) / (2 * math.pi * mean_degree)
1016
+ elif beta == 1:
1017
+ mu = 1 / (2 * mean_degree * math.log(n))
1018
+ else:
1019
+ mu = (1 - beta) / (2**beta * mean_degree * n ** (1 - beta))
1020
+
1021
+ # Generate random positions on a circle
1022
+ thetas = {k: seed.uniform(0, 2 * math.pi) for k in kappas}
1023
+
1024
+ for u in kappas:
1025
+ for v in list(G):
1026
+ angle = math.pi - math.fabs(math.pi - math.fabs(thetas[u] - thetas[v]))
1027
+ dij = math.pow(R * angle, beta)
1028
+ mu_kappas = math.pow(mu * kappas[u] * kappas[v], max(1, beta))
1029
+ p_ij = 1 / (1 + dij / mu_kappas)
1030
+
1031
+ # Create an edge with a certain connection probability
1032
+ if seed.random() < p_ij:
1033
+ G.add_edge(u, v)
1034
+ G.add_node(u)
1035
+
1036
+ nx.set_node_attributes(G, thetas, "theta")
1037
+ nx.set_node_attributes(G, kappas, "kappa")
1038
+
1039
+ # Map hidden degrees into the radial coordinates
1040
+ zeta = 1 if beta > 1 else 1 / beta
1041
+ kappa_min = min(kappas.values())
1042
+ R_c = 2 * max(1, beta) / (beta * zeta)
1043
+ R_hat = (2 / zeta) * math.log(n / math.pi) - R_c * math.log(mu * kappa_min)
1044
+ radii = {node: R_hat - R_c * math.log(kappa) for node, kappa in kappas.items()}
1045
+ nx.set_node_attributes(G, radii, "radius")
1046
+
1047
+ return G
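A short sketch of the attribute contract described in the docstring above (edges are random, so only structural invariants are checked):

>>> import networkx as nx
>>> kappas = {i: 10 for i in range(20)}
>>> G = nx.geometric_soft_configuration_graph(beta=2.5, kappas=kappas, seed=3)
>>> len(G)
20
>>> set(G.nodes[0]) == {"kappa", "theta", "radius"}
True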
llmeval-env/lib/python3.10/site-packages/networkx/generators/harary_graph.py ADDED
@@ -0,0 +1,199 @@
1
+ """Generators for Harary graphs
2
+
3
+ This module gives two generators for the Harary graph, which was
4
+ introduced by the famous mathematician Frank Harary in his 1962 work [H]_.
5
+ The first generator gives the Harary graph that maximizes the node
6
+ connectivity for a given number of nodes and a given number of edges.
7
+ The second generator gives the Harary graph that minimizes
8
+ the number of edges in the graph for a given node connectivity and
9
+ number of nodes.
10
+
11
+ References
12
+ ----------
13
+ .. [H] Harary, F. "The Maximum Connectivity of a Graph."
14
+ Proc. Nat. Acad. Sci. USA 48, 1142-1146, 1962.
15
+
16
+ """
17
+
18
+ import networkx as nx
19
+ from networkx.exception import NetworkXError
20
+
21
+ __all__ = ["hnm_harary_graph", "hkn_harary_graph"]
22
+
23
+
24
+ @nx._dispatchable(graphs=None, returns_graph=True)
25
+ def hnm_harary_graph(n, m, create_using=None):
26
+ """Returns the Harary graph with given numbers of nodes and edges.
27
+
28
+ The Harary graph $H_{n,m}$ is the graph that maximizes node connectivity
29
+ with $n$ nodes and $m$ edges.
30
+
31
+ This maximum node connectivity is known to be floor($2m/n$). [1]_
32
+
33
+ Parameters
34
+ ----------
35
+ n: integer
36
+ The number of nodes the generated graph is to contain
37
+
38
+ m: integer
39
+ The number of edges the generated graph is to contain
40
+
41
+ create_using : NetworkX graph constructor, optional Graph type
42
+ to create (default=nx.Graph). If graph instance, then cleared
43
+ before populated.
44
+
45
+ Returns
46
+ -------
47
+ NetworkX graph
48
+ The Harary graph $H_{n,m}$.
49
+
50
+ See Also
51
+ --------
52
+ hkn_harary_graph
53
+
54
+ Notes
55
+ -----
56
+ This algorithm runs in $O(m)$ time.
57
+ It is implemented by following the Reference [2]_.
58
+
59
+ References
60
+ ----------
61
+ .. [1] F. T. Boesch, A. Satyanarayana, and C. L. Suffel,
62
+ "A Survey of Some Network Reliability Analysis and Synthesis Results,"
63
+ Networks, pp. 99-107, 2009.
64
+
65
+ .. [2] Harary, F. "The Maximum Connectivity of a Graph."
66
+ Proc. Nat. Acad. Sci. USA 48, 1142-1146, 1962.
67
+ """
68
+
69
+ if n < 1:
70
+ raise NetworkXError("The number of nodes must be >= 1!")
71
+ if m < n - 1:
72
+ raise NetworkXError("The number of edges must be >= n - 1!")
73
+ if m > n * (n - 1) // 2:
74
+ raise NetworkXError("The number of edges must be <= n(n-1)/2")
75
+
76
+ # Construct an empty graph with n nodes first
77
+ H = nx.empty_graph(n, create_using)
78
+ # Get the floor of average node degree
79
+ d = 2 * m // n
80
+
81
+ # Test the parity of n and d
82
+ if (n % 2 == 0) or (d % 2 == 0):
83
+ # Start with a regular graph of d degrees
84
+ offset = d // 2
85
+ for i in range(n):
86
+ for j in range(1, offset + 1):
87
+ H.add_edge(i, (i - j) % n)
88
+ H.add_edge(i, (i + j) % n)
89
+ if d & 1:
90
+ # in case d is odd; n must be even in this case
91
+ half = n // 2
92
+ for i in range(half):
93
+ # add edges diagonally
94
+ H.add_edge(i, i + half)
95
+ # Get the remainder of 2*m modulo n
96
+ r = 2 * m % n
97
+ if r > 0:
98
+ # add remaining edges at offset+1
99
+ for i in range(r // 2):
100
+ H.add_edge(i, i + offset + 1)
101
+ else:
102
+ # Start with a regular graph of (d - 1) degrees
103
+ offset = (d - 1) // 2
104
+ for i in range(n):
105
+ for j in range(1, offset + 1):
106
+ H.add_edge(i, (i - j) % n)
107
+ H.add_edge(i, (i + j) % n)
108
+ half = n // 2
109
+ for i in range(m - n * offset):
110
+ # add the remaining m - n*offset edges between i and i+half
111
+ H.add_edge(i, (i + half) % n)
112
+
113
+ return H
114
+
115
+
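A worked check of the construction above for $H_{10,23}$: the floor of the average degree is $d = \lfloor 46/10 \rfloor = 4$, the circulant part contributes 20 edges, and the remainder $r = 46 \bmod 10 = 6$ adds $r/2 = 3$ chords, giving 23 edges with minimum degree 4 (matching the claimed connectivity $\lfloor 2m/n \rfloor$):

>>> import networkx as nx
>>> H = nx.hnm_harary_graph(10, 23)
>>> H.number_of_edges()
23
>>> min(d for _, d in H.degree())
4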
116
+ @nx._dispatchable(graphs=None, returns_graph=True)
117
+ def hkn_harary_graph(k, n, create_using=None):
118
+ """Returns the Harary graph with given node connectivity and node number.
119
+
120
+ The Harary graph $H_{k,n}$ is the graph that minimizes the number of
121
+ edges needed with given node connectivity $k$ and node number $n$.
122
+
123
+ This smallest number of edges is known to be ceil($kn/2$) [1]_.
124
+
125
+ Parameters
126
+ ----------
127
+ k: integer
128
+ The node connectivity of the generated graph
129
+
130
+ n: integer
131
+ The number of nodes the generated graph is to contain
132
+
133
+ create_using : NetworkX graph constructor, optional Graph type
134
+ to create (default=nx.Graph). If graph instance, then cleared
135
+ before populated.
136
+
137
+ Returns
138
+ -------
139
+ NetworkX graph
140
+ The Harary graph $H_{k,n}$.
141
+
142
+ See Also
143
+ --------
144
+ hnm_harary_graph
145
+
146
+ Notes
147
+ -----
148
+ This algorithm runs in $O(kn)$ time.
149
+ It is implemented by following the Reference [2]_.
150
+
151
+ References
152
+ ----------
153
+ .. [1] Weisstein, Eric W. "Harary Graph." From MathWorld--A Wolfram Web
154
+ Resource. http://mathworld.wolfram.com/HararyGraph.html.
155
+
156
+ .. [2] Harary, F. "The Maximum Connectivity of a Graph."
157
+ Proc. Nat. Acad. Sci. USA 48, 1142-1146, 1962.
158
+ """
159
+
160
+ if k < 1:
161
+ raise NetworkXError("The node connectivity must be >= 1!")
162
+ if n < k + 1:
163
+ raise NetworkXError("The number of nodes must be >= k + 1!")
164
+
165
+ # in case of connectivity 1, simply return the path graph
166
+ if k == 1:
167
+ H = nx.path_graph(n, create_using)
168
+ return H
169
+
170
+ # Construct an empty graph with n nodes first
171
+ H = nx.empty_graph(n, create_using)
172
+
173
+ # Test the parity of k and n
174
+ if (k % 2 == 0) or (n % 2 == 0):
175
+ # Construct a regular graph with k degrees
176
+ offset = k // 2
177
+ for i in range(n):
178
+ for j in range(1, offset + 1):
179
+ H.add_edge(i, (i - j) % n)
180
+ H.add_edge(i, (i + j) % n)
181
+ if k & 1:
182
+ # odd degree; n must be even in this case
183
+ half = n // 2
184
+ for i in range(half):
185
+ # add edges diagonally
186
+ H.add_edge(i, i + half)
187
+ else:
188
+ # Construct a regular graph with (k - 1) degrees
189
+ offset = (k - 1) // 2
190
+ for i in range(n):
191
+ for j in range(1, offset + 1):
192
+ H.add_edge(i, (i - j) % n)
193
+ H.add_edge(i, (i + j) % n)
194
+ half = n // 2
195
+ for i in range(half + 1):
196
+ # add half+1 edges between i and i+half
197
+ H.add_edge(i, (i + half) % n)
198
+
199
+ return H
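A matching check of the odd-odd branch above for $H_{3,7}$: the offset-1 circulant contributes 7 edges and the `half + 1 = 4` diagonal chords bring the total to $\lceil kn/2 \rceil = 11$, with minimum degree equal to the connectivity $k = 3$:

>>> import networkx as nx
>>> H = nx.hkn_harary_graph(3, 7)
>>> H.number_of_edges()
11
>>> min(d for _, d in H.degree())
3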
llmeval-env/lib/python3.10/site-packages/networkx/generators/internet_as_graphs.py ADDED
@@ -0,0 +1,441 @@
1
+ """Generates graphs resembling the Internet Autonomous System network"""
2
+
3
+ import networkx as nx
4
+ from networkx.utils import py_random_state
5
+
6
+ __all__ = ["random_internet_as_graph"]
7
+
8
+
9
+ def uniform_int_from_avg(a, m, seed):
10
+ """Pick a random integer with uniform probability.
11
+
12
+ Returns a random integer uniformly taken from a distribution with
13
+ minimum value 'a' and average value 'm', X~U(a,b), E[X]=m, X in N where
14
+ b = 2*m - a.
15
+
16
+ Notes
17
+ -----
18
+ p = (b-floor(b))/2
19
+ X = X1 + X2; X1~U(a,floor(b)), X2~B(p)
20
+ E[X] = E[X1] + E[X2] = (floor(b)+a)/2 + (b-floor(b))/2 = (b+a)/2 = m
21
+ """
22
+
23
+ from math import floor
24
+
25
+ assert m >= a
26
+ b = 2 * m - a
27
+ p = (b - floor(b)) / 2
28
+ X1 = round(seed.random() * (floor(b) - a) + a)
29
+ if seed.random() < p:
30
+ X2 = 1
31
+ else:
32
+ X2 = 0
33
+ return X1 + X2
34
+
35
+
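An empirical sketch of the decomposition in the Notes above (the helper is module-internal, so it is imported from the module directly; with `a=1, m=2.5` we get `b=4`, so samples stay in `[1, 4]` and average roughly 2.5):

>>> import random
>>> from networkx.generators.internet_as_graphs import uniform_int_from_avg
>>> rng = random.Random(0)
>>> xs = [uniform_int_from_avg(1, 2.5, rng) for _ in range(10000)]
>>> min(xs) >= 1 and max(xs) <= 4
True
>>> abs(sum(xs) / len(xs) - 2.5) < 0.1
True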
36
+ def choose_pref_attach(degs, seed):
37
+ """Pick a random value, with a probability given by its weight.
38
+
39
+ Returns a random choice among degs keys, each of which has a
40
+ probability proportional to the corresponding dictionary value.
41
+
42
+ Parameters
43
+ ----------
44
+ degs: dictionary
45
+ It contains the possible values (keys) and the corresponding
46
+ probabilities (values)
47
+ seed: random state
48
+
49
+ Returns
50
+ -------
51
+ v: object
52
+ A key of degs or None if degs is empty
53
+ """
54
+
55
+ if len(degs) == 0:
56
+ return None
57
+ s = sum(degs.values())
58
+ if s == 0:
59
+ return seed.choice(list(degs.keys()))
60
+ v = seed.random() * s
61
+
62
+ nodes = list(degs.keys())
63
+ i = 0
64
+ acc = degs[nodes[i]]
65
+ while v > acc:
66
+ i += 1
67
+ acc += degs[nodes[i]]
68
+ return nodes[i]
69
+
70
+
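A tiny sketch of the weighted selection above (again importing the internal helper directly; with weights 1 and 3, key "b" should be drawn roughly three times as often as "a"):

>>> import random
>>> from networkx.generators.internet_as_graphs import choose_pref_attach
>>> rng = random.Random(0)
>>> picks = [choose_pref_attach({"a": 1, "b": 3}, rng) for _ in range(8000)]
>>> 0.7 < picks.count("b") / len(picks) < 0.8
True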
71
+ class AS_graph_generator:
72
+ """Generates random internet AS graphs."""
73
+
74
+ def __init__(self, n, seed):
75
+ """Initializes variables. The numeric constants are taken from [1].
76
+
77
+ Parameters
78
+ ----------
79
+ n: integer
80
+ Number of graph nodes
81
+ seed: random state
82
+ Indicator of random number generation state.
83
+ See :ref:`Randomness<randomness>`.
84
+
85
+ Returns
86
+ -------
87
+ GG: AS_graph_generator object
88
+
89
+ References
90
+ ----------
91
+ [1] A. Elmokashfi, A. Kvalbein and C. Dovrolis, "On the Scalability of
92
+ BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas
93
+ in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010.
94
+ """
95
+
96
+ self.seed = seed
97
+ self.n_t = min(n, round(self.seed.random() * 2 + 4)) # num of T nodes
98
+ self.n_m = round(0.15 * n) # number of M nodes
99
+ self.n_cp = round(0.05 * n) # number of CP nodes
100
+ self.n_c = max(0, n - self.n_t - self.n_m - self.n_cp) # number of C nodes
101
+
102
+ self.d_m = 2 + (2.5 * n) / 10000 # average multihoming degree for M nodes
103
+ self.d_cp = 2 + (1.5 * n) / 10000 # avg multihoming degree for CP nodes
104
+ self.d_c = 1 + (5 * n) / 100000 # average multihoming degree for C nodes
105
+
106
+ self.p_m_m = 1 + (2 * n) / 10000 # avg num of peer edges between M and M
107
+ self.p_cp_m = 0.2 + (2 * n) / 10000 # avg num of peer edges between CP, M
108
+ self.p_cp_cp = 0.05 + (2 * n) / 100000 # avg num of peer edges btwn CP, CP
109
+
110
+ self.t_m = 0.375 # probability M's provider is T
111
+ self.t_cp = 0.375 # probability CP's provider is T
112
+ self.t_c = 0.125 # probability C's provider is T
113
+
114
+ def t_graph(self):
115
+ """Generates the core mesh network of tier one nodes of an AS graph.
116
+
117
+ Returns
118
+ -------
119
+ G: Networkx Graph
120
+ Core network
121
+ """
122
+
123
+ self.G = nx.Graph()
124
+ for i in range(self.n_t):
125
+ self.G.add_node(i, type="T")
126
+ for r in self.regions:
127
+ self.regions[r].add(i)
128
+ for j in self.G.nodes():
129
+ if i != j:
130
+ self.add_edge(i, j, "peer")
131
+ self.customers[i] = set()
132
+ self.providers[i] = set()
133
+ return self.G
134
+
135
+ def add_edge(self, i, j, kind):
136
+ if kind == "transit":
137
+ customer = str(i)
138
+ else:
139
+ customer = "none"
140
+ self.G.add_edge(i, j, type=kind, customer=customer)
141
+
142
+ def choose_peer_pref_attach(self, node_list):
143
+ """Pick a node with a probability weighted by its peer degree.
144
+
145
+ Pick a node from node_list with preferential attachment
146
+ computed only on their peer degree
147
+ """
148
+
149
+ d = {}
150
+ for n in node_list:
151
+ d[n] = self.G.nodes[n]["peers"]
152
+ return choose_pref_attach(d, self.seed)
153
+
154
+ def choose_node_pref_attach(self, node_list):
155
+ """Pick a node with a probability weighted by its degree.
156
+
157
+ Pick a node from node_list with preferential attachment
158
+ computed on their degree
159
+ """
160
+
161
+ degs = dict(self.G.degree(node_list))
162
+ return choose_pref_attach(degs, self.seed)
163
+
164
+ def add_customer(self, i, j):
165
+ """Keep the dictionaries 'customers' and 'providers' consistent."""
166
+
167
+ self.customers[j].add(i)
168
+ self.providers[i].add(j)
169
+ for z in self.providers[j]:
170
+ self.customers[z].add(i)
171
+ self.providers[i].add(z)
172
+
173
+ def add_node(self, i, kind, reg2prob, avg_deg, t_edge_prob):
174
+ """Add a node and its customer transit edges to the graph.
175
+
176
+ Parameters
177
+ ----------
178
+ i: object
179
+ Identifier of the new node
180
+ kind: string
181
+ Type of the new node. Options are: 'M' for middle node, 'CP' for
182
+ content provider and 'C' for customer.
183
+ reg2prob: float
184
+ Probability the new node can be in two different regions.
185
+ avg_deg: float
186
+ Average number of transit nodes of which node i is customer.
187
+ t_edge_prob: float
188
+ Probability that node i establishes a customer transit edge with a tier
189
+ one (T) node
190
+
191
+ Returns
192
+ -------
193
+ i: object
194
+ Identifier of the new node
195
+ """
196
+
197
+ regs = 1 # regions in which node resides
198
+ if self.seed.random() < reg2prob: # node is in two regions
199
+ regs = 2
200
+ node_options = set()
201
+
202
+ self.G.add_node(i, type=kind, peers=0)
203
+ self.customers[i] = set()
204
+ self.providers[i] = set()
205
+ self.nodes[kind].add(i)
206
+ for r in self.seed.sample(list(self.regions), regs):
207
+ node_options = node_options.union(self.regions[r])
208
+ self.regions[r].add(i)
209
+
210
+ edge_num = uniform_int_from_avg(1, avg_deg, self.seed)
211
+
212
+ t_options = node_options.intersection(self.nodes["T"])
213
+ m_options = node_options.intersection(self.nodes["M"])
214
+ if i in m_options:
215
+ m_options.remove(i)
216
+ d = 0
217
+ while d < edge_num and (len(t_options) > 0 or len(m_options) > 0):
218
+ if len(m_options) == 0 or (
219
+ len(t_options) > 0 and self.seed.random() < t_edge_prob
220
+ ): # add edge to a T node
221
+ j = self.choose_node_pref_attach(t_options)
222
+ t_options.remove(j)
223
+ else:
224
+ j = self.choose_node_pref_attach(m_options)
225
+ m_options.remove(j)
226
+ self.add_edge(i, j, "transit")
227
+ self.add_customer(i, j)
228
+ d += 1
229
+
230
+ return i
231
+
232
+ def add_m_peering_link(self, m, to_kind):
233
+ """Add a peering link between two middle tier (M) nodes.
234
+
235
+ Target node j is drawn considering a preferential attachment based on
236
+ other M node peering degree.
237
+
238
+ Parameters
239
+ ----------
240
+ m: object
241
+ Node identifier
242
+ to_kind: string
243
+ type for target node j (must be always M)
244
+
245
+ Returns
246
+ -------
247
+ success: boolean
248
+ """
249
+
250
+ # candidates are of type 'M' and are not customers of m
251
+ node_options = self.nodes["M"].difference(self.customers[m])
252
+ # candidates are not providers of m
253
+ node_options = node_options.difference(self.providers[m])
254
+ # remove self
255
+ if m in node_options:
256
+ node_options.remove(m)
257
+
258
+ # remove candidates we are already connected to
259
+ for j in self.G.neighbors(m):
260
+ if j in node_options:
261
+ node_options.remove(j)
262
+
263
+ if len(node_options) > 0:
264
+ j = self.choose_peer_pref_attach(node_options)
265
+ self.add_edge(m, j, "peer")
266
+ self.G.nodes[m]["peers"] += 1
267
+ self.G.nodes[j]["peers"] += 1
268
+ return True
269
+ else:
270
+ return False
271
+
272
+ def add_cp_peering_link(self, cp, to_kind):
273
+ """Add a peering link to a content provider (CP) node.
274
+
275
+ Target node j can be CP or M and it is drawn uniformly among the nodes
276
+ belonging to the same region as cp.
277
+
278
+ Parameters
279
+ ----------
280
+ cp: object
281
+ Node identifier
282
+ to_kind: string
283
+ type for target node j (must be M or CP)
284
+
285
+ Returns
286
+ -------
287
+ success: boolean
288
+ """
289
+
290
+ node_options = set()
291
+ for r in self.regions: # options include nodes in the same region(s)
292
+ if cp in self.regions[r]:
293
+ node_options = node_options.union(self.regions[r])
294
+
295
+ # options are restricted to the indicated kind ('M' or 'CP')
296
+ node_options = self.nodes[to_kind].intersection(node_options)
297
+
298
+ # remove self
299
+ if cp in node_options:
300
+ node_options.remove(cp)
301
+
302
+ # remove nodes that are cp's providers
303
+ node_options = node_options.difference(self.providers[cp])
304
+
305
+ # remove nodes we are already connected to
306
+ for j in self.G.neighbors(cp):
307
+ if j in node_options:
308
+ node_options.remove(j)
309
+
310
+ if len(node_options) > 0:
311
+ j = self.seed.sample(list(node_options), 1)[0]
312
+ self.add_edge(cp, j, "peer")
313
+ self.G.nodes[cp]["peers"] += 1
314
+ self.G.nodes[j]["peers"] += 1
315
+ return True
316
+ else:
317
+ return False
318
+
319
+ def graph_regions(self, rn):
320
+ """Initializes AS network regions.
321
+
322
+ Parameters
323
+ ----------
324
+ rn: integer
325
+ Number of regions
326
+ """
327
+
328
+ self.regions = {}
329
+ for i in range(rn):
330
+ self.regions["REG" + str(i)] = set()
331
+
332
+ def add_peering_links(self, from_kind, to_kind):
333
+ """Utility function to add peering links among node groups."""
334
+ peer_link_method = None
335
+ if from_kind == "M":
336
+ peer_link_method = self.add_m_peering_link
337
+ m = self.p_m_m
338
+ if from_kind == "CP":
339
+ peer_link_method = self.add_cp_peering_link
340
+ if to_kind == "M":
341
+ m = self.p_cp_m
342
+ else:
343
+ m = self.p_cp_cp
344
+
345
+ for i in self.nodes[from_kind]:
346
+ num = uniform_int_from_avg(0, m, self.seed)
347
+ for _ in range(num):
348
+ peer_link_method(i, to_kind)
349
+
350
+ def generate(self):
351
+ """Generates a random AS network graph as described in [1].
352
+
353
+ Returns
354
+ -------
355
+ G: Graph object
356
+
357
+ Notes
358
+ -----
359
+ The generation proceeds as follows: first we create the core network
360
+ of tier-1 (T) nodes, then we add the mid-level (M), content
361
+ provider (CP) and customer (C) nodes along with their transit edges
362
+ (a link i,j means i is a customer of j). Finally we add peering links
363
+ between M nodes, between M and CP nodes, and between pairs of CP nodes.
364
+ For a detailed description of the algorithm, please refer to [1].
365
+
366
+ References
367
+ ----------
368
+ [1] A. Elmokashfi, A. Kvalbein and C. Dovrolis, "On the Scalability of
369
+ BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas
370
+ in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010.
371
+ """
372
+
373
+ self.graph_regions(5)
374
+ self.customers = {}
375
+ self.providers = {}
376
+ self.nodes = {"T": set(), "M": set(), "CP": set(), "C": set()}
377
+
378
+ self.t_graph()
379
+ self.nodes["T"] = set(self.G.nodes())
380
+
381
+ i = len(self.nodes["T"])
382
+ for _ in range(self.n_m):
383
+ self.nodes["M"].add(self.add_node(i, "M", 0.2, self.d_m, self.t_m))
384
+ i += 1
385
+ for _ in range(self.n_cp):
386
+ self.nodes["CP"].add(self.add_node(i, "CP", 0.05, self.d_cp, self.t_cp))
387
+ i += 1
388
+ for _ in range(self.n_c):
389
+ self.nodes["C"].add(self.add_node(i, "C", 0, self.d_c, self.t_c))
390
+ i += 1
391
+
392
+ self.add_peering_links("M", "M")
393
+ self.add_peering_links("CP", "M")
394
+ self.add_peering_links("CP", "CP")
395
+
396
+ return self.G
397
+
398
+
399
+ @py_random_state(1)
400
+ @nx._dispatchable(graphs=None, returns_graph=True)
401
+ def random_internet_as_graph(n, seed=None):
402
+ """Generates a random undirected graph resembling the Internet AS network
403
+
404
+ Parameters
405
+ ----------
406
+ n: integer in [1000, 10000]
407
+ Number of graph nodes
408
+ seed : integer, random_state, or None (default)
409
+ Indicator of random number generation state.
410
+ See :ref:`Randomness<randomness>`.
411
+
412
+ Returns
413
+ -------
414
+ G: Networkx Graph object
415
+ A randomly generated undirected graph
416
+
417
+ Notes
418
+ -----
419
+ This algorithm returns an undirected graph resembling the Internet
420
+ Autonomous System (AS) network. It uses the approach by Elmokashfi et al.
421
+ [1]_ and preserves the properties described in that paper [1]_.
422
+
423
+ Each node models an autonomous system, with an attribute 'type' specifying
424
+ its kind: tier-1 (T), mid-level (M), customer (C) or content-provider (CP).
425
+ Each edge models an ADV communication link (hence, bidirectional) with
426
+ attributes:
427
+
428
+ - type: transit|peer, the kind of commercial agreement between nodes;
429
+ - customer: <node id>, the identifier of the node acting as customer
430
+ ('none' if type is peer).
431
+
432
+ References
433
+ ----------
434
+ .. [1] A. Elmokashfi, A. Kvalbein and C. Dovrolis, "On the Scalability of
435
+ BGP: The Role of Topology Growth," in IEEE Journal on Selected Areas
436
+ in Communications, vol. 28, no. 8, pp. 1250-1261, October 2010.
437
+ """
438
+
439
+ GG = AS_graph_generator(n, seed)
440
+ G = GG.generate()
441
+ return G
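A brief usage sketch (illustrative, not part of the committed file) exercising the node and edge attributes documented above; the seed value is arbitrary:

    import networkx as nx
    from collections import Counter

    # Smallest size suggested by the docstring; seed fixed for reproducibility.
    G = nx.random_internet_as_graph(1000, seed=42)

    # Each node carries a 'type' attribute: T, M, CP or C.
    print(Counter(t for _, t in G.nodes(data="type")))

    # Each edge carries 'type' (transit|peer) and 'customer' attributes.
    u, v, data = next(iter(G.edges(data=True)))
    print(data["type"], data["customer"])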
llmeval-env/lib/python3.10/site-packages/networkx/generators/intersection.py ADDED
@@ -0,0 +1,124 @@
1
+ """
2
+ Generators for random intersection graphs.
3
+ """
4
+ import networkx as nx
5
+ from networkx.utils import py_random_state
6
+
7
+ __all__ = [
8
+ "uniform_random_intersection_graph",
9
+ "k_random_intersection_graph",
10
+ "general_random_intersection_graph",
11
+ ]
12
+
13
+
14
+ @py_random_state(3)
15
+ @nx._dispatchable(graphs=None, returns_graph=True)
16
+ def uniform_random_intersection_graph(n, m, p, seed=None):
17
+ """Returns a uniform random intersection graph.
18
+
19
+ Parameters
20
+ ----------
21
+ n : int
22
+ The number of nodes in the first bipartite set (nodes)
23
+ m : int
24
+ The number of nodes in the second bipartite set (attributes)
25
+ p : float
26
+ Probability of connecting nodes between bipartite sets
27
+ seed : integer, random_state, or None (default)
28
+ Indicator of random number generation state.
29
+ See :ref:`Randomness<randomness>`.
30
+
31
+ See Also
32
+ --------
33
+ gnp_random_graph
34
+
35
+ References
36
+ ----------
37
+ .. [1] K.B. Singer-Cohen, Random Intersection Graphs, 1995,
38
+ PhD thesis, Johns Hopkins University
39
+ .. [2] Fill, J. A., Scheinerman, E. R., and Singer-Cohen, K. B.,
40
+ Random intersection graphs when m = ω(n):
41
+ An equivalence theorem relating the evolution of the G(n, m, p)
42
+ and G(n, p) models. Random Struct. Algorithms 16, 2 (2000), 156–176.
43
+ """
44
+ from networkx.algorithms import bipartite
45
+
46
+ G = bipartite.random_graph(n, m, p, seed)
47
+ return nx.projected_graph(G, range(n))
48
+
49
+
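A minimal usage sketch (illustrative; assumes the generator is reachable from the top-level `nx` namespace, and the seed is arbitrary):

    import networkx as nx

    # n=30 nodes, m=10 attributes, each node-attribute pair present with p=0.2.
    # The bipartite graph is projected onto the node set, so the result has 30 nodes.
    G = nx.uniform_random_intersection_graph(30, 10, 0.2, seed=1)
    assert len(G) == 30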
50
+ @py_random_state(3)
51
+ @nx._dispatchable(graphs=None, returns_graph=True)
52
+ def k_random_intersection_graph(n, m, k, seed=None):
53
+ """Returns a intersection graph with randomly chosen attribute sets for
54
+ each node that are of equal size (k).
55
+
56
+ Parameters
57
+ ----------
58
+ n : int
59
+ The number of nodes in the first bipartite set (nodes)
60
+ m : int
61
+ The number of nodes in the second bipartite set (attributes)
62
+ k : int
63
+ Size of attribute set to assign to each node.
64
+ seed : integer, random_state, or None (default)
65
+ Indicator of random number generation state.
66
+ See :ref:`Randomness<randomness>`.
67
+
68
+ See Also
69
+ --------
70
+ gnp_random_graph, uniform_random_intersection_graph
71
+
72
+ References
73
+ ----------
74
+ .. [1] Godehardt, E., and Jaworski, J.
75
+ Two models of random intersection graphs and their applications.
76
+ Electronic Notes in Discrete Mathematics 10 (2001), 129--132.
77
+ """
78
+ G = nx.empty_graph(n + m)
79
+ mset = range(n, n + m)
80
+ for v in range(n):
81
+ targets = seed.sample(mset, k)
82
+ G.add_edges_from(zip([v] * len(targets), targets))
83
+ return nx.projected_graph(G, range(n))
84
+
85
+
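A similar sketch (illustrative; seed arbitrary) for the fixed-size variant:

    import networkx as nx

    # Each of the 25 nodes samples exactly k=3 of the m=8 attributes; two
    # nodes become adjacent when their attribute sets intersect.
    G = nx.k_random_intersection_graph(25, 8, 3, seed=5)
    assert len(G) == 25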
86
+ @py_random_state(3)
87
+ @nx._dispatchable(graphs=None, returns_graph=True)
88
+ def general_random_intersection_graph(n, m, p, seed=None):
89
+ """Returns a random intersection graph with independent probabilities
90
+ for connections between node and attribute sets.
91
+
92
+ Parameters
93
+ ----------
94
+ n : int
95
+ The number of nodes in the first bipartite set (nodes)
96
+ m : int
97
+ The number of nodes in the second bipartite set (attributes)
98
+ p : list of floats of length m
99
+ Probabilities for connecting nodes to each attribute
100
+ seed : integer, random_state, or None (default)
101
+ Indicator of random number generation state.
102
+ See :ref:`Randomness<randomness>`.
103
+
104
+ See Also
105
+ --------
106
+ gnp_random_graph, uniform_random_intersection_graph
107
+
108
+ References
109
+ ----------
110
+ .. [1] Nikoletseas, S. E., Raptopoulos, C., and Spirakis, P. G.
111
+ The existence and efficient construction of large independent sets
112
+ in general random intersection graphs. In ICALP (2004), J. Díaz,
113
+ J. Karhumäki, A. Lepistö, and D. Sannella, Eds., vol. 3142
114
+ of Lecture Notes in Computer Science, Springer, pp. 1029–1040.
115
+ """
116
+ if len(p) != m:
117
+ raise ValueError("Probability list p must have m elements.")
118
+ G = nx.empty_graph(n + m)
119
+ mset = range(n, n + m)
120
+ for u in range(n):
121
+ for v, q in zip(mset, p):
122
+ if seed.random() < q:
123
+ G.add_edge(u, v)
124
+ return nx.projected_graph(G, range(n))
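And a sketch (illustrative; values arbitrary) for the general variant, where each attribute carries its own probability:

    import networkx as nx

    # One probability per attribute; attribute 0 is common, the rest rare.
    p = [0.9, 0.1, 0.1, 0.1, 0.1]
    G = nx.general_random_intersection_graph(20, len(p), p, seed=7)
    assert len(G) == 20
    # Passing a p of the wrong length raises ValueError, per the check above.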
llmeval-env/lib/python3.10/site-packages/networkx/generators/interval_graph.py ADDED
@@ -0,0 +1,69 @@
1
+ """
2
+ Generators for interval graphs.
3
+ """
4
+ from collections.abc import Sequence
5
+
6
+ import networkx as nx
7
+
8
+ __all__ = ["interval_graph"]
9
+
10
+
11
+ @nx._dispatchable(graphs=None, returns_graph=True)
12
+ def interval_graph(intervals):
13
+ """Generates an interval graph for a list of intervals given.
14
+
15
+ In graph theory, an interval graph is an undirected graph formed from a set
16
+ of closed intervals on the real line, with a vertex for each interval
17
+ and an edge between vertices whose intervals intersect.
18
+ It is the intersection graph of the intervals.
19
+
20
+ More information can be found at:
21
+ https://en.wikipedia.org/wiki/Interval_graph
22
+
23
+ Parameters
24
+ ----------
25
+ intervals : a sequence of intervals, say (l, r) where l is the left end,
26
+ and r is the right end of the closed interval.
27
+
28
+ Returns
29
+ -------
30
+ G : networkx graph
31
+
32
+ Examples
33
+ --------
34
+ >>> intervals = [(-2, 3), [1, 4], (2, 3), (4, 6)]
35
+ >>> G = nx.interval_graph(intervals)
36
+ >>> sorted(G.edges)
37
+ [((-2, 3), (1, 4)), ((-2, 3), (2, 3)), ((1, 4), (2, 3)), ((1, 4), (4, 6))]
38
+
39
+ Raises
40
+ ------
41
+ :exc:`TypeError`
42
+ if `intervals` contains None or an element which is not a
43
+ collections.abc.Sequence or is not of length 2.
44
+ :exc:`ValueError`
45
+ if `intervals` contains an interval such that min1 > max1
46
+ where min1, max1 = interval.
47
+ """
48
+ intervals = list(intervals)
49
+ for interval in intervals:
50
+ if not (isinstance(interval, Sequence) and len(interval) == 2):
51
+ raise TypeError(
52
+ "Each interval must have length 2, and be a "
53
+ "collections.abc.Sequence such as tuple or list."
54
+ )
55
+ if interval[0] > interval[1]:
56
+ raise ValueError(f"Interval must have lower value first. Got {interval}")
57
+
58
+ graph = nx.Graph()
59
+
60
+ tupled_intervals = [tuple(interval) for interval in intervals]
61
+ graph.add_nodes_from(tupled_intervals)
62
+
63
+ while tupled_intervals:
64
+ min1, max1 = interval1 = tupled_intervals.pop()
65
+ for interval2 in tupled_intervals:
66
+ min2, max2 = interval2
67
+ if max1 >= min2 and max2 >= min1:
68
+ graph.add_edge(interval1, interval2)
69
+ return graph
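A small sketch (illustrative) of the closed-interval convention enforced by the overlap test above: intervals touching only at an endpoint still intersect.

    import networkx as nx

    # (1, 2) and (2, 3) share the point 2, so they are adjacent;
    # (2, 3) and (4, 5) are disjoint, so they are not.
    G = nx.interval_graph([(1, 2), (2, 3), (4, 5)])
    assert G.has_edge((1, 2), (2, 3))
    assert not G.has_edge((2, 3), (4, 5))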
llmeval-env/lib/python3.10/site-packages/networkx/generators/joint_degree_seq.py ADDED
@@ -0,0 +1,664 @@
1
+ """Generate graphs with a given joint degree and directed joint degree"""
2
+
3
+ import networkx as nx
4
+ from networkx.utils import py_random_state
5
+
6
+ __all__ = [
7
+ "is_valid_joint_degree",
8
+ "is_valid_directed_joint_degree",
9
+ "joint_degree_graph",
10
+ "directed_joint_degree_graph",
11
+ ]
12
+
13
+
14
+ @nx._dispatchable(graphs=None)
15
+ def is_valid_joint_degree(joint_degrees):
16
+ """Checks whether the given joint degree dictionary is realizable.
17
+
18
+ A *joint degree dictionary* is a dictionary of dictionaries, in
19
+ which entry ``joint_degrees[k][l]`` is an integer representing the
20
+ number of edges joining nodes of degree *k* with nodes of degree
21
+ *l*. Such a dictionary is realizable as a simple graph if and only
22
+ if the following conditions are satisfied.
23
+
24
+ - each entry must be an integer,
25
+ - the total number of nodes of degree *k*, computed by
26
+ ``sum(joint_degrees[k].values()) / k``, must be an integer,
27
+ - the total number of edges joining nodes of degree *k* with
28
+ nodes of degree *l* cannot exceed the total number of possible edges,
29
+ - each diagonal entry ``joint_degrees[k][k]`` must be even (this is
30
+ a convention assumed by the :func:`joint_degree_graph` function).
31
+
32
+
33
+ Parameters
34
+ ----------
35
+ joint_degrees : dictionary of dictionary of integers
36
+ A joint degree dictionary in which entry ``joint_degrees[k][l]``
37
+ is the number of edges joining nodes of degree *k* with nodes of
38
+ degree *l*.
39
+
40
+ Returns
41
+ -------
42
+ bool
43
+ Whether the given joint degree dictionary is realizable as a
44
+ simple graph.
45
+
46
+ References
47
+ ----------
48
+ .. [1] M. Gjoka, M. Kurant, A. Markopoulou, "2.5K Graphs: from Sampling
49
+ to Generation", IEEE Infocom, 2013.
50
+ .. [2] I. Stanton, A. Pinar, "Constructing and sampling graphs with a
51
+ prescribed joint degree distribution", Journal of Experimental
52
+ Algorithmics, 2012.
53
+ """
54
+
55
+ degree_count = {}
56
+ for k in joint_degrees:
57
+ if k > 0:
58
+ k_size = sum(joint_degrees[k].values()) / k
59
+ if not k_size.is_integer():
60
+ return False
61
+ degree_count[k] = k_size
62
+
63
+ for k in joint_degrees:
64
+ for l in joint_degrees[k]:
65
+ if not float(joint_degrees[k][l]).is_integer():
66
+ return False
67
+
68
+ if (k != l) and (joint_degrees[k][l] > degree_count[k] * degree_count[l]):
69
+ return False
70
+ elif k == l:
71
+ if joint_degrees[k][k] > degree_count[k] * (degree_count[k] - 1):
72
+ return False
73
+ if joint_degrees[k][k] % 2 != 0:
74
+ return False
75
+
76
+ # if all above conditions have been satisfied then the input
77
+ # joint degree is realizable as a simple graph.
78
+ return True
79
+
80
+
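A quick check sketch (illustrative) using the realizable dictionary from the `joint_degree_graph` docstring further down:

    import networkx as nx

    jd = {1: {4: 1}, 2: {2: 2, 3: 2, 4: 2}, 3: {2: 2, 4: 1}, 4: {1: 1, 2: 2, 3: 1}}
    assert nx.is_valid_joint_degree(jd)

    # Fails: 3 edges among degree-2 nodes would imply 3/2 nodes of degree 2.
    assert not nx.is_valid_joint_degree({2: {2: 3}})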
81
+ def _neighbor_switch(G, w, unsat, h_node_residual, avoid_node_id=None):
82
+ """Releases one free stub for ``w``, while preserving joint degree in G.
83
+
84
+ Parameters
85
+ ----------
86
+ G : NetworkX graph
87
+ Graph in which the neighbor switch will take place.
88
+ w : integer
89
+ Node id for which we will execute this neighbor switch.
90
+ unsat : set of integers
91
+ Set of unsaturated node ids that have the same degree as w.
92
+ h_node_residual: dictionary of integers
93
+ Keeps track of the remaining stubs for a given node.
94
+ avoid_node_id: integer
95
+ Node id to avoid when selecting w_prime.
96
+
97
+ Notes
98
+ -----
99
+ First, it selects *w_prime*, an unsaturated node that has the same degree
100
+ as ``w``. Second, it selects *switch_node*, a neighbor node of ``w`` that
101
+ is not connected to *w_prime*. Then it executes an edge swap i.e. removes
102
+ (``w``, *switch_node*) and adds (*w_prime*, *switch_node*). Gjoka et al. [1]
103
+ prove that such an edge swap is always possible.
104
+
105
+ References
106
+ ----------
107
+ .. [1] M. Gjoka, B. Tillman, A. Markopoulou, "Construction of Simple
108
+ Graphs with a Target Joint Degree Matrix and Beyond", IEEE Infocom, '15
109
+ """
110
+
111
+ if (avoid_node_id is None) or (h_node_residual[avoid_node_id] > 1):
112
+ # select unsaturated node w_prime that has the same degree as w
113
+ w_prime = next(iter(unsat))
114
+ else:
115
+ # assume that the node pair (v,w) has been selected for connection. if
116
+ # - neighbor_switch is called for node w,
117
+ # - nodes v and w have the same degree,
118
+ # - node v=avoid_node_id has only one stub left,
119
+ # then prevent v=avoid_node_id from being selected as w_prime.
120
+
121
+ iter_var = iter(unsat)
122
+ while True:
123
+ w_prime = next(iter_var)
124
+ if w_prime != avoid_node_id:
125
+ break
126
+
127
+ # select switch_node, a neighbor of w, that is not connected to w_prime
128
+ w_prime_neighbs = G[w_prime] # slightly faster declaring this variable
129
+ for v in G[w]:
130
+ if (v not in w_prime_neighbs) and (v != w_prime):
131
+ switch_node = v
132
+ break
133
+
134
+ # remove edge (w,switch_node), add edge (w_prime,switch_node) and update
135
+ # data structures
136
+ G.remove_edge(w, switch_node)
137
+ G.add_edge(w_prime, switch_node)
138
+ h_node_residual[w] += 1
139
+ h_node_residual[w_prime] -= 1
140
+ if h_node_residual[w_prime] == 0:
141
+ unsat.remove(w_prime)
142
+
143
+
144
+ @py_random_state(1)
145
+ @nx._dispatchable(graphs=None, returns_graph=True)
146
+ def joint_degree_graph(joint_degrees, seed=None):
147
+ """Generates a random simple graph with the given joint degree dictionary.
148
+
149
+ Parameters
150
+ ----------
151
+ joint_degrees : dictionary of dictionary of integers
152
+ A joint degree dictionary in which entry ``joint_degrees[k][l]`` is the
153
+ number of edges joining nodes of degree *k* with nodes of degree *l*.
154
+ seed : integer, random_state, or None (default)
155
+ Indicator of random number generation state.
156
+ See :ref:`Randomness<randomness>`.
157
+
158
+ Returns
159
+ -------
160
+ G : Graph
161
+ A graph with the specified joint degree dictionary.
162
+
163
+ Raises
164
+ ------
165
+ NetworkXError
166
+ If *joint_degrees* dictionary is not realizable.
167
+
168
+ Notes
169
+ -----
170
+ In each iteration of the "while loop" the algorithm picks two disconnected
171
+ nodes *v* and *w*, of degree *k* and *l* correspondingly, for which
172
+ ``joint_degrees[k][l]`` has not reached its target yet. It then adds
173
+ edge (*v*, *w*) and increases the number of edges in graph G by one.
174
+
175
+ The intelligence of the algorithm lies in the fact that it is always
176
+ possible to add an edge between such disconnected nodes *v* and *w*,
177
+ even if one or both nodes do not have free stubs. That is made possible by
178
+ executing a "neighbor switch", an edge rewiring move that releases
179
+ a free stub while keeping the joint degree of G the same.
180
+
181
+ The algorithm continues for E (number of edges) iterations of
182
+ the "while loop", at the which point all entries of the given
183
+ ``joint_degrees[k][l]`` have reached their target values and the
184
+ construction is complete.
185
+
186
+ References
187
+ ----------
188
+ .. [1] M. Gjoka, B. Tillman, A. Markopoulou, "Construction of Simple
189
+ Graphs with a Target Joint Degree Matrix and Beyond", IEEE Infocom, '15
190
+
191
+ Examples
192
+ --------
193
+ >>> joint_degrees = {
194
+ ... 1: {4: 1},
195
+ ... 2: {2: 2, 3: 2, 4: 2},
196
+ ... 3: {2: 2, 4: 1},
197
+ ... 4: {1: 1, 2: 2, 3: 1},
198
+ ... }
199
+ >>> G = nx.joint_degree_graph(joint_degrees)
200
+ >>>
201
+ """
202
+
203
+ if not is_valid_joint_degree(joint_degrees):
204
+ msg = "Input joint degree dict not realizable as a simple graph"
205
+ raise nx.NetworkXError(msg)
206
+
207
+ # compute degree count from joint_degrees
208
+ degree_count = {k: sum(l.values()) // k for k, l in joint_degrees.items() if k > 0}
209
+
210
+ # start with empty N-node graph
211
+ N = sum(degree_count.values())
212
+ G = nx.empty_graph(N)
213
+
214
+ # for a given degree group, keep the list of all node ids
215
+ h_degree_nodelist = {}
216
+
217
+ # for a given node, keep track of the remaining stubs
218
+ h_node_residual = {}
219
+
220
+ # populate h_degree_nodelist and h_node_residual
221
+ nodeid = 0
222
+ for degree, num_nodes in degree_count.items():
223
+ h_degree_nodelist[degree] = range(nodeid, nodeid + num_nodes)
224
+ for v in h_degree_nodelist[degree]:
225
+ h_node_residual[v] = degree
226
+ nodeid += int(num_nodes)
227
+
228
+ # iterate over every degree pair (k,l) and add the number of edges given
229
+ # for each pair
230
+ for k in joint_degrees:
231
+ for l in joint_degrees[k]:
232
+ # n_edges_add is the number of edges to add for the
233
+ # degree pair (k,l)
234
+ n_edges_add = joint_degrees[k][l]
235
+
236
+ if (n_edges_add > 0) and (k >= l):
237
+ # number of nodes with degree k and l
238
+ k_size = degree_count[k]
239
+ l_size = degree_count[l]
240
+
241
+ # k_nodes and l_nodes consist of all nodes of degree k and l
242
+ k_nodes = h_degree_nodelist[k]
243
+ l_nodes = h_degree_nodelist[l]
244
+
245
+ # k_unsat and l_unsat consist of nodes of degree k and l that
246
+ # are unsaturated (nodes that have at least 1 available stub)
247
+ k_unsat = {v for v in k_nodes if h_node_residual[v] > 0}
248
+
249
+ if k != l:
250
+ l_unsat = {w for w in l_nodes if h_node_residual[w] > 0}
251
+ else:
252
+ l_unsat = k_unsat
253
+ n_edges_add = joint_degrees[k][l] // 2
254
+
255
+ while n_edges_add > 0:
256
+ # randomly pick nodes v and w that have degrees k and l
257
+ v = k_nodes[seed.randrange(k_size)]
258
+ w = l_nodes[seed.randrange(l_size)]
259
+
260
+ # if nodes v and w are disconnected then attempt to connect
261
+ if not G.has_edge(v, w) and (v != w):
262
+ # if node v has no free stubs then do neighbor switch
263
+ if h_node_residual[v] == 0:
264
+ _neighbor_switch(G, v, k_unsat, h_node_residual)
265
+
266
+ # if node w has no free stubs then do neighbor switch
267
+ if h_node_residual[w] == 0:
268
+ if k != l:
269
+ _neighbor_switch(G, w, l_unsat, h_node_residual)
270
+ else:
271
+ _neighbor_switch(
272
+ G, w, l_unsat, h_node_residual, avoid_node_id=v
273
+ )
274
+
275
+ # add edge (v, w) and update data structures
276
+ G.add_edge(v, w)
277
+ h_node_residual[v] -= 1
278
+ h_node_residual[w] -= 1
279
+ n_edges_add -= 1
280
+
281
+ if h_node_residual[v] == 0:
282
+ k_unsat.discard(v)
283
+ if h_node_residual[w] == 0:
284
+ l_unsat.discard(w)
285
+ return G
286
+
287
+
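A verification sketch (illustrative, not part of the file): recounting edges per degree pair in the generated graph should reproduce the input exactly, with each diagonal entry counted twice per edge, as the evenness convention above implies.

    import networkx as nx
    from collections import defaultdict

    jd = {1: {4: 1}, 2: {2: 2, 3: 2, 4: 2}, 3: {2: 2, 4: 1}, 4: {1: 1, 2: 2, 3: 1}}
    G = nx.joint_degree_graph(jd, seed=0)

    deg = dict(G.degree())
    got = defaultdict(lambda: defaultdict(int))
    for u, v in G.edges():
        k, l = deg[u], deg[v]
        got[k][l] += 1
        got[l][k] += 1
    assert {k: dict(d) for k, d in got.items()} == jd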
288
+ @nx._dispatchable(graphs=None)
289
+ def is_valid_directed_joint_degree(in_degrees, out_degrees, nkk):
290
+ """Checks whether the given directed joint degree input is realizable
291
+
292
+ Parameters
293
+ ----------
294
+ in_degrees : list of integers
295
+ in degree sequence contains the in degrees of nodes.
296
+ out_degrees : list of integers
297
+ out degree sequence contains the out degrees of nodes.
298
+ nkk : dictionary of dictionary of integers
299
+ directed joint degree dictionary. for nodes of out degree k (first
300
+ level of dict) and nodes of in degree l (second level of dict)
301
+ describes the number of edges.
302
+
303
+ Returns
304
+ -------
305
+ boolean
306
+ returns true if given input is realizable, else returns false.
307
+
308
+ Notes
309
+ -----
310
+ Here is the list of conditions that the inputs (in/out degree sequences,
311
+ nkk) need to satisfy for simple directed graph realizability:
312
+
313
+ - Condition 0: in_degrees and out_degrees have the same length
314
+ - Condition 1: nkk[k][l] is integer for all k,l
315
+ - Condition 2: sum(nkk[k])/k, the number of nodes with partition id k,
316
+ is an integer and matches the degree sequence
317
+ - Condition 3: number of edges and non-chords between k and l cannot exceed
318
+ maximum possible number of edges
319
+
320
+
321
+ References
322
+ ----------
323
+ [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka,
324
+ "Construction of Directed 2K Graphs". In Proc. of KDD 2017.
325
+ """
326
+ V = {} # number of nodes with in/out degree.
327
+ forbidden = {}
328
+ if len(in_degrees) != len(out_degrees):
329
+ return False
330
+
331
+ for idx in range(len(in_degrees)):
332
+ i = in_degrees[idx]
333
+ o = out_degrees[idx]
334
+ V[(i, 0)] = V.get((i, 0), 0) + 1
335
+ V[(o, 1)] = V.get((o, 1), 0) + 1
336
+
337
+ forbidden[(o, i)] = forbidden.get((o, i), 0) + 1
338
+
339
+ S = {} # number of edges going from in/out degree nodes.
340
+ for k in nkk:
341
+ for l in nkk[k]:
342
+ val = nkk[k][l]
343
+ if not float(val).is_integer(): # condition 1
344
+ return False
345
+
346
+ if val > 0:
347
+ S[(k, 1)] = S.get((k, 1), 0) + val
348
+ S[(l, 0)] = S.get((l, 0), 0) + val
349
+ # condition 3
350
+ if val + forbidden.get((k, l), 0) > V[(k, 1)] * V[(l, 0)]:
351
+ return False
352
+
353
+ return all(S[s] / s[0] == V[s] for s in S)
354
+
355
+
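A check sketch (illustrative) reusing the inputs from the directed generator's docstring below:

    import networkx as nx

    in_degrees = [0, 1, 1, 2]
    out_degrees = [1, 1, 1, 1]
    nkk = {1: {1: 2, 2: 2}}
    assert nx.is_valid_directed_joint_degree(in_degrees, out_degrees, nkk)

    # Mismatched sequence lengths fail condition 0.
    assert not nx.is_valid_directed_joint_degree([1], out_degrees, nkk)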
356
+ def _directed_neighbor_switch(
357
+ G, w, unsat, h_node_residual_out, chords, h_partition_in, partition
358
+ ):
359
+ """Releases one free stub for node w, while preserving joint degree in G.
360
+
361
+ Parameters
362
+ ----------
363
+ G : networkx directed graph
364
+ graph within which the edge swap will take place.
365
+ w : integer
366
+ node id for which we need to perform a neighbor switch.
367
+ unsat: set of integers
368
+ set of node ids that have the same degree as w and are unsaturated.
369
+ h_node_residual_out: dict of integers
370
+ for a given node, keeps track of the remaining stubs to be added.
371
+ chords: set of tuples
372
+ keeps track of available positions to add edges.
373
+ h_partition_in: dict of integers
374
+ for a given node, keeps track of its partition id (in degree).
375
+ partition: integer
376
+ partition id to check if chords have to be updated.
377
+
378
+ Notes
379
+ -----
380
+ First, it selects node w_prime that (1) has the same degree as w and
381
+ (2) is unsaturated. Then, it selects node v, a neighbor of w, that is
382
+ not connected to w_prime and does an edge swap i.e. removes (w,v) and
383
+ adds (w_prime,v). If neighbor switch is not possible for w using
384
+ w_prime and v, then return w_prime; in [1] it's proven that
385
+ such unsaturated nodes can be used.
386
+
387
+ References
388
+ ----------
389
+ [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka,
390
+ "Construction of Directed 2K Graphs". In Proc. of KDD 2017.
391
+ """
392
+ w_prime = unsat.pop()
393
+ unsat.add(w_prime)
394
+ # select node t, a neighbor of w, that is not connected to w_prime
395
+ w_neighbs = list(G.successors(w))
396
+ # slightly faster declaring this variable
397
+ w_prime_neighbs = list(G.successors(w_prime))
398
+
399
+ for v in w_neighbs:
400
+ if (v not in w_prime_neighbs) and w_prime != v:
401
+ # removes (w,v), add (w_prime,v) and update data structures
402
+ G.remove_edge(w, v)
403
+ G.add_edge(w_prime, v)
404
+
405
+ if h_partition_in[v] == partition:
406
+ chords.add((w, v))
407
+ chords.discard((w_prime, v))
408
+
409
+ h_node_residual_out[w] += 1
410
+ h_node_residual_out[w_prime] -= 1
411
+ if h_node_residual_out[w_prime] == 0:
412
+ unsat.remove(w_prime)
413
+ return None
414
+
415
+ # If neighbor switch didn't work, use unsaturated node
416
+ return w_prime
417
+
418
+
419
+ def _directed_neighbor_switch_rev(
420
+ G, w, unsat, h_node_residual_in, chords, h_partition_out, partition
421
+ ):
422
+ """The reverse of directed_neighbor_switch.
423
+
424
+ Parameters
425
+ ----------
426
+ G : networkx directed graph
427
+ graph within which the edge swap will take place.
428
+ w : integer
429
+ node id for which we need to perform a neighbor switch.
430
+ unsat: set of integers
431
+ set of node ids that have the same degree as w and are unsaturated.
432
+ h_node_residual_in: dict of integers
433
+ for a given node, keeps track of the remaining stubs to be added.
434
+ chords: set of tuples
435
+ keeps track of available positions to add edges.
436
+ h_partition_out: dict of integers
437
+ for a given node, keeps track of its partition id (out degree).
438
+ partition: integer
439
+ partition id to check if chords have to be updated.
440
+
441
+ Notes
442
+ -----
443
+ Same operation as directed_neighbor_switch except it handles this operation
444
+ for incoming edges instead of outgoing.
445
+ """
446
+ w_prime = unsat.pop()
447
+ unsat.add(w_prime)
448
+ # slightly faster declaring these as variables.
449
+ w_neighbs = list(G.predecessors(w))
450
+ w_prime_neighbs = list(G.predecessors(w_prime))
451
+ # select node v, a neighbor of w, that is not connected to w_prime.
452
+ for v in w_neighbs:
453
+ if (v not in w_prime_neighbs) and w_prime != v:
454
+ # removes (v,w), add (v,w_prime) and update data structures.
455
+ G.remove_edge(v, w)
456
+ G.add_edge(v, w_prime)
457
+ if h_partition_out[v] == partition:
458
+ chords.add((v, w))
459
+ chords.discard((v, w_prime))
460
+
461
+ h_node_residual_in[w] += 1
462
+ h_node_residual_in[w_prime] -= 1
463
+ if h_node_residual_in[w_prime] == 0:
464
+ unsat.remove(w_prime)
465
+ return None
466
+
467
+ # If neighbor switch didn't work, use the unsaturated node.
468
+ return w_prime
469
+
470
+
471
+ @py_random_state(3)
472
+ @nx._dispatchable(graphs=None, returns_graph=True)
473
+ def directed_joint_degree_graph(in_degrees, out_degrees, nkk, seed=None):
474
+ """Generates a random simple directed graph with the joint degree.
475
+
476
+ Parameters
477
+ ----------
478
+ in_degrees : list of integers
480
+ in degree sequence contains the in degrees of nodes.
+ out_degrees : list of integers
481
+ out degree sequence contains the out degrees of nodes.
481
+ nkk : dictionary of dictionary of integers
482
+ directed joint degree dictionary, for nodes of out degree k (first
483
+ level of dict) and nodes of in degree l (second level of dict)
484
+ describes the number of edges.
485
+ seed : integer, random_state, or None (default)
486
+ Indicator of random number generation state.
+ See :ref:`Randomness<randomness>`.
487
+
488
+ Returns
489
+ -------
490
+ G : Graph
491
+ A directed graph with the specified inputs.
492
+
493
+ Raises
494
+ ------
495
+ NetworkXError
496
+ If the in/out degree sequences and nkk are not realizable as a simple
+ directed graph.
497
+
498
+
499
+ Notes
500
+ -----
501
+ Similarly to the undirected version:
502
+ In each iteration of the "while loop" the algorithm picks two disconnected
503
+ nodes v and w, of degree k and l correspondingly, for which nkk[k][l] has
504
+ not reached its target yet i.e. (for given k,l): n_edges_add < nkk[k][l].
505
+ It then adds edge (v,w) and always increases the number of edges in graph G
506
+ by one.
507
+
508
+ The intelligence of the algorithm lies in the fact that it is always
509
+ possible to add an edge between disconnected nodes v and w, for which
510
+ nkk[degree(v)][degree(w)] has not reached its target, even if one or both
511
+ nodes do not have free stubs. If either node v or w does not have a free
512
+ stub, we perform a "neighbor switch", an edge rewiring move that releases a
513
+ free stub while keeping nkk the same.
514
+
515
+ The difference for the directed version lies in the fact that neighbor
516
+ switches might not be able to rewire, but in these cases unsaturated nodes
517
+ can be used instead; see [1] for a detailed description and
518
+ proofs.
519
+
520
+ The algorithm continues for E (number of edges in the graph) iterations of
521
+ the "while loop", at which point all entries of the given nkk[k][l] have
522
+ reached their target values and the construction is complete.
523
+
524
+ References
525
+ ----------
526
+ [1] B. Tillman, A. Markopoulou, C. T. Butts & M. Gjoka,
527
+ "Construction of Directed 2K Graphs". In Proc. of KDD 2017.
528
+
529
+ Examples
530
+ --------
531
+ >>> in_degrees = [0, 1, 1, 2]
532
+ >>> out_degrees = [1, 1, 1, 1]
533
+ >>> nkk = {1: {1: 2, 2: 2}}
534
+ >>> G = nx.directed_joint_degree_graph(in_degrees, out_degrees, nkk)
535
+ >>>
536
+ """
537
+ if not is_valid_directed_joint_degree(in_degrees, out_degrees, nkk):
538
+ msg = "Input is not realizable as a simple graph"
539
+ raise nx.NetworkXError(msg)
540
+
541
+ # start with an empty directed graph.
542
+ G = nx.DiGraph()
543
+
544
+ # for a given group, keep the list of all node ids.
545
+ h_degree_nodelist_in = {}
546
+ h_degree_nodelist_out = {}
547
+ # for a given group, keep the list of all unsaturated node ids.
548
+ h_degree_nodelist_in_unsat = {}
549
+ h_degree_nodelist_out_unsat = {}
550
+ # for a given node, keep track of the remaining stubs to be added.
551
+ h_node_residual_out = {}
552
+ h_node_residual_in = {}
553
+ # for a given node, keep track of the partition id.
554
+ h_partition_out = {}
555
+ h_partition_in = {}
556
+ # keep track of non-chords between pairs of partition ids.
557
+ non_chords = {}
558
+
559
+ # populate data structures
560
+ for idx, i in enumerate(in_degrees):
561
+ idx = int(idx)
562
+ if i > 0:
563
+ h_degree_nodelist_in.setdefault(i, [])
564
+ h_degree_nodelist_in_unsat.setdefault(i, set())
565
+ h_degree_nodelist_in[i].append(idx)
566
+ h_degree_nodelist_in_unsat[i].add(idx)
567
+ h_node_residual_in[idx] = i
568
+ h_partition_in[idx] = i
569
+
570
+ for idx, o in enumerate(out_degrees):
572
+ non_chords[(o, in_degrees[idx])] = non_chords.get((o, in_degrees[idx]), 0) + 1
573
+ idx = int(idx)
574
+ if o > 0:
575
+ h_degree_nodelist_out.setdefault(o, [])
576
+ h_degree_nodelist_out_unsat.setdefault(o, set())
577
+ h_degree_nodelist_out[o].append(idx)
578
+ h_degree_nodelist_out_unsat[o].add(idx)
579
+ h_node_residual_out[idx] = o
580
+ h_partition_out[idx] = o
581
+
582
+ G.add_node(idx)
583
+
584
+ nk_in = {}
585
+ nk_out = {}
586
+ for p in h_degree_nodelist_in:
587
+ nk_in[p] = len(h_degree_nodelist_in[p])
588
+ for p in h_degree_nodelist_out:
589
+ nk_out[p] = len(h_degree_nodelist_out[p])
590
+
591
+ # iterate over every degree pair (k,l) and add the number of edges given
592
+ # for each pair.
593
+ for k in nkk:
594
+ for l in nkk[k]:
595
+ n_edges_add = nkk[k][l]
596
+
597
+ if n_edges_add > 0:
598
+ # chords contains a random set of potential edges.
599
+ chords = set()
600
+
601
+ k_len = nk_out[k]
602
+ l_len = nk_in[l]
603
+ chords_sample = seed.sample(
604
+ range(k_len * l_len), n_edges_add + non_chords.get((k, l), 0)
605
+ )
606
+
607
+ num = 0
608
+ while len(chords) < n_edges_add:
609
+ i = h_degree_nodelist_out[k][chords_sample[num] % k_len]
610
+ j = h_degree_nodelist_in[l][chords_sample[num] // k_len]
611
+ num += 1
612
+ if i != j:
613
+ chords.add((i, j))
614
+
615
+ # k_unsat and l_unsat consist of nodes of in/out degree k and l
616
+ # that are unsaturated i.e. those nodes that have at least one
617
+ # available stub
618
+ k_unsat = h_degree_nodelist_out_unsat[k]
619
+ l_unsat = h_degree_nodelist_in_unsat[l]
620
+
621
+ while n_edges_add > 0:
622
+ v, w = chords.pop()
623
+ chords.add((v, w))
624
+
625
+ # if node v has no free stubs then do neighbor switch.
626
+ if h_node_residual_out[v] == 0:
627
+ _v = _directed_neighbor_switch(
628
+ G,
629
+ v,
630
+ k_unsat,
631
+ h_node_residual_out,
632
+ chords,
633
+ h_partition_in,
634
+ l,
635
+ )
636
+ if _v is not None:
637
+ v = _v
638
+
639
+ # if node w has no free stubs then do neighbor switch.
640
+ if h_node_residual_in[w] == 0:
641
+ _w = _directed_neighbor_switch_rev(
642
+ G,
643
+ w,
644
+ l_unsat,
645
+ h_node_residual_in,
646
+ chords,
647
+ h_partition_out,
648
+ k,
649
+ )
650
+ if _w is not None:
651
+ w = _w
652
+
653
+ # add edge (v,w) and update data structures.
654
+ G.add_edge(v, w)
655
+ h_node_residual_out[v] -= 1
656
+ h_node_residual_in[w] -= 1
657
+ n_edges_add -= 1
658
+ chords.discard((v, w))
659
+
660
+ if h_node_residual_out[v] == 0:
661
+ k_unsat.discard(v)
662
+ if h_node_residual_in[w] == 0:
663
+ l_unsat.discard(w)
664
+ return G
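A verification sketch (illustrative): the generated DiGraph realizes the prescribed in- and out-degree sequences.

    import networkx as nx

    in_degrees = [0, 1, 1, 2]
    out_degrees = [1, 1, 1, 1]
    nkk = {1: {1: 2, 2: 2}}
    G = nx.directed_joint_degree_graph(in_degrees, out_degrees, nkk, seed=3)

    assert sorted(d for _, d in G.in_degree()) == sorted(in_degrees)
    assert sorted(d for _, d in G.out_degree()) == sorted(out_degrees)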
llmeval-env/lib/python3.10/site-packages/networkx/generators/lattice.py ADDED
@@ -0,0 +1,367 @@
1
+ """Functions for generating grid graphs and lattices
2
+
3
+ The :func:`grid_2d_graph`, :func:`triangular_lattice_graph`, and
4
+ :func:`hexagonal_lattice_graph` functions correspond to the three
5
+ `regular tilings of the plane`_, the square, triangular, and hexagonal
6
+ tilings, respectively. :func:`grid_graph` and :func:`hypercube_graph`
7
+ are similar for arbitrary dimensions. Useful related discussion can
9
+ be found in `Triangular Tiling`_ and `Square, Hex and Triangle Grids`_.
9
+
10
+ .. _regular tilings of the plane: https://en.wikipedia.org/wiki/List_of_regular_polytopes_and_compounds#Euclidean_tilings
11
+ .. _Square, Hex and Triangle Grids: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/
12
+ .. _Triangular Tiling: https://en.wikipedia.org/wiki/Triangular_tiling
13
+
14
+ """
15
+
16
+ from itertools import repeat
17
+ from math import sqrt
18
+
19
+ import networkx as nx
20
+ from networkx.classes import set_node_attributes
21
+ from networkx.exception import NetworkXError
22
+ from networkx.generators.classic import cycle_graph, empty_graph, path_graph
23
+ from networkx.relabel import relabel_nodes
24
+ from networkx.utils import flatten, nodes_or_number, pairwise
25
+
26
+ __all__ = [
27
+ "grid_2d_graph",
28
+ "grid_graph",
29
+ "hypercube_graph",
30
+ "triangular_lattice_graph",
31
+ "hexagonal_lattice_graph",
32
+ ]
33
+
34
+
35
+ @nx._dispatchable(graphs=None, returns_graph=True)
36
+ @nodes_or_number([0, 1])
37
+ def grid_2d_graph(m, n, periodic=False, create_using=None):
38
+ """Returns the two-dimensional grid graph.
39
+
40
+ The grid graph has each node connected to its four nearest neighbors.
41
+
42
+ Parameters
43
+ ----------
44
+ m, n : int or iterable container of nodes
45
+ If an integer, nodes are from `range(n)`.
46
+ If a container, elements become the coordinate of the nodes.
47
+
48
+ periodic : bool or iterable
49
+ If `periodic` is True, both dimensions are periodic. If False, none
50
+ are periodic. If `periodic` is iterable, it should yield 2 bool
51
+ values indicating whether the 1st and 2nd axes, respectively, are
52
+ periodic.
53
+
54
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
55
+ Graph type to create. If graph instance, then cleared before populated.
56
+
57
+ Returns
58
+ -------
59
+ NetworkX graph
60
+ The (possibly periodic) grid graph of the specified dimensions.
61
+
62
+ """
63
+ G = empty_graph(0, create_using)
64
+ row_name, rows = m
65
+ col_name, cols = n
66
+ G.add_nodes_from((i, j) for i in rows for j in cols)
67
+ G.add_edges_from(((i, j), (pi, j)) for pi, i in pairwise(rows) for j in cols)
68
+ G.add_edges_from(((i, j), (i, pj)) for i in rows for pj, j in pairwise(cols))
69
+
70
+ try:
71
+ periodic_r, periodic_c = periodic
72
+ except TypeError:
73
+ periodic_r = periodic_c = periodic
74
+
75
+ if periodic_r and len(rows) > 2:
76
+ first = rows[0]
77
+ last = rows[-1]
78
+ G.add_edges_from(((first, j), (last, j)) for j in cols)
79
+ if periodic_c and len(cols) > 2:
80
+ first = cols[0]
81
+ last = cols[-1]
82
+ G.add_edges_from(((i, first), (i, last)) for i in rows)
83
+ # both directions for directed
84
+ if G.is_directed():
85
+ G.add_edges_from((v, u) for u, v in G.edges())
86
+ return G
87
+
88
+
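A usage sketch (illustrative) of the periodic option: with both dimensions larger than 2, the wrap-around edges turn the grid into a torus.

    import networkx as nx

    # Periodic: every node has exactly four neighbors.
    G = nx.grid_2d_graph(4, 5, periodic=True)
    assert all(d == 4 for _, d in G.degree())

    # Non-periodic: corner nodes keep only two neighbors.
    H = nx.grid_2d_graph(4, 5)
    assert H.degree((0, 0)) == 2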
89
+ @nx._dispatchable(graphs=None, returns_graph=True)
90
+ def grid_graph(dim, periodic=False):
91
+ """Returns the *n*-dimensional grid graph.
92
+
93
+ The dimension *n* is the length of the list `dim` and the size in
94
+ each dimension is the value of the corresponding list element.
95
+
96
+ Parameters
97
+ ----------
98
+ dim : list or tuple of numbers or iterables of nodes
99
+ 'dim' is a tuple or list with, for each dimension, either a number
100
+ that is the size of that dimension or an iterable of nodes for
101
+ that dimension. The dimension of the grid_graph is the length
102
+ of `dim`.
103
+
104
+ periodic : bool or iterable
105
+ If `periodic` is True, all dimensions are periodic. If False all
106
+ dimensions are not periodic. If `periodic` is iterable, it should
107
+ yield `dim` bool values each of which indicates whether the
108
+ corresponding axis is periodic.
109
+
110
+ Returns
111
+ -------
112
+ NetworkX graph
113
+ The (possibly periodic) grid graph of the specified dimensions.
114
+
115
+ Examples
116
+ --------
117
+ To produce a 2 by 3 by 4 grid graph, a graph on 24 nodes:
118
+
119
+ >>> from networkx import grid_graph
120
+ >>> G = grid_graph(dim=(2, 3, 4))
121
+ >>> len(G)
122
+ 24
123
+ >>> G = grid_graph(dim=(range(7, 9), range(3, 6)))
124
+ >>> len(G)
125
+ 6
126
+ """
127
+ from networkx.algorithms.operators.product import cartesian_product
128
+
129
+ if not dim:
130
+ return empty_graph(0)
131
+
132
+ try:
133
+ func = (cycle_graph if p else path_graph for p in periodic)
134
+ except TypeError:
135
+ func = repeat(cycle_graph if periodic else path_graph)
136
+
137
+ G = next(func)(dim[0])
138
+ for current_dim in dim[1:]:
139
+ Gnew = next(func)(current_dim)
140
+ G = cartesian_product(Gnew, G)
141
+ # graph G is done but has labels of the form (1, (2, (3, 1))) so relabel
142
+ H = relabel_nodes(G, flatten)
143
+ return H
144
+
145
+
146
+ @nx._dispatchable(graphs=None, returns_graph=True)
147
+ def hypercube_graph(n):
148
+ """Returns the *n*-dimensional hypercube graph.
149
+
150
+ The nodes are the integers between 0 and ``2 ** n - 1``, inclusive.
151
+
152
+ For more information on the hypercube graph, see the Wikipedia
153
+ article `Hypercube graph`_.
154
+
155
+ .. _Hypercube graph: https://en.wikipedia.org/wiki/Hypercube_graph
156
+
157
+ Parameters
158
+ ----------
159
+ n : int
160
+ The dimension of the hypercube.
161
+ The number of nodes in the graph will be ``2 ** n``.
162
+
163
+ Returns
164
+ -------
165
+ NetworkX graph
166
+ The hypercube graph of dimension *n*.
167
+ """
168
+ dim = n * [2]
169
+ G = grid_graph(dim)
170
+ return G
171
+
172
+
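A quick property check (illustrative): the n-cube is n-regular with 2**n nodes and n * 2**(n - 1) edges.

    import networkx as nx

    n = 4
    G = nx.hypercube_graph(n)
    assert len(G) == 2**n
    assert all(d == n for _, d in G.degree())
    assert G.number_of_edges() == n * 2 ** (n - 1)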
173
+ @nx._dispatchable(graphs=None, returns_graph=True)
174
+ def triangular_lattice_graph(
175
+ m, n, periodic=False, with_positions=True, create_using=None
176
+ ):
177
+ r"""Returns the $m$ by $n$ triangular lattice graph.
178
+
179
+ The `triangular lattice graph`_ is a two-dimensional `grid graph`_ in
180
+ which each square unit has a diagonal edge (each grid unit has a chord).
181
+
182
+ The returned graph has $m$ rows and $n$ columns of triangles. Rows and
183
+ columns include both triangles pointing up and down. Rows form a strip
184
+ of constant height. Columns form a series of diamond shapes, staggered
185
+ with the columns on either side. Another way to state the size is that
186
+ the nodes form a grid of `m+1` rows and `(n + 1) // 2` columns.
187
+ The odd row nodes are shifted horizontally relative to the even rows.
188
+
189
+ Directed graph types have edges pointing up or right.
190
+
191
+ Positions of nodes are computed by default (i.e. when `with_positions`
193
+ is True). The position of each node (embedded in a Euclidean plane) is
194
+ stored in the graph using equilateral triangles with side length 1.
195
+ The height between rows of nodes is thus $\sqrt{3}/2$.
195
+ Nodes lie in the first quadrant with the node $(0, 0)$ at the origin.
196
+
197
+ .. _triangular lattice graph: http://mathworld.wolfram.com/TriangularGrid.html
198
+ .. _grid graph: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/
199
+ .. _Triangular Tiling: https://en.wikipedia.org/wiki/Triangular_tiling
200
+
201
+ Parameters
202
+ ----------
203
+ m : int
204
+ The number of rows in the lattice.
205
+
206
+ n : int
207
+ The number of columns in the lattice.
208
+
209
+ periodic : bool (default: False)
210
+ If True, join the boundary vertices of the grid using periodic
211
+ boundary conditions. The join between boundaries is the final row
212
+ and column of triangles. This means the periodic lattice has one
214
+ fewer row and one fewer column of nodes. Periodic lattices require
215
+ `m >= 3`, `n >= 5` and are allowed but misaligned if `m` or `n` are odd.
215
+
216
+ with_positions : bool (default: True)
217
+ Store the coordinates of each node in the graph node attribute 'pos'.
218
+ The coordinates provide a lattice with equilateral triangles.
219
+ Periodic positions shift the nodes vertically in a nonlinear way so
220
+ the edges don't overlap so much.
221
+
222
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
223
+ Graph type to create. If graph instance, then cleared before populated.
224
+
225
+ Returns
226
+ -------
227
+ NetworkX graph
228
+ The *m* by *n* triangular lattice graph.
229
+ """
230
+ H = empty_graph(0, create_using)
231
+ if n == 0 or m == 0:
232
+ return H
233
+ if periodic:
234
+ if n < 5 or m < 3:
235
+ msg = f"m > 2 and n > 4 required for periodic. m={m}, n={n}"
236
+ raise NetworkXError(msg)
237
+
238
+ N = (n + 1) // 2 # number of nodes in row
239
+ rows = range(m + 1)
240
+ cols = range(N + 1)
241
+ # Make grid
242
+ H.add_edges_from(((i, j), (i + 1, j)) for j in rows for i in cols[:N])
243
+ H.add_edges_from(((i, j), (i, j + 1)) for j in rows[:m] for i in cols)
244
+ # add diagonals
245
+ H.add_edges_from(((i, j), (i + 1, j + 1)) for j in rows[1:m:2] for i in cols[:N])
246
+ H.add_edges_from(((i + 1, j), (i, j + 1)) for j in rows[:m:2] for i in cols[:N])
247
+ # identify boundary nodes if periodic
248
+ from networkx.algorithms.minors import contracted_nodes
249
+
250
+ if periodic is True:
251
+ for i in cols:
252
+ H = contracted_nodes(H, (i, 0), (i, m))
253
+ for j in rows[:m]:
254
+ H = contracted_nodes(H, (0, j), (N, j))
255
+ elif n % 2:
256
+ # remove extra nodes
257
+ H.remove_nodes_from((N, j) for j in rows[1::2])
258
+
259
+ # Add position node attributes
260
+ if with_positions:
261
+ ii = (i for i in cols for j in rows)
262
+ jj = (j for i in cols for j in rows)
263
+ xx = (0.5 * (j % 2) + i for i in cols for j in rows)
264
+ h = sqrt(3) / 2
265
+ if periodic:
266
+ yy = (h * j + 0.01 * i * i for i in cols for j in rows)
267
+ else:
268
+ yy = (h * j for i in cols for j in rows)
269
+ pos = {(i, j): (x, y) for i, j, x, y in zip(ii, jj, xx, yy) if (i, j) in H}
270
+ set_node_attributes(H, pos, "pos")
271
+ return H
272
+
273
+
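A usage sketch (illustrative) of the stored coordinates:

    import networkx as nx

    # Nodes of a 2-by-3 triangular lattice carry a 'pos' attribute
    # (equilateral triangles with side length 1, rows sqrt(3)/2 apart).
    G = nx.triangular_lattice_graph(2, 3)
    pos = nx.get_node_attributes(G, "pos")
    assert set(pos) == set(G.nodes())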
274
+ @nx._dispatchable(graphs=None, returns_graph=True)
275
+ def hexagonal_lattice_graph(
276
+ m, n, periodic=False, with_positions=True, create_using=None
277
+ ):
278
+ """Returns an `m` by `n` hexagonal lattice graph.
279
+
280
+ The *hexagonal lattice graph* is a graph whose nodes and edges are
281
+ the `hexagonal tiling`_ of the plane.
282
+
283
+ The returned graph will have `m` rows and `n` columns of hexagons.
284
+ `Odd numbered columns`_ are shifted up relative to even numbered columns.
285
+
286
+ Positions of nodes are computed by default (i.e. when `with_positions`
287
+ is True). Node positions create the standard embedding in the plane
288
+ with side length 1 and are stored in the node attribute 'pos'.
289
+ `pos = nx.get_node_attributes(G, 'pos')` creates a dict ready for drawing.
290
+
291
+ .. _hexagonal tiling: https://en.wikipedia.org/wiki/Hexagonal_tiling
292
+ .. _Odd numbered columns: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/
293
+
294
+ Parameters
295
+ ----------
296
+ m : int
297
+ The number of rows of hexagons in the lattice.
298
+
299
+ n : int
300
+ The number of columns of hexagons in the lattice.
301
+
302
+ periodic : bool
303
+ Whether to make a periodic grid by joining the boundary vertices.
304
+ For this to work `n` must be even and both `n > 1` and `m > 1`.
305
+ The periodic connections create another row and column of hexagons
306
+ so these graphs have fewer nodes as boundary nodes are identified.
307
+
308
+ with_positions : bool (default: True)
309
+ Store the coordinates of each node in the graph node attribute 'pos'.
310
+ The coordinates provide a lattice with vertical columns of hexagons
311
+ offset to interleave and cover the plane.
312
+ Periodic positions shift the nodes vertically in a nonlinear way so
313
+ the edges don't overlap so much.
314
+
315
+ create_using : NetworkX graph constructor, optional (default=nx.Graph)
316
+ Graph type to create. If graph instance, then cleared before populated.
317
+ If graph is directed, edges will point up or right.
318
+
319
+ Returns
320
+ -------
321
+ NetworkX graph
322
+ The *m* by *n* hexagonal lattice graph.
323
+ """
324
+ G = empty_graph(0, create_using)
325
+ if m == 0 or n == 0:
326
+ return G
327
+ if periodic and (n % 2 == 1 or m < 2 or n < 2):
328
+ msg = "periodic hexagonal lattice needs m > 1, n > 1 and even n"
329
+ raise NetworkXError(msg)
330
+
331
+ M = 2 * m # twice as many nodes as hexagons vertically
332
+ rows = range(M + 2)
333
+ cols = range(n + 1)
334
+ # make lattice
335
+ col_edges = (((i, j), (i, j + 1)) for i in cols for j in rows[: M + 1])
336
+ row_edges = (((i, j), (i + 1, j)) for i in cols[:n] for j in rows if i % 2 == j % 2)
337
+ G.add_edges_from(col_edges)
338
+ G.add_edges_from(row_edges)
339
+ # Remove corner nodes with one edge
340
+ G.remove_node((0, M + 1))
341
+ G.remove_node((n, (M + 1) * (n % 2)))
342
+
343
+ # identify boundary nodes if periodic
344
+ from networkx.algorithms.minors import contracted_nodes
345
+
346
+ if periodic:
347
+ for i in cols[:n]:
348
+ G = contracted_nodes(G, (i, 0), (i, M))
349
+ for i in cols[1:]:
350
+ G = contracted_nodes(G, (i, 1), (i, M + 1))
351
+ for j in rows[1:M]:
352
+ G = contracted_nodes(G, (0, j), (n, j))
353
+ G.remove_node((n, M))
354
+
355
+ # calc position in embedded space
356
+ ii = (i for i in cols for j in rows)
357
+ jj = (j for i in cols for j in rows)
358
+ xx = (0.5 + i + i // 2 + (j % 2) * ((i % 2) - 0.5) for i in cols for j in rows)
359
+ h = sqrt(3) / 2
360
+ if periodic:
361
+ yy = (h * j + 0.01 * i * i for i in cols for j in rows)
362
+ else:
363
+ yy = (h * j for i in cols for j in rows)
364
+ # exclude nodes not in G
365
+ pos = {(i, j): (x, y) for i, j, x, y in zip(ii, jj, xx, yy) if (i, j) in G}
366
+ set_node_attributes(G, pos, "pos")
367
+ return G
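A closing sketch (illustrative), following the docstring's hint that 'pos' is ready for drawing; the draw call assumes matplotlib is available:

    import networkx as nx

    G = nx.hexagonal_lattice_graph(2, 3)
    pos = nx.get_node_attributes(G, "pos")
    nx.draw(G, pos)  # requires matplotlib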