applied-ai-018 committed
Commit b9e633b · verified · Parent(s): a16a162

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step120/zero/17.input_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  6. venv/lib/python3.10/site-packages/torch/ao/pruning/__init__.py +19 -0
  7. venv/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/_mappings.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/__init__.py +0 -0
  10. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/__pycache__/__init__.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/__init__.py +0 -0
  12. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/__pycache__/__init__.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/__pycache__/activation_sparsifier.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/activation_sparsifier.py +418 -0
  15. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__init__.py +7 -0
  16. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/base_data_sparsifier.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/data_norm_sparsifier.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/quantization_utils.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/base_data_sparsifier.py +309 -0
  21. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/data_norm_sparsifier.py +153 -0
  22. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/__init__.py +0 -0
  23. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__init__.py +0 -0
  25. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/__init__.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/_data_sparstity_utils.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/data_sparsity.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py +39 -0
  29. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/data_sparsity.py +165 -0
  30. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/quantization_utils.py +130 -0
  31. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/FPGM_pruner.py +93 -0
  32. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/__init__.py +8 -0
  33. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/base_structured_sparsifier.py +310 -0
  34. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/lstm_saliency_pruner.py +48 -0
  35. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/match_utils.py +59 -0
  36. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/parametrization.py +59 -0
  37. venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/prune_functions.py +475 -0
  38. venv/lib/python3.10/site-packages/torch/ao/pruning/_mappings.py +18 -0
  39. venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__init__.py +0 -0
  40. venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/__init__.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/base_sparsifier.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/nearly_diagonal_sparsifier.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/utils.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/weight_norm_sparsifier.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/base_sparsifier.py +353 -0
  46. venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/nearly_diagonal_sparsifier.py +55 -0
  47. venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/utils.py +136 -0
  48. venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/weight_norm_sparsifier.py +200 -0
  49. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/17.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17ab46f7f61a85a3fc5d3ca51ed1d3a251475eb42cf5e77d079b13d7dab20013
+ size 9372
ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a1cbd65d46255e5cc77587f09f30d33aedce37e6761f9c97740c290dc8629d7
+ size 33555612
ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:607d9516890621a677964a3165b7c988b6b9cc955db106a28c9e9e3f92c232cd
+ size 33555627
ckpts/universal/global_step120/zero/17.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eab58c8f2361b8c9914ef13068d1539dcdc22acf953cffa3f10a9e3e4b8a1425
+ size 33555533
ckpts/universal/global_step120/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3dae4ced58b9af1a60c89ec4cf0844502a145c8ffe95c7c9af012ac7ef71b044
+ size 33555533
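
These five .pt entries are Git LFS pointers, not tensors: the repository stores only the spec version, the blob's SHA-256, and its byte size, while the payload lives in LFS storage. The directory layout (per-parameter folders holding exp_avg, exp_avg_sq, and fp32 files under global_step120/zero/) matches a DeepSpeed-style universal checkpoint, i.e. Adam's first and second moments plus fp32 master weights. As an illustrative sketch, not part of this commit, one of these files could be inspected like this once the LFS blobs are pulled:

import torch

# Assumes `git lfs pull` has replaced the pointer file with the real blob.
state = torch.load(
    "ckpts/universal/global_step120/zero/17.input_layernorm.weight/exp_avg.pt",
    map_location="cpu",
)
# The payload is expected to be a tensor of optimizer state;
# the shape depends on the parameter the folder is named after.
print(type(state), getattr(state, "shape", None))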
venv/lib/python3.10/site-packages/torch/ao/pruning/__init__.py ADDED
@@ -0,0 +1,19 @@
+ # Variables
+ from ._mappings import get_dynamic_sparse_quantized_mapping
+ from ._mappings import get_static_sparse_quantized_mapping
+
+ # Sparsifier
+ from .sparsifier.base_sparsifier import BaseSparsifier
+ from .sparsifier.weight_norm_sparsifier import WeightNormSparsifier
+ from .sparsifier.nearly_diagonal_sparsifier import NearlyDiagonalSparsifier
+
+ # Scheduler
+ from .scheduler.base_scheduler import BaseScheduler
+ from .scheduler.lambda_scheduler import LambdaSL
+ from .scheduler.cubic_scheduler import CubicSL
+
+ # Parametrizations
+ from .sparsifier.utils import FakeSparsity
+ from .sparsifier.utils import module_to_fqn
+ from .sparsifier.utils import fqn_to_module
+ from .sparsifier.utils import get_arg_info_from_tensor_fqn
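
The new top-level __init__.py simply re-exports the sparsifier, scheduler, and parametrization utilities. For orientation, a minimal sketch of how the re-exported names are typically combined (standard torch.ao.pruning usage, not code from this commit):

import torch.nn as nn
from torch.ao.pruning import WeightNormSparsifier

model = nn.Sequential(nn.Linear(8, 8))
sparsifier = WeightNormSparsifier(sparsity_level=0.5, sparse_block_shape=(1, 4))
sparsifier.prepare(model, config=[{"tensor_fqn": "0.weight"}])
sparsifier.step()          # compute/update the sparsity masks
sparsifier.squash_mask()   # fold masks into the weights and remove parametrizations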
venv/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (895 Bytes).
venv/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/_mappings.cpython-310.pyc ADDED
Binary file (708 Bytes).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (220 Bytes).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/__pycache__/activation_sparsifier.cpython-310.pyc ADDED
Binary file (15.1 kB).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/activation_sparsifier.py ADDED
@@ -0,0 +1,418 @@
+ from typing import Any, Dict, List, Optional
+ import torch
+ from collections import defaultdict
+ from torch import nn
+ import copy
+ from ...sparsifier.utils import fqn_to_module, module_to_fqn
+ import warnings
+
+ __all__ = ['ActivationSparsifier']
+
+
+ class ActivationSparsifier:
+     r"""
+     The Activation sparsifier class aims to sparsify/prune activations in a neural
+     network. The idea is to attach the sparsifier to a layer (or layers) and it
+     zeroes out the activations based on the mask_fn (or sparsification function)
+     input by the user.
+     The mask_fn is applied once all the inputs are aggregated and reduced i.e.
+     mask = mask_fn(reduce_fn(aggregate_fn(activations)))
+
+     Note::
+         The sparsification mask is computed on the input **before it goes through the attached layer**.
+
+     Args:
+         model (nn.Module):
+             The model whose layers will be sparsified. The layers that need to be
+             sparsified should be added separately using the register_layer() function
+         aggregate_fn (Optional, Callable):
+             default aggregate_fn that is used if not specified while registering the layer.
+             specifies how inputs should be aggregated over time.
+             The aggregate_fn should usually take 2 torch tensors and return the aggregated tensor.
+             Example
+                 def add_agg_fn(tensor1, tensor2): return tensor1 + tensor2
+         reduce_fn (Optional, Callable):
+             default reduce_fn that is used if not specified while registering the layer.
+             reduce_fn will be called on the aggregated tensor i.e. the tensor obtained after
+             calling agg_fn() on all inputs.
+             Example
+                 def mean_reduce_fn(agg_tensor): return agg_tensor.mean(dim=0)
+         mask_fn (Optional, Callable):
+             default mask_fn that is used to create the sparsification mask using the tensor obtained after
+             calling the reduce_fn(). This is used by default if a custom one is passed in the
+             register_layer().
+             Note that the mask_fn() definition should contain the sparse arguments that are passed in the
+             sparse_config argument.
+         features (Optional, list):
+             default selected features to sparsify.
+             If this is non-empty, then the mask_fn will be applied for each feature of the input.
+             For example,
+                 mask = [mask_fn(reduce_fn(aggregate_fn(input[feature])) for feature in features]
+         feature_dim (Optional, int):
+             default dimension of input features. Again, features along this dim will be chosen
+             for sparsification.
+         sparse_config (Dict):
+             Default configuration for the mask_fn. This config will be passed
+             with the mask_fn()
+
+     Example:
+         >>> # xdoctest: +SKIP
+         >>> model = SomeModel()
+         >>> act_sparsifier = ActivationSparsifier(...)  # init activation sparsifier
+         >>> # Initialize aggregate_fn
+         >>> def agg_fn(x, y):
+         >>>     return x + y
+         >>>
+         >>> # Initialize reduce_fn
+         >>> def reduce_fn(x):
+         >>>     return torch.mean(x, dim=0)
+         >>>
+         >>> # Initialize mask_fn
+         >>> def mask_fn(data):
+         >>>     return torch.eye(data.shape).to(data.device)
+         >>>
+         >>>
+         >>> act_sparsifier.register_layer(model.some_layer, aggregate_fn=agg_fn, reduce_fn=reduce_fn, mask_fn=mask_fn)
+         >>>
+         >>> # start training process
+         >>> for _ in [...]:
+         >>>     # epoch starts
+         >>>     # model.forward(), compute_loss() and model.backwards()
+         >>>     # epoch ends
+         >>>     act_sparsifier.step()
+         >>> # end training process
+         >>> sparsifier.squash_mask()
+     """
+     def __init__(self, model: nn.Module, aggregate_fn=None, reduce_fn=None, mask_fn=None,
+                  features=None, feature_dim=None, **sparse_config):
+         self.model = model
+         self.defaults: Dict[str, Any] = defaultdict()
+         self.defaults['sparse_config'] = sparse_config
+
+         # functions
+         self.defaults['aggregate_fn'] = aggregate_fn
+         self.defaults['reduce_fn'] = reduce_fn
+         self.defaults['mask_fn'] = mask_fn
+
+         # default feature and feature_dim
+         self.defaults['features'] = features
+         self.defaults['feature_dim'] = feature_dim
+
+         self.data_groups: Dict[str, Dict] = defaultdict(dict)  # contains all relevant info w.r.t each registered layer
+
+         self.state: Dict[str, Any] = defaultdict(dict)  # layer name -> mask
+
+     @staticmethod
+     def _safe_rail_checks(args):
+         """Makes sure that some of the functions and attributes are not passed incorrectly
+         """
+
+         # if features are not None, then feature_dim must not be None
+         features, feature_dim = args['features'], args['feature_dim']
+         if features is not None:
+             assert feature_dim is not None, "need feature dim to select features"
+
+         # all the *_fns should be callable
+         fn_keys = ['aggregate_fn', 'reduce_fn', 'mask_fn']
+         for key in fn_keys:
+             fn = args[key]
+             assert callable(fn), 'function should be callable'
+
+     def _aggregate_hook(self, name):
+         """Returns hook that computes aggregate of activations passing through.
+         """
+
+         # gather some data
+         feature_dim = self.data_groups[name]['feature_dim']
+         features = self.data_groups[name]['features']
+         agg_fn = self.data_groups[name]['aggregate_fn']
+
+         def hook(module, input) -> None:
+             input_data = input[0]
+
+             data = self.data_groups[name].get('data')  # aggregated data
+             if features is None:
+                 # no features associated, data should not be a list
+                 if data is None:
+                     data = torch.zeros_like(input_data)
+                     self.state[name]['mask'] = torch.ones_like(input_data)
+                 out_data = agg_fn(data, input_data)
+             else:
+                 # data should be a list [aggregated over each feature only]
+                 if data is None:
+                     out_data = [0 for _ in range(0, len(features))]  # create one in case of 1st forward
+                     self.state[name]['mask'] = [0 for _ in range(0, len(features))]
+                 else:
+                     out_data = data  # a list
+
+                 # compute aggregate over each feature
+                 for feature_idx in range(len(features)):
+                     # each feature is either a list or scalar, convert it to torch tensor
+                     feature_tensor = torch.Tensor([features[feature_idx]]).long().to(input_data.device)
+                     data_feature = torch.index_select(input_data, feature_dim, feature_tensor)
+                     if data is None:
+                         curr_data = torch.zeros_like(data_feature)
+                         self.state[name]['mask'][feature_idx] = torch.ones_like(data_feature)
+                     else:
+                         curr_data = data[feature_idx]
+                     out_data[feature_idx] = agg_fn(curr_data, data_feature)
+             self.data_groups[name]['data'] = out_data
+         return hook
+
+     def register_layer(self, layer: nn.Module, aggregate_fn=None, reduce_fn=None,
+                        mask_fn=None, features=None, feature_dim=None, **sparse_config):
+         r"""
+         Registers a layer for sparsification. The layer should be part of self.model.
+         Specifically, registers a pre-forward hook to the layer. The hook will apply the aggregate_fn
+         and store the aggregated activations that are input over each step.
+
+         Note::
+             - There is no need to pass in the name of the layer as it is automatically computed as per
+               the fqn convention.
+
+             - All the functions (fn) passed as argument will be called at a dim, feature level.
+         """
+         name = module_to_fqn(self.model, layer)
+         assert name is not None, "layer not found in the model"  # satisfy mypy
+
+         if name in self.data_groups:  # unregister layer if already present
+             warnings.warn("layer already attached to the sparsifier, deregistering the layer and registering with new config")
+             self.unregister_layer(name=name)
+
+         local_args = copy.deepcopy(self.defaults)
+         update_dict = {
+             'aggregate_fn': aggregate_fn,
+             'reduce_fn': reduce_fn,
+             'mask_fn': mask_fn,
+             'features': features,
+             'feature_dim': feature_dim,
+             'layer': layer
+         }
+         local_args.update((arg, val) for arg, val in update_dict.items() if val is not None)
+         local_args['sparse_config'].update(sparse_config)
+
+         self._safe_rail_checks(local_args)
+
+         self.data_groups[name] = local_args
+         agg_hook = layer.register_forward_pre_hook(self._aggregate_hook(name=name))
+
+         self.state[name]['mask'] = None  # mask will be created when model forward is called.
+
+         # attach agg hook
+         self.data_groups[name]['hook'] = agg_hook
+
+         # for serialization purposes, we know whether aggregate_hook is attached
+         # or sparsify_hook()
+         self.data_groups[name]['hook_state'] = "aggregate"  # aggregate hook is attached
+
+     def get_mask(self, name: Optional[str] = None, layer: Optional[nn.Module] = None):
+         """
+         Returns mask associated to the layer.
+
+         The mask is
+             - a torch tensor if features for that layer is None.
+             - a list of torch tensors for each feature, otherwise
+
+         Note::
+             The shape of the mask is unknown until model.forward() is applied.
+             Hence, if get_mask() is called before model.forward(), an
+             error will be raised.
+         """
+         assert name is not None or layer is not None, "Need at least name or layer obj to retrieve mask"
+
+         if name is None:
+             assert layer is not None
+             name = module_to_fqn(self.model, layer)
+             assert name is not None, "layer not found in the specified model"
+
+         if name not in self.state:
+             raise ValueError("Error: layer with the given name not found")
+
+         mask = self.state[name].get('mask', None)
+
+         if mask is None:
+             raise ValueError("Error: shape unknown, call layer() routine at least once to infer mask")
+         return mask
+
+     def unregister_layer(self, name):
+         """Detaches the sparsifier from the layer
+         """
+
+         # detach any hooks attached
+         self.data_groups[name]['hook'].remove()
+
+         # pop from the state dict
+         self.state.pop(name)
+
+         # pop from the data groups
+         self.data_groups.pop(name)
+
+     def step(self):
+         """Internally calls the update_mask() function for each layer
+         """
+         with torch.no_grad():
+             for name, configs in self.data_groups.items():
+                 data = configs['data']
+                 self.update_mask(name, data, configs)
+
+                 self.data_groups[name].pop('data')  # reset the accumulated data
+
+     def update_mask(self, name, data, configs):
+         """
+         Called for each registered layer and does the following-
+             1. apply reduce_fn on the aggregated activations
+             2. use mask_fn to compute the sparsification mask
+
+         Note:
+             the reduce_fn and mask_fn is called for each feature, dim over the data
+         """
+         mask = self.get_mask(name)
+         sparse_config = configs['sparse_config']
+         features = configs['features']
+         reduce_fn = configs['reduce_fn']
+         mask_fn = configs['mask_fn']
+         if features is None:
+             data = reduce_fn(data)
+             mask.data = mask_fn(data, **sparse_config)
+         else:
+             for feature_idx in range(len(features)):
+                 data_feature = reduce_fn(data[feature_idx])
+                 mask[feature_idx].data = mask_fn(data_feature, **sparse_config)
+
+     def _sparsify_hook(self, name):
+         """Returns hook that applies sparsification mask to input entering the attached layer
+         """
+         mask = self.get_mask(name)
+         features = self.data_groups[name]['features']
+         feature_dim = self.data_groups[name]['feature_dim']
+
+         def hook(module, input):
+             input_data = input[0]
+             if features is None:
+                 # apply to all the features
+                 return input_data * mask
+             else:
+                 # apply per feature, feature_dim
+                 for feature_idx in range(0, len(features)):
+                     feature = torch.Tensor([features[feature_idx]]).long().to(input_data.device)
+                     sparsified = torch.index_select(input_data, feature_dim, feature) * mask[feature_idx]
+                     input_data.index_copy_(feature_dim, feature, sparsified)
+                 return input_data
+         return hook
+
+     def squash_mask(self, attach_sparsify_hook=True, **kwargs):
+         """
+         Unregisters aggregate hook that was applied earlier and registers sparsification hooks if
+         attach_sparsify_hook = True.
+         """
+         for name, configs in self.data_groups.items():
+             # unhook agg hook
+             configs['hook'].remove()
+             configs.pop('hook')
+             self.data_groups[name]['hook_state'] = "None"
+             if attach_sparsify_hook:
+                 configs['hook'] = configs['layer'].register_forward_pre_hook(self._sparsify_hook(name))
+                 configs['hook_state'] = "sparsify"  # signals that sparsify hook is now attached
+
+     def _get_serializable_data_groups(self):
+         """Exclude hook and layer from the config keys before serializing
+
+         TODO: Might have to treat functions (reduce_fn, mask_fn etc) in a different manner while serializing.
+               For time-being, functions are treated the same way as other attributes
+         """
+         data_groups: Dict[str, Any] = defaultdict()
+         for name, config in self.data_groups.items():
+             new_config = {key: value for key, value in config.items() if key not in ['hook', 'layer']}
+             data_groups[name] = new_config
+         return data_groups
+
+     def _convert_mask(self, states_dict, sparse_coo=True):
+         r"""Converts the mask to sparse coo or dense depending on the `sparse_coo` argument.
+         If `sparse_coo=True`, then the mask is stored as sparse coo else dense tensor
+         """
+         states = copy.deepcopy(states_dict)
+         for state in states.values():
+             if state['mask'] is not None:
+                 if isinstance(state['mask'], List):
+                     for idx in range(len(state['mask'])):
+                         if sparse_coo:
+                             state['mask'][idx] = state['mask'][idx].to_sparse_coo()
+                         else:
+                             state['mask'][idx] = state['mask'][idx].to_dense()
+                 else:
+                     if sparse_coo:
+                         state['mask'] = state['mask'].to_sparse_coo()
+                     else:
+                         state['mask'] = state['mask'].to_dense()
+         return states
+
+     def state_dict(self) -> Dict[str, Any]:
+         r"""Returns the state of the sparsifier as a :class:`dict`.
+
+         It contains:
+         * state - contains name -> mask mapping.
+         * data_groups - a dictionary containing all config information for each
+             layer
+         * defaults - the default config while creating the constructor
+         """
+         data_groups = self._get_serializable_data_groups()
+         state = self._convert_mask(self.state)
+         return {
+             'state': state,
+             'data_groups': data_groups,
+             'defaults': self.defaults
+         }
+
+     def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
+         r"""The load_state_dict() restores the state of the sparsifier based on the state_dict
+
+         Args:
+         * state_dict - the dictionary from which the state of the sparsifier is restored
+         """
+         state = state_dict['state']
+         data_groups, defaults = state_dict['data_groups'], state_dict['defaults']
+
+         self.__set_state__({'state': state, 'data_groups': data_groups, 'defaults': defaults})
+
+     def __get_state__(self) -> Dict[str, Any]:
+
+         data_groups = self._get_serializable_data_groups()
+         state = self._convert_mask(self.state)
+         return {
+             'defaults': self.defaults,
+             'state': state,
+             'data_groups': data_groups,
+         }
+
+     def __set_state__(self, state: Dict[str, Any]) -> None:
+         state['state'] = self._convert_mask(state['state'], sparse_coo=False)  # convert mask to dense tensor
+         self.__dict__.update(state)
+
+         # need to attach layer and hook info into the data_groups
+         for name, config in self.data_groups.items():
+             # fetch layer
+             layer = fqn_to_module(self.model, name)
+             assert layer is not None  # satisfy mypy
+
+             # if agg_mode is True, then layer in aggregate mode
+             if "hook_state" in config and config['hook_state'] == "aggregate":
+                 hook = layer.register_forward_pre_hook(self._aggregate_hook(name))
+
+             elif "hook_state" in config and config["hook_state"] == "sparsify":
+                 hook = layer.register_forward_pre_hook(self._sparsify_hook(name))
+
+             config['layer'] = layer
+             config['hook'] = hook  # type: ignore[possibly-undefined]
+
+     def __repr__(self):
+         format_string = self.__class__.__name__ + ' ('
+         for name, config in self.data_groups.items():
+             format_string += '\n'
+             format_string += '\tData Group\n'
+             format_string += f'\t    name: {name}\n'
+             for key in sorted(config.keys()):
+                 if key in ['data', 'hook', 'reduce_fn', 'mask_fn', 'aggregate_fn']:
+                     continue
+                 format_string += f'\t    {key}: {config[key]}\n'
+         format_string += ')'
+         return format_string
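
To make the hook flow above concrete, here is a small end-to-end sketch (assumed usage, mirroring the class docstring; the threshold-based mask_fn is hypothetical): aggregate inputs over a few forward passes, reduce them, compute a binary mask, then squash the mask so later inputs are sparsified.

import torch
import torch.nn as nn
from torch.ao.pruning._experimental.activation_sparsifier.activation_sparsifier import ActivationSparsifier

model = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
sparsifier = ActivationSparsifier(
    model,
    aggregate_fn=lambda a, b: a + b,        # running sum of inputs
    reduce_fn=lambda agg: agg.mean(dim=0),  # average over the batch dimension
    mask_fn=lambda data, threshold: (data.abs() > threshold).float(),  # hypothetical mask_fn
    threshold=0.1,                          # forwarded to mask_fn via **sparse_config
)
sparsifier.register_layer(model[0])

for _ in range(4):                # a few forward passes aggregate the inputs
    model(torch.randn(8, 16))
sparsifier.step()                 # reduce the aggregate and compute the mask
sparsifier.squash_mask()          # future inputs to model[0] are now masked

After squash_mask(), the aggregate hook is replaced by a sparsify hook, so the computed mask multiplies every subsequent input of the registered layer.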
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__init__.py ADDED
@@ -0,0 +1,7 @@
+ from .base_data_sparsifier import BaseDataSparsifier
+ from .data_norm_sparsifier import DataNormSparsifier
+
+ __all__ = [
+     "BaseDataSparsifier",
+     "DataNormSparsifier",
+ ]
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (373 Bytes).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/base_data_sparsifier.cpython-310.pyc ADDED
Binary file (11.4 kB).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/data_norm_sparsifier.cpython-310.pyc ADDED
Binary file (5.35 kB).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/quantization_utils.cpython-310.pyc ADDED
Binary file (4.09 kB).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/base_data_sparsifier.py ADDED
@@ -0,0 +1,309 @@
+ import abc
+ import torch
+ from typing import Optional, Tuple, List, Any, Dict
+ from ...sparsifier import base_sparsifier
+ from collections import defaultdict
+ from torch import nn
+ import copy
+ from ...sparsifier import utils
+ from torch.nn.utils import parametrize
+ import sys
+ import warnings
+
+ if not sys.warnoptions:
+     # to suppress repeated warnings when being used in a training loop.
+     warnings.simplefilter("once")
+
+ __all__ = ['BaseDataSparsifier']
+
+ EMBEDDING_TYPES = {
+     nn.Embedding,
+     nn.EmbeddingBag,
+ }
+
+ SUPPORTED_TYPES = {
+     torch.Tensor,
+     nn.Parameter,
+     *EMBEDDING_TYPES,
+ }
+
+
+ class _Container(nn.Module):
+     pass
+
+
+ class BaseDataSparsifier(base_sparsifier.BaseSparsifier):
+     r"""
+     Base Data Sparsifier class for all Data sparsifiers.
+     The abstract class accepts raw torch tensors / embedding / embedding bags (refer to SUPPORTED_TYPES above)
+     to prepare for sparsification.
+     In this case, mask (and parametrizations) is owned by the class and not by the user.
+     Specifically, the container object inside the class maintains the mask and parametrizations of the input data
+
+     Args:
+         data_list (list of tuples)
+             list of (name, data) tuples to sparsify. Lookup SUPPORTED_TYPES
+             for type of data. Internally, a container module handles the data sparsification.
+
+         defaults (dict)
+             default configurations will be attached to the
+             configuration. Only the keys that don't exist in the `config` will
+             be updated.
+     Example::
+         >>> # xdoctest: +SKIP
+         >>> data_list = [('tensor_1', torch.randn(3,3)), ('tensor_2', torch.randn(4,4))]
+         >>> defaults = {'sparsity_level': 0.7}
+         >>> sparsifier = DerivedDataSparsifier(data_list=data_list, **defaults)  # Some sparsifier that inherits BaseDataSparsifier
+         >>> new_tensor_to_add = {'name': 'tensor_3', 'data': torch.randn(5,5), 'sparsity_level': 0.3}
+         >>> sparsifier.add_data(**new_tensor_to_add)
+         >>> # tensor_1 and tensor_2 will have sparsity_level of 0.7 but tensor_3 will have sparsity_level=0.3
+     """
+     def __init__(self, data_list: Optional[List[Tuple[str, Any]]] = None, **defaults):
+         super().__init__(defaults=defaults)
+
+         self._container = _Container()
+
+         self.data_groups: Dict[str, Dict] = defaultdict(dict)  # name -> {**config}
+         if data_list is not None:
+             # add data with default config here
+             [self.add_data(name, data, **self.defaults) for name, data in data_list]
+
+     def prepare(self):
+         raise NotImplementedError("this function is undefined for this class")
+
+     def _extract_weight(self, data):
+         # extract the weight parameter instead of underlying data
+         if type(data) in [torch.Tensor, nn.Parameter]:
+             return data
+         elif type(data) in EMBEDDING_TYPES:
+             return data.weight
+
+     def add_data(self, name: str, data, reuse_mask=True, **config):
+         r""" Configures and parametrizes the internal container model with name and data.
+
+         **Note**:
+             1. If the data with name already exists, it replaces the data.
+             2. While replacing, the old mask is reused when `reuse_mask=True`
+             3. If `reuse_mask=True`, then the replacing data needs to have the same shape as that of old data.
+             4. By default, the config of the replaced data is used as config for the replacing data, unless something
+                is specified in the config dictionary.
+         """
+         assert type(data) in SUPPORTED_TYPES, \
+             "specified data type not supported at the moment"
+         local_args = copy.deepcopy(self.defaults)
+         local_args.update(config)
+         weight = self._extract_weight(data)
+
+         # Bookkeeping in the container class
+         mask = local_args.get('mask', torch.ones_like(weight))
+         param_class = local_args.get('parametrization', utils.FakeSparsity)
+
+         if name in self.state:
+             # If the named data already exists - replace
+             warnings.warn("Replacing existing data of the same name. - Did you mean a different name?")
+
+             # reuse old config
+             old_args = self.data_groups[name]
+             local_args = copy.deepcopy(old_args)
+             local_args.update(config)
+
+             if reuse_mask:
+                 current_data = self.get_data(name=name)
+                 assert weight.shape == current_data.shape, \
+                     "to retain the old mask, the shape of the new data must be the same as the previous one"
+                 mask = self.get_mask(name=name)  # reuse mask instead of creating a new one
+
+             self._delete_data(name=name)
+
+         # parameter creates a deepcopy of the weight inside, so create a buffer
+         self._container.register_buffer(name=name, tensor=weight)
+         parametrize.register_parametrization(self._container, name, param_class(mask))
+         self.state[name]['mask'] = mask
+         self.data_groups[name] = local_args
+         return getattr(self._container, name)
+
+     def get_data(self, name: str, return_original: bool = True):
+         r"""Returns weight tensor (or data)
+         Args:
+             - name: name of the data to be returned
+             - return_original returns weight tensor without applying parametrization if True
+               else - returns the sparsified version (parametrized)
+         """
+         if name not in self.data_groups:
+             raise ValueError("data with specified name does not exist")
+
+         if return_original:
+             if not parametrize.is_parametrized(self._container, name):
+                 raise ValueError("mask squashed - original mask value does not exist")
+             data = getattr(self._container.parametrizations, name).original
+             return data
+         else:
+             return getattr(self._container, name)
+
+     def _convert_mask(self, states, sparse_coo=True):
+         r"""Converts the mask to sparse coo or dense tensors depending on the `sparse_coo` argument.
+         """
+         states = copy.deepcopy(states)
+         for state in states.values():
+             if sparse_coo:
+                 state['mask'] = state['mask'].to_sparse_coo()
+             else:
+                 state['mask'] = state['mask'].to_dense()
+
+         return states
+
+     def state_dict(self):
+         r"""Returns the state of the sparsifier as a :class:`dict`.
+
+         It contains:
+         * state - contains name -> mask mapping.
+         * data_groups - a list containing all sparsity configuration groups
+             with the key name specifying the name of the data
+         * container_state_dict - the state dictionary of the internal
+             container model used for sparsification
+         """
+         state = self._convert_mask(self.state)
+         return {
+             'state': state,
+             'data_groups': self.data_groups,
+             '_container': self._container.state_dict()
+         }
+
+     def _load_container_from_state(self, states, data_groups, container_state_dict):
+         r"""This restores the state of the container specifically based on the data present in state and data_groups
+         If the data was parametrized, then the data would be added to the container and then parametrized,
+         else it would just add the attribute to the container.
+         """
+         for name, state in states.items():
+             config_name = data_groups.get(name, None)
+             if config_name is None:
+                 raise RuntimeError(f"Error loading {name}")
+
+             # check if the data with such a name was parametrized, if so parametrize
+             # otherwise just set the attribute and continue
+             parametrized_name = f'parametrizations.{name}.original'
+             parametrized = False
+             data = container_state_dict.get(name, None)
+             if name in container_state_dict:
+                 # the parametrization was probably removed for this
+                 data = container_state_dict.get(name)
+
+             elif parametrized_name in container_state_dict:
+                 # so the weight was parametrized
+                 data = container_state_dict.get(parametrized_name)
+                 parametrized = True
+
+             else:
+                 raise RuntimeError(f"Error loading {name}")
+
+             self._container.register_buffer(name=name, tensor=data)
+
+             if parametrized:
+                 # register parameter if parametrized
+                 mask = state.get('mask', torch.ones_like(data))
+                 param_class = data_groups.get('parametrization', utils.FakeSparsity)  # change once public_api for utils is fixed!
+                 parametrize.register_parametrization(self._container, name, param_class(mask))
+
+     def load_state_dict(self, state_dict, strict=True):
+         r"""The load_state_dict() restores the state of the sparsifier based on the state_dict
+
+         Args:
+         * state_dict - the dictionary from which the state of the sparsifier is restored
+         * strict - If True - the sparsifier is reset and is restored exactly to the state in state_dict.
+             If False - the current sparsifier is not reset before loading the state_dict i.e. data added
+             before loading the state_dict is not erased.
+         """
+         states = copy.deepcopy(state_dict['state'])
+         data_groups = copy.deepcopy(state_dict['data_groups'])
+         container_state_dict = copy.deepcopy(state_dict['_container'])
+
+         states = self._convert_mask(states, sparse_coo=False)  # convert sparse coo mask to dense
+         if strict:
+             # if strict load -> then reset container
+             self._container = _Container()
+
+         self._load_container_from_state(states, data_groups, container_state_dict)
+
+         if not strict:
+             states.update(self.state)
+             data_groups.update(self.data_groups)
+
+         self.__setstate__({'state': states, 'data_groups': data_groups})
+
+     def __setstate__(self, state):
+         if '_container' in state:  # If container object is in state then load model
+             container_dict = state.pop('_container')
+             self._container = _Container()
+             state['state'] = self._convert_mask(state['state'], sparse_coo=False)  # convert sparse coo mask to dense
+             self._load_container_from_state(state['state'], state['data_groups'], container_dict)
+
+         self.__dict__.update(state)
+
+     def __getstate__(self):
+         state = self._convert_mask(self.state)
+         return {
+             'defaults': self.defaults,
+             'state': state,
+             'data_groups': self.data_groups,
+             '_container': self._container.state_dict()
+         }
+
+     def __repr__(self):
+         format_string = self.__class__.__name__ + ' ('
+         for name, sparse_args in self.data_groups.items():
+             format_string += '\n'
+             format_string += '\tData Group\n'
+             format_string += f'\t    name: {name}\n'
+             for key in sorted(sparse_args.keys()):
+                 if key == 'data':
+                     continue
+                 format_string += f'\t    {key}: {sparse_args[key]}\n'
+         format_string += ')'
+         return format_string
+
+     def get_mask(self, name: str):
+         if name not in self.state:
+             raise ValueError("data with specified name does not exist")
+         return self.state[name]['mask']
+
+     def squash_mask(self, *args, leave_parametrized=True, names=None, **kwargs):
+         r"""Squashes the sparse masks into the appropriate tensors. Also, accepts list of strings
+         to squash mask for. If none, squashes mask for all the keys
+         kwargs:
+             * names: list of strings to squash mask for
+             * sparsified: if true - applies the mask before squashing
+               if false - does not apply the mask before squashing
+         """
+         if names is None:
+             names = list(self.data_groups.keys())
+         for name in names:
+             parametrize.remove_parametrizations(self._container, name, leave_parametrized=leave_parametrized)
+
+     def step(self):
+         if not self.enable_mask_update:
+             return
+         with torch.no_grad():
+             for name, config in self.data_groups.items():
+                 # get non-sparsified data
+                 data = self.get_data(name)
+                 # need name for the mask otherwise can directly pass mask?
+                 self.update_mask(name, data, **config)
+
+     @abc.abstractmethod
+     def update_mask(self, name, data, **kwargs):
+         pass
+
+     def _delete_data(self, name):
+         """Detaches some data from the sparsifier.
+
+         Args:
+             name (str)
+                 Name of the data to be removed from the sparsifier
+
+         Note:
+             Currently private. Kind of used as a helper function when replacing data of the same name
+         """
+         self.squash_mask(names=[name], leave_parametrized=False)  # do not apply the mask while deleting
+         delattr(self._container, name)
+         self.state.pop(name)
+         self.data_groups.pop(name)
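
BaseDataSparsifier leaves only update_mask() abstract, so a subclass just supplies a mask-update rule. A minimal illustrative subclass (an assumed example of typical use, not shipped in this commit) that zeroes entries below a magnitude threshold:

import torch
from torch.ao.pruning._experimental.data_sparsifier.base_data_sparsifier import BaseDataSparsifier

class ThresholdDataSparsifier(BaseDataSparsifier):
    def __init__(self, data_list=None, threshold=0.05):
        super().__init__(data_list=data_list, threshold=threshold)

    def update_mask(self, name, data, threshold, **kwargs):
        # keep entries whose magnitude exceeds the per-tensor threshold
        mask = self.get_mask(name)
        mask.data = (data.abs() > threshold).to(mask.dtype)

sparsifier = ThresholdDataSparsifier([("t1", torch.randn(4, 4))], threshold=0.5)
sparsifier.step()  # recomputes the mask for every registered tensor
print(sparsifier.get_data("t1", return_original=False))  # sparsified (parametrized) view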
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/data_norm_sparsifier.py ADDED
@@ -0,0 +1,153 @@
+ import torch
+ from torch.nn import functional as F
+ from functools import reduce
+ from typing import Any, List, Optional, Tuple
+
+ from .base_data_sparsifier import BaseDataSparsifier
+ import operator
+
+ __all__ = ['DataNormSparsifier']
+
+
+ class DataNormSparsifier(BaseDataSparsifier):
+     r"""L1-Norm Sparsifier
+     This sparsifier computes the *L1-norm* of every sparse block and "zeroes-out" the
+     ones with the lowest norm. The level of sparsity defines how many of the
+     blocks are removed.
+     This sparsifier is controlled by three variables:
+     1. `sparsity_level` defines the number of *sparse blocks* that are zeroed-out
+     2. `sparse_block_shape` defines the shape of the sparse blocks. Note that
+         the sparse blocks originate at the zero-index of the tensor.
+     3. `zeros_per_block` is the number of zeros that we are expecting in each
+         sparse block. By default we assume that all elements within a block are
+         zeroed-out. However, setting this variable sets the target number of
+         zeros per block. The zeros within each block are chosen as the *smallest
+         absolute values*.
+     Args:
+         sparsity_level: The target level of sparsity
+         sparse_block_shape: The shape of a sparse block
+         zeros_per_block: Number of zeros in a sparse block
+     Note::
+         All arguments to the DataNormSparsifier constructor are "default"
+         arguments and could be overridden by the configuration provided in the
+         `add_data` step.
+     """
+     def __init__(self, data_list: Optional[List[Tuple[str, Any]]] = None, sparsity_level: float = 0.5,
+                  sparse_block_shape: Tuple[int, int] = (1, 4),
+                  zeros_per_block: Optional[int] = None, norm: str = 'L1'):
+         if zeros_per_block is None:
+             zeros_per_block = reduce(operator.mul, sparse_block_shape)
+
+         assert norm in ['L1', 'L2'], "only L1 and L2 norm supported at the moment"
+
+         defaults = {'sparsity_level': sparsity_level, 'sparse_block_shape': sparse_block_shape,
+                     'zeros_per_block': zeros_per_block}
+         self.norm = norm
+         super().__init__(data_list=data_list, **defaults)
+
+     def __get_scatter_folded_mask(self, data, dim, indices, output_size, sparse_block_shape):
+         mask = torch.ones_like(data)
+         mask.scatter_(dim=dim, index=indices, value=0)  # zeroing out
+         mask = F.fold(mask, output_size=output_size, kernel_size=sparse_block_shape,
+                       stride=sparse_block_shape)
+         mask = mask.to(torch.int8)
+         return mask
+
+     def __get_block_level_mask(self, data,
+                                sparse_block_shape, zeros_per_block):
+
+         # Assume data is a squeezed tensor
+         height, width = data.shape[-2], data.shape[-1]
+         block_height, block_width = sparse_block_shape
+         values_per_block = block_height * block_width
+
+         # just return zeros if zeroing all elements in block
+         if values_per_block == zeros_per_block:
+             return torch.zeros_like(data, dtype=torch.int8)
+
+         # creating additional height and width to support padding
+         dh = (block_height - height % block_height) % block_height
+         dw = (block_width - width % block_width) % block_width
+
+         # create a new padded tensor like data (to match the block_shape)
+         padded_data = torch.ones(height + dh, width + dw, dtype=data.dtype, device=data.device)
+         padded_data = padded_data * torch.nan  # can also be replaced with 0 to stop the removal of edge data
+         padded_data[0:height, 0:width] = data
+         unfolded_data = F.unfold(padded_data[None, None, :], kernel_size=sparse_block_shape,
+                                  stride=sparse_block_shape)
+
+         _, sorted_idx = torch.sort(unfolded_data, dim=1)
+         sorted_idx = sorted_idx[:, :zeros_per_block, :]  # zero out zeros_per_block number of elements
+
+         mask = self.__get_scatter_folded_mask(data=unfolded_data, dim=1, indices=sorted_idx, output_size=padded_data.shape,
+                                               sparse_block_shape=sparse_block_shape)
+
+         mask = mask.squeeze(0).squeeze(0)[:height, :width].contiguous()  # remove padding and make contiguous
+         return mask
+
+     def __get_data_level_mask(self, data, sparsity_level,
+                               sparse_block_shape):
+
+         height, width = data.shape[-2], data.shape[-1]
+         block_height, block_width = sparse_block_shape
+         dh = (block_height - height % block_height) % block_height
+         dw = (block_width - width % block_width) % block_width
+
+         data_norm = F.avg_pool2d(data[None, None, :], kernel_size=sparse_block_shape,
+                                  stride=sparse_block_shape, ceil_mode=True)
+
+         values_per_block = reduce(operator.mul, sparse_block_shape)
+
+         data_norm = data_norm.flatten()
+         num_blocks = len(data_norm)
+
+         data_norm = data_norm.repeat(1, values_per_block, 1)  # get similar shape after unfold
+         _, sorted_idx = torch.sort(data_norm, dim=2)
+
+         threshold_idx = round(sparsity_level * num_blocks)  # number of blocks to remove
+         sorted_idx = sorted_idx[:, :, :threshold_idx]
+
+         mask = self.__get_scatter_folded_mask(data=data_norm, dim=2, indices=sorted_idx,
+                                               output_size=(height + dh, width + dw),
+                                               sparse_block_shape=sparse_block_shape)
+
+         mask = mask.squeeze(0).squeeze(0)[:height, :width]  # squeeze only the first 2 dimension
+         return mask
+
+     def update_mask(self, name, data, sparsity_level,
+                     sparse_block_shape, zeros_per_block, **kwargs):
+
+         values_per_block = reduce(operator.mul, sparse_block_shape)
+         if zeros_per_block > values_per_block:
+             raise ValueError("Number of zeros per block cannot be more than "
+                              "the total number of elements in that block.")
+         if zeros_per_block < 0:
+             raise ValueError("Number of zeros per block should be positive.")
+
+         if self.norm == 'L1':
+             data_norm = torch.abs(data).squeeze()  # absolute value based (L1)
+         else:
+             data_norm = (data * data).squeeze()  # square every element for L2
+
+         if len(data_norm.shape) > 2:  # only supports 2 dimensional data at the moment
+             raise ValueError("only supports 2-D at the moment")
+
+         elif len(data_norm.shape) == 1:  # in case the data is bias (or 1D)
+             data_norm = data_norm[None, :]
+
+         mask = self.get_mask(name)
+         if sparsity_level <= 0 or zeros_per_block == 0:
+             mask.data = torch.ones_like(mask)
+         elif sparsity_level >= 1.0 and (zeros_per_block == values_per_block):
+             mask.data = torch.zeros_like(mask)
+
+         # Fetch the high level mask that zeros out entire blocks
+         data_lvl_mask = self.__get_data_level_mask(data=data_norm, sparsity_level=sparsity_level,
+                                                    sparse_block_shape=sparse_block_shape)
+
+         # Fetch block level mask that zeros out 'zeros_per_block' number of elements in every block
+         block_lvl_mask = self.__get_block_level_mask(data=data_norm, sparse_block_shape=sparse_block_shape,
+                                                      zeros_per_block=zeros_per_block)
+
+         # zero out the entries inside those blocks whose block is sparsified
+         mask.data = torch.where(data_lvl_mask == 1, data_lvl_mask, block_lvl_mask)
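
A short sketch of the sparsifier in action (assumed usage, not part of the diff): with sparse_block_shape=(1, 4) and sparsity_level=0.5, roughly half of the 1x4 blocks, those with the smallest L1 norm, are zeroed out.

import torch
from torch.ao.pruning._experimental.data_sparsifier.data_norm_sparsifier import DataNormSparsifier

sparsifier = DataNormSparsifier(sparsity_level=0.5, sparse_block_shape=(1, 4), norm='L1')
sparsifier.add_data(name='weight', data=torch.randn(8, 16))
sparsifier.step()  # compute the block-level mask

mask = sparsifier.get_mask('weight')
print('achieved sparsity:', 1.0 - mask.float().mean().item())  # ~0.5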
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (224 Bytes).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (234 Bytes).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/_data_sparstity_utils.cpython-310.pyc ADDED
Binary file (1.74 kB).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/data_sparsity.cpython-310.pyc ADDED
Binary file (6.22 kB).
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py ADDED
@@ -0,0 +1,39 @@
+ import logging
+ from torch.ao.pruning._experimental.data_sparsifier.base_data_sparsifier import SUPPORTED_TYPES
+
+ logger: logging.Logger = logging.getLogger(__name__)
+
+
+ def _attach_model_to_data_sparsifier(module, data_sparsifier, config=None):
+     """Attaches a data sparsifier to all the layers of the module.
+     Essentially, loop over all the weight parameters in the module and
+     attach it to the data sparsifier.
+     Note::
+         The '.' in the layer names are replaced with '_' (refer to _get_valid_name() below)
+         before attaching to the sparsifier. This is because the data
+         sparsifier uses a dummy model inside to store the weight parameters.
+     """
+     if config is None:
+         config = {}
+     for name, parameter in module.named_parameters():
+         if type(parameter) in SUPPORTED_TYPES:
+             valid_name = _get_valid_name(name)
+             # will be defaulted to default configs
+             data_sparsifier.add_data(name=valid_name, data=parameter, **config.get(valid_name, {}))
+
+
+ def _get_valid_name(name):
+     return name.replace('.', '_')  # . is not allowed as a name
+
+
+ def _log_sparsified_level(model, data_sparsifier) -> None:
+     # Show the level of sparsity AFTER step:
+     for name, parameter in model.named_parameters():
+         if type(parameter) not in SUPPORTED_TYPES:
+             continue
+         valid_name = _get_valid_name(name)
+         mask = data_sparsifier.get_mask(name=valid_name)
+         sparsity_level = 1.0 - mask.float().mean()
+         logger.info(
+             "Sparsity in layer %s = %.2f", name, sparsity_level
+         )
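
As a quick illustration of the helpers above (a sketch, not part of the commit): attaching a model's parameters registers each one under its dot-to-underscore mangled name.

import torch.nn as nn
from torch.ao.pruning._experimental.data_sparsifier import DataNormSparsifier
from torch.ao.pruning._experimental.data_sparsifier.lightning.callbacks._data_sparstity_utils import (
    _attach_model_to_data_sparsifier, _get_valid_name)

model = nn.Sequential(nn.Linear(4, 4))
sparsifier = DataNormSparsifier(sparsity_level=0.5)
_attach_model_to_data_sparsifier(model, sparsifier)
# the parameter "0.weight" is stored in the sparsifier's container as "0_weight"
assert _get_valid_name("0.weight") == "0_weight"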
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/data_sparsity.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import defaultdict
2
+ from copy import deepcopy
3
+ import torch
4
+ from typing import Any, Optional, Dict
5
+ import pytorch_lightning as pl # type: ignore[import]
6
+
7
+ from ._data_sparstity_utils import (
8
+ _attach_model_to_data_sparsifier,
9
+ _log_sparsified_level,
10
+ _get_valid_name
11
+ )
12
+
13
+
14
+ class PostTrainingDataSparsity(pl.callbacks.Callback):
15
+ """Lightning callback that enables post-training sparsity.
16
+
17
+ This callback aims to sparsify the model inside lightning module after training.
18
+ **Note that the model is copied and then sparsified, so the existing model is not modified**
19
+
20
+ The sparsified model can be used for comparison and can be accessed using
21
+ <callback_obj>.sparsified
22
+
23
+ Args:
24
+ data_sparsifier_class (some implemented class of BaseDataSparsifier)
25
+ The data sparsifier object of this class is created when the
26
+ training starts.
27
+ Note: Objects should not be passed in here as they are created
28
+ once the training completes.
29
+
30
+ data_sparsifier_args (Dict)
31
+ Dictionary of args to be passed to the data sparsifier.
32
+ Note: data_list arg should be ignored
33
+
34
+ Hooks implemented:
35
+ on_fit_end()
36
+ 1. copies the model and attaches it to the sparsifier
37
+ 2. sparsier step() is called
38
+ 3. squashes the mask()
39
+ """
40
+ def __init__(self, data_sparsifier_class, data_sparsifier_args):
41
+ super().__init__()
42
+ self.data_sparsifier_class = data_sparsifier_class
43
+ self.data_sparsifier_args = data_sparsifier_args
44
+ self.data_sparsifier: Any = None
45
+ self.sparsified: Optional[torch.nn.Module] = None
46
+
47
+    def on_fit_end(self, trainer, pl_module) -> None:
+        self.sparsified = deepcopy(pl_module.model).eval()
+        self.data_sparsifier = self.data_sparsifier_class(**self.data_sparsifier_args)
+
+        _attach_model_to_data_sparsifier(self.sparsified, self.data_sparsifier)
+
+        self.data_sparsifier.step()
+
+        self.data_sparsifier.squash_mask()  # currently squashes params for all masks
+
+        _log_sparsified_level(self.sparsified, self.data_sparsifier)
+
+
+class TrainingAwareDataSparsity(pl.callbacks.Callback):
+    """Lightning callback that enables in-training sparsity.
+
+    This callback aims to sparsify the model inside the lightning module during training.
+    **Note that the model is copied and then sparsified, so the existing model is not modified.**
+
+    The sparsified model can be used for comparison and can be accessed using
+    <callback_obj>.sparsified
+
+    Args:
+        data_sparsifier_class (some implemented class of BaseDataSparsifier)
+            The data sparsifier object of this class is created when the
+            training starts.
+            Note: Objects should not be passed in here, as they are created
+            when the training starts.
+
+        data_sparsifier_args (Dict)
+            Dictionary of args to be passed to the data sparsifier.
+            Note: the data_list arg should be ignored.
+
+        data_scheduler_class (some implemented class of BaseDataScheduler)
+            The data scheduler of this class is created when the training starts.
+            Note: Objects should not be passed in here, as they are created
+            when the training starts.
+
+        data_scheduler_args (Dict)
+            Dictionary of args to be passed to the data scheduler.
+            **Note: the data_sparsifier arg should be ignored, as the recipe
+            creates and passes the sparsifier object into the class.**
+
+    Hooks implemented:
+        on_train_start()
+            Data sparsifier and scheduler objects are created and the
+            PyTorch model is attached to the sparsifier.
+
+        on_train_epoch_start()
+            Loads the state_dict of the data sparsifier.
+
+        on_train_epoch_end()
+            1. Copies the model and attaches it to the sparsifier.
+            2. Calls sparsifier step() and scheduler step().
+            3. Dumps the state_dict of the current sparsifier.
+
+        on_train_end()
+            Squashes the mask.
+    """
+    def __init__(self, data_sparsifier_class, data_sparsifier_args,
+                 data_scheduler_class, data_scheduler_args):
+        super().__init__()
+        # data sparsifier objects
+        self.data_sparsifier_class = data_sparsifier_class
+        self.data_sparsifier_args = data_sparsifier_args
+
+        # scheduler objects
+        self.data_scheduler_class = data_scheduler_class
+        self.data_scheduler_args = data_scheduler_args
+
+        # fields
+        self.data_sparsifier: Any = None
+        self.data_scheduler: Any = None
+        self.sparsified: Optional[torch.nn.Module] = None
+
+        self.data_sparsifier_state_dict: Any = None
+
+    def on_train_start(self, trainer, pl_module) -> None:
+        # create sparsifier
+        self.data_sparsifier = self.data_sparsifier_class(**self.data_sparsifier_args)
+        self.sparsified = deepcopy(pl_module.model)
+
+        _attach_model_to_data_sparsifier(self.sparsified, self.data_sparsifier)  # just to populate the base_sl in the scheduler
+
+        # create scheduler
+        args = deepcopy(self.data_scheduler_args)
+        args['data_sparsifier'] = self.data_sparsifier
+        self.data_scheduler = self.data_scheduler_class(**args)
+
+    def on_train_epoch_start(self, trainer, pl_module):
+        if self.data_sparsifier_state_dict is None:
+            return  # probably first epoch
+
+        # load the existing config for each data
+        self.data_sparsifier.load_state_dict(self.data_sparsifier_state_dict)
+
+    def __create_config_based_on_state(self, pl_module):
+        config: Dict = defaultdict()
+        if self.data_sparsifier_state_dict is None:
+            return config
+        for name, _ in pl_module.model.named_parameters():
+            valid_name = _get_valid_name(name)
+            config[valid_name] = self.data_sparsifier.data_groups[valid_name]
+
+        return config
+
+    def on_train_epoch_end(self, trainer, pl_module):
+        self.sparsified = deepcopy(pl_module.model)
+        config = self.__create_config_based_on_state(pl_module)
+
+        # attach model to the data sparsifier
+        _attach_model_to_data_sparsifier(self.sparsified, self.data_sparsifier, config=config)
+        self.data_sparsifier.step()
+        self.data_scheduler.step()
+
+        self.data_sparsifier_state_dict = self.data_sparsifier.state_dict()
+
+    def on_train_end(self, trainer, pl_module):
+        self.data_sparsifier.squash_mask()
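
A minimal usage sketch for the callback above; the LightningModule, the sparsifier arguments, and the scheduler class named here are illustrative assumptions rather than part of this file:

    import pytorch_lightning as pl
    from torch.ao.pruning._experimental.data_sparsifier import DataNormSparsifier

    # `MyDataScheduler` stands in for any implemented BaseDataScheduler subclass,
    # and `MyLightningModule` for a LightningModule exposing a `.model` attribute.
    callback = TrainingAwareDataSparsity(
        data_sparsifier_class=DataNormSparsifier,
        data_sparsifier_args={'sparsity_level': 0.8},  # no data_list here
        data_scheduler_class=MyDataScheduler,
        data_scheduler_args={'some_arg': 3},           # no data_sparsifier here
    )
    trainer = pl.Trainer(callbacks=[callback])
    trainer.fit(MyLightningModule())
    sparse_model = callback.sparsified  # copied and sparsified; the original model is untouched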
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/quantization_utils.py ADDED
@@ -0,0 +1,130 @@
+import torch
+import torch.nn as nn
+from torch.ao.pruning.sparsifier.utils import module_to_fqn, fqn_to_module
+from typing import Dict, List, Optional
+
+SUPPORTED_MODULES = {
+    nn.Embedding,
+    nn.EmbeddingBag
+}
+
+
+def _fetch_all_embeddings(model):
+    """Fetches Embedding and EmbeddingBag modules from the model
+    """
+    embedding_modules = []
+    stack = [model]
+    while stack:
+        module = stack.pop()
+        for _, child in module.named_children():
+            fqn_name = module_to_fqn(model, child)
+            if type(child) in SUPPORTED_MODULES:
+                embedding_modules.append((fqn_name, child))
+            else:
+                stack.append(child)
+    return embedding_modules
+
+
+def post_training_sparse_quantize(model,
+                                  data_sparsifier_class,
+                                  sparsify_first=True,
+                                  select_embeddings: Optional[List[nn.Module]] = None,
+                                  **sparse_config):
+    """Takes in a model and applies sparsification and quantization only to embeddings & embedding bags.
+    The quantization step can happen before or after sparsification depending on the `sparsify_first` argument.
+
+    Args:
+        - model (nn.Module)
+            model whose embeddings need to be sparsified
+        - data_sparsifier_class (type of data sparsifier)
+            Type of sparsification that needs to be applied to the model
+        - sparsify_first (bool)
+            if True, sparsifies first and then quantizes;
+            otherwise, quantizes first and then sparsifies.
+        - select_embeddings (List of Embedding modules)
+            List of embedding modules in the model to be sparsified & quantized.
+            If None, all embedding modules will be sparsified
+        - sparse_config (Dict)
+            config that will be passed to the constructor of the data sparsifier object.
+
+    Note:
+        1. When `sparsify_first=False`, quantization occurs first followed by sparsification.
+            - before sparsifying, the embedding layers are dequantized
+            - scales and zero-points are saved
+            - embedding layers are sparsified and `squash_mask` is applied
+            - embedding weights are requantized using the saved scales and zero-points
+        2. When `sparsify_first=True`, sparsification occurs first followed by quantization.
+            - embeddings are sparsified first
+            - quantization is applied on the sparsified embeddings
+    """
+    data_sparsifier = data_sparsifier_class(**sparse_config)
+
+    # if select_embeddings is None, perform it on all embeddings
+    if select_embeddings is None:
+        embedding_modules = _fetch_all_embeddings(model)
+
+    else:
+        embedding_modules = []
+        assert isinstance(select_embeddings, List), "the embedding_modules must be a list of embedding modules"
+        for emb in select_embeddings:
+            assert type(emb) in SUPPORTED_MODULES, "the embedding_modules list must contain only embeddings or embedding bags"
+            fqn_name = module_to_fqn(model, emb)
+            assert fqn_name is not None, "the embedding modules must be part of the input model"
+            embedding_modules.append((fqn_name, emb))
+
+    if sparsify_first:
+        # sparsify
+        for name, emb_module in embedding_modules:
+            valid_name = name.replace('.', '_')
+            data_sparsifier.add_data(name=valid_name, data=emb_module)
+
+        data_sparsifier.step()
+        data_sparsifier.squash_mask()
+
+        # quantize
+        for _, emb_module in embedding_modules:
+            emb_module.qconfig = torch.ao.quantization.float_qparams_weight_only_qconfig
+
+        torch.ao.quantization.prepare(model, inplace=True)
+        torch.ao.quantization.convert(model, inplace=True)
+
+    else:
+        # quantize
+        for _, emb_module in embedding_modules:
+            emb_module.qconfig = torch.ao.quantization.float_qparams_weight_only_qconfig
+
+        torch.ao.quantization.prepare(model, inplace=True)
+        torch.ao.quantization.convert(model, inplace=True)
+
+        # retrieve scale & zero_points
+        quantize_params: Dict[str, Dict] = {'scales': {}, 'zero_points': {},
+                                            'dequant_weights': {}, 'axis': {},
+                                            'dtype': {}}
+
+        for name, _ in embedding_modules:
+            quantized_emb = fqn_to_module(model, name)
+            assert quantized_emb is not None  # satisfy mypy
+
+            quantized_weight = quantized_emb.weight()  # type: ignore[operator]
+            quantize_params['scales'][name] = quantized_weight.q_per_channel_scales()
+            quantize_params['zero_points'][name] = quantized_weight.q_per_channel_zero_points()
+            quantize_params['dequant_weights'][name] = torch.dequantize(quantized_weight)
+            quantize_params['axis'][name] = quantized_weight.q_per_channel_axis()
+            quantize_params['dtype'][name] = quantized_weight.dtype
+
+            # attach data to sparsifier
+            data_sparsifier.add_data(name=name.replace('.', '_'), data=quantize_params['dequant_weights'][name])
+
+        data_sparsifier.step()
+        data_sparsifier.squash_mask()
+
+        for name, _ in embedding_modules:
+            quantized_emb = fqn_to_module(model, name)
+            assert quantized_emb is not None  # satisfy mypy
+            requantized_vector = torch.quantize_per_channel(quantize_params['dequant_weights'][name],
+                                                            scales=quantize_params['scales'][name],
+                                                            zero_points=quantize_params['zero_points'][name],
+                                                            dtype=quantize_params['dtype'][name],
+                                                            axis=quantize_params['axis'][name])
+
+            quantized_emb.set_weight(requantized_vector)  # type: ignore[operator]
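
To make the two orderings concrete, here is a minimal sketch of calling the function above on a toy model; the model and the `DataNormSparsifier` keyword arguments are assumptions for illustration:

    import torch.nn as nn
    from torch.ao.pruning._experimental.data_sparsifier import DataNormSparsifier

    model = nn.Sequential(nn.EmbeddingBag(1000, 64), nn.Linear(64, 2))

    # Sparsify the embedding bag first, then quantize it; extra kwargs are
    # forwarded to DataNormSparsifier(**sparse_config).
    post_training_sparse_quantize(
        model,
        data_sparsifier_class=DataNormSparsifier,
        sparsify_first=True,
        sparsity_level=0.8,
    )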
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/FPGM_pruner.py ADDED
@@ -0,0 +1,93 @@
+from typing import Callable, Optional, Union
+
+import torch
+
+from .base_structured_sparsifier import BaseStructuredSparsifier
+
+__all__ = ["FPGMPruner"]
+
+
+class FPGMPruner(BaseStructuredSparsifier):
+    r"""Filter Pruning via Geometric Median (FPGM) Structured Pruner
+    This sparsifier prunes filters (rows) in a tensor according to the distances among filters, as described in
+    `Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration <https://arxiv.org/abs/1811.00250>`_.
+
+    This sparsifier is controlled by two variables:
+    1. `sparsity_level` defines the number of filters (rows) that are zeroed-out.
+    2. `dist` defines the distance measurement type. Default: 2 (L2 distance).
+       Available options are: [1, 2, (custom callable distance function)].
+
+    Note::
+        Inputs should be a 4D convolutional tensor of shape (N, C, H, W).
+            - N: output channels size
+            - C: input channels size
+            - H: height of kernel
+            - W: width of kernel
+    """
+
+    def __init__(
+        self, sparsity_level: float = 0.5, dist: Optional[Union[Callable, int]] = None
+    ):
+        defaults = {
+            "sparsity_level": sparsity_level,
+        }
+
+        if dist is None:
+            dist = 2
+
+        if callable(dist):
+            self.dist_fn = dist
+        elif dist == 1:
+            self.dist_fn = lambda x: torch.cdist(x, x, p=1)
+        elif dist == 2:
+            self.dist_fn = lambda x: torch.cdist(x, x, p=2)
+        else:
+            raise NotImplementedError("Distance function is not yet implemented.")
+        super().__init__(defaults=defaults)
+
+    def _compute_distance(self, t):
+        r"""Compute distance across all entries in tensor `t` along all dimensions
+        except for the one identified by `dim`.
+        Args:
+            t (torch.Tensor): tensor representing the parameter to prune
+        Returns:
+            distance (torch.Tensor): distance computed across filters
+        """
+        dim = 0  # prune filter (row)
+
+        size = t.size(dim)
+        slc = [slice(None)] * t.dim()
+
+        # flatten the tensor along the dimension
+        t_flatten = [
+            t[tuple(slc[:dim] + [slice(i, i + 1)] + slc[dim + 1 :])].reshape(-1)
+            for i in range(size)
+        ]
+        t_flatten = torch.stack(t_flatten)
+
+        # distance measurement
+        dist_matrix = self.dist_fn(t_flatten)
+
+        # filters that are more similar to the others have a larger row sum
+        distance = torch.sum(torch.abs(dist_matrix), 1)
+
+        return distance
+
+    def update_mask(self, module, tensor_name, sparsity_level, **kwargs):
+        tensor_weight = getattr(module, tensor_name)
+        mask = getattr(module.parametrizations, tensor_name)[0].mask
+
+        if sparsity_level <= 0:
+            mask.data = torch.ones_like(mask).bool()
+        elif sparsity_level >= 1.0:
+            mask.data = torch.zeros_like(mask).bool()
+        else:
+            distance = self._compute_distance(tensor_weight)
+
+            tensor_size = tensor_weight.shape[0]  # prune filter (row)
+            nparams_toprune = round(sparsity_level * tensor_size)
+            nparams_toprune = min(
+                max(nparams_toprune, 0), tensor_size
+            )  # clamp to [0, tensor_size]
+            topk = torch.topk(distance, k=nparams_toprune, largest=False)
+            mask[topk.indices] = False
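
As a rough sketch of how this pruner is meant to be driven (the toy model and config are assumptions), the flow is prepare, step, then prune:

    import torch.nn as nn

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 4, 3))
    pruner = FPGMPruner(sparsity_level=0.5)                # L2 distance by default
    pruner.prepare(model, config=[{'tensor_fqn': '0.weight'}])
    pruner.step()                                          # computes distances, updates the mask
    pruned = pruner.prune()                                # folds the mask in and shrinks the next conv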
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/__init__.py ADDED
@@ -0,0 +1,8 @@
+from .base_structured_sparsifier import BaseStructuredSparsifier
+from .parametrization import (
+    FakeStructuredSparsity,
+    BiasHook,
+)
+from .saliency_pruner import SaliencyPruner
+from .lstm_saliency_pruner import LSTMSaliencyPruner
+from .FPGM_pruner import FPGMPruner
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/base_structured_sparsifier.py ADDED
@@ -0,0 +1,310 @@
+from itertools import chain
+from operator import getitem
+import torch
+import torch.nn.functional as F
+from torch import nn
+from torch.fx import symbolic_trace
+from torch.nn.utils import parametrize
+from typing import Type, Set, Dict, Callable, Tuple, Optional, Union
+
+from torch.ao.pruning import BaseSparsifier
+from .parametrization import FakeStructuredSparsity, BiasHook, module_contains_param
+from .match_utils import apply_match, MatchAllNode
+from .prune_functions import (
+    prune_linear,
+    prune_linear_linear,
+    prune_linear_activation_linear,
+    prune_conv2d,
+    prune_conv2d_conv2d,
+    prune_conv2d_activation_conv2d,
+    prune_conv2d_activation_pool_conv2d,
+    prune_conv2d_pool_activation_conv2d,
+    prune_conv2d_pool_flatten_linear,
+    prune_lstm_output_linear,
+    prune_lstm_output_layernorm_linear,
+)
+
+
+def _get_supported_structured_pruning_modules():
+    SUPPORTED_STRUCTURED_PRUNING_MODULES = {  # added to config if None given
+        nn.Linear,
+        nn.Conv2d,
+        nn.LSTM,
+    }
+    return SUPPORTED_STRUCTURED_PRUNING_MODULES
+
+
+def _get_supported_activation_functions():
+    SUPPORTED_ACTIVATION_FUNCTIONS = {
+        F.relu,
+        F.rrelu,
+        F.hardtanh,
+        F.relu6,
+        F.sigmoid,
+        F.hardsigmoid,
+        F.tanh,
+        F.silu,
+        F.mish,
+        F.hardswish,
+        F.elu,
+        F.celu,
+        F.selu,
+        F.hardshrink,
+        F.leaky_relu,
+        F.logsigmoid,
+        F.softplus,
+        F.prelu,
+        F.softsign,
+        F.tanhshrink,
+        F.gelu,
+    }
+    return SUPPORTED_ACTIVATION_FUNCTIONS
+
+
+def _get_supported_activation_modules():
+    SUPPORTED_ACTIVATION_MODULES = {
+        nn.ReLU,
+        nn.RReLU,
+        nn.Hardtanh,
+        nn.ReLU6,
+        nn.Sigmoid,
+        nn.Hardsigmoid,
+        nn.Tanh,
+        nn.SiLU,
+        nn.Mish,
+        nn.Hardswish,
+        nn.ELU,
+        nn.CELU,
+        nn.SELU,
+        nn.Hardshrink,
+        nn.LeakyReLU,
+        nn.LogSigmoid,
+        nn.Softplus,
+        nn.PReLU,
+        nn.Softsign,
+        nn.Tanhshrink,
+        nn.GELU,
+    }
+    return SUPPORTED_ACTIVATION_MODULES
+
+
+def _get_default_structured_pruning_patterns() -> Dict[
+    Tuple[Union[Type[nn.Module], Callable, MatchAllNode, str], ...],
+    Callable[..., None],
+]:
+    """
+    Returns the patterns for conv2d / linear conversion for each element in the activation functions/modules defined above.
+    """
+    patterns: Dict[
+        Tuple[Union[Type[nn.Module], Callable, MatchAllNode, str], ...],
+        Callable[..., None],
+    ] = {
+        # linear -> linear
+        (nn.Linear, "output"): prune_linear,
+        (nn.Linear, nn.Linear): prune_linear_linear,
+        # conv2d -> conv2d
+        (nn.Conv2d, "output"): prune_conv2d,
+        (nn.Conv2d, nn.Conv2d): prune_conv2d_conv2d,
+        # TODO LSTM Structured pruning does not support returned state currently.
+        # Should find a way to explicitly match getitem(0) instead of getitem.
+        # This will also require changing the pruning function.
+        # lstm -> getitem(0) -> linear
+        (nn.LSTM, getitem, nn.Linear): prune_lstm_output_linear,
+        # lstm -> getitem(0) -> layernorm -> linear
+        (nn.LSTM, getitem, nn.LayerNorm, nn.Linear): prune_lstm_output_layernorm_linear,
+    }
+
+    for activation in chain(
+        _get_supported_activation_functions(), _get_supported_activation_modules()
+    ):
+        patterns.update(
+            {
+                # linear -> activation -> linear
+                (nn.Linear, activation, nn.Linear): prune_linear_activation_linear,
+                # conv2d -> activation -> conv2d
+                (nn.Conv2d, activation, nn.Conv2d): prune_conv2d_activation_conv2d,
+                # conv2d -> activation -> pool -> conv2d
+                (
+                    nn.Conv2d,
+                    activation,
+                    nn.AvgPool2d,
+                    nn.Conv2d,
+                ): prune_conv2d_activation_pool_conv2d,
+                (
+                    nn.Conv2d,
+                    activation,
+                    F.avg_pool2d,
+                    nn.Conv2d,
+                ): prune_conv2d_activation_pool_conv2d,
+                (
+                    nn.Conv2d,
+                    activation,
+                    nn.MaxPool2d,
+                    nn.Conv2d,
+                ): prune_conv2d_activation_pool_conv2d,
+                (
+                    nn.Conv2d,
+                    activation,
+                    F.max_pool2d,
+                    nn.Conv2d,
+                ): prune_conv2d_activation_pool_conv2d,
+                # conv2d -> pool -> activation -> conv2d
+                (
+                    nn.Conv2d,
+                    nn.AvgPool2d,
+                    activation,
+                    nn.Conv2d,
+                ): prune_conv2d_pool_activation_conv2d,
+                (
+                    nn.Conv2d,
+                    F.avg_pool2d,
+                    activation,
+                    nn.Conv2d,
+                ): prune_conv2d_pool_activation_conv2d,
+                (
+                    nn.Conv2d,
+                    nn.MaxPool2d,
+                    activation,
+                    nn.Conv2d,
+                ): prune_conv2d_pool_activation_conv2d,
+                (
+                    nn.Conv2d,
+                    F.max_pool2d,
+                    activation,
+                    nn.Conv2d,
+                ): prune_conv2d_pool_activation_conv2d,
+                # conv2d -> adaptive pool -> flatten -> linear
+                (
+                    nn.Conv2d,
+                    nn.AdaptiveAvgPool2d,
+                    nn.Flatten,
+                    nn.Linear,
+                ): prune_conv2d_pool_flatten_linear,
+                (
+                    nn.Conv2d,
+                    nn.AdaptiveAvgPool2d,
+                    torch.flatten,
+                    nn.Linear,
+                ): prune_conv2d_pool_flatten_linear,
+                (
+                    nn.Conv2d,
+                    nn.AdaptiveMaxPool2d,
+                    nn.Flatten,
+                    nn.Linear,
+                ): prune_conv2d_pool_flatten_linear,
+                (
+                    nn.Conv2d,
+                    nn.AdaptiveMaxPool2d,
+                    torch.flatten,
+                    nn.Linear,
+                ): prune_conv2d_pool_flatten_linear,
+            }
+        )
+    return patterns
+
+
+class BaseStructuredSparsifier(BaseSparsifier):
+    r"""Base class for structured pruning.
+
+    Abstract methods that need to be implemented:
+        - update_mask: Function to compute a new mask for all keys in the
+            `groups` attribute.
+
+    Args:
+        - defaults [dict]: default configurations will be attached to the
+            configuration. Only the keys that don't exist in the `config` will
+            be updated.
+    """
+
+    def __init__(self, defaults, patterns=None):
+        super().__init__(defaults)
+        if patterns is None:
+            patterns = _get_default_structured_pruning_patterns()
+        self.patterns = patterns
+
+    def make_config_from_model(
+        self,
+        model: nn.Module,
+        SUPPORTED_MODULES: Optional[Set[Type]] = None,
+    ) -> None:
+        if SUPPORTED_MODULES is None:
+            SUPPORTED_MODULES = _get_supported_structured_pruning_modules()
+        super().make_config_from_model(model, SUPPORTED_MODULES=SUPPORTED_MODULES)
+
+    def _prepare(self, *args, **kwargs) -> None:
+        r"""This function will attach the FakeStructuredSparsity parametrizations
+        and BiasHooks at the appropriate points in the model.
+        """
+        for config in self.groups:
+            module = config["module"]
+            tensor_name = config["tensor_name"]
+            parametrization = config.get("parametrization", FakeStructuredSparsity)
+            tensor = getattr(module, tensor_name)
+
+            mask = config.get(
+                "mask",
+                torch.ones(tensor.shape[0], dtype=torch.bool, device=tensor.device),
+            )
+            self.state[config["tensor_fqn"]]["mask"] = mask
+            parametrize.register_parametrization(
+                module, tensor_name, parametrization(mask)
+            )
+
+            # if linear / conv, we add in bias hooks
+            if isinstance(module, (nn.Linear, nn.Conv2d)):
+                prune_bias = config.get("prune_bias", True)
+                if module.bias is not None:
+                    module.register_parameter(
+                        "_bias", nn.Parameter(module.bias.detach())
+                    )
+                    module.bias = None
+                    module.prune_bias = prune_bias
+
+                module.register_forward_hook(
+                    BiasHook(module.parametrizations.weight[0], prune_bias)
+                )
+
+    def prune(self) -> None:
+        r"""
+        This function will FX symbolically trace the model and then find instances of the patterns
+        defined in self.patterns (by default SUPPORTED_STRUCTURED_PRUNING_PATTERNS).
+
+        For each pattern, it will apply the corresponding conversion function, which will modify the output
+        and input sizes expected by the modules within the pattern.
+        """
+
+        self.traced = symbolic_trace(self.model)
+        modules = dict(self.traced.named_modules())
+
+        # Right now we check for matches simply by iterating across all the patterns.
+        # If this is slow, we can store patterns in a trie structure and modify this code for faster lookup.
+        for node in self.traced.graph.nodes:
+            for pattern, convert_fn in self.patterns.items():
+                matched = apply_match(modules, pattern, node, [])
+                if matched is None:
+                    continue
+
+                first_module = modules.get(node.target)
+                # check if the first module exists and has the appropriate parametrization, otherwise skip
+                if (
+                    first_module is not None
+                    and parametrize.is_parametrized(first_module)
+                    and module_contains_param(first_module, FakeStructuredSparsity)
+                ):
+                    convert_block = []
+                    for node in matched:
+                        if node.op == "call_module":
+                            convert_block.append(modules.get(node.target))
+                        elif node.op == "call_function":
+                            convert_block.append(node.target)
+                    convert_fn(*convert_block)
+
+        for module in self.traced.modules():
+            if module_contains_param(module, FakeStructuredSparsity):
+                raise Exception(
+                    f"Error: {module} still contains FakeStructuredSparsity parametrizations!"
+                )
+
+        self.traced.graph.lint()
+        self.traced.recompile()
+        return self.traced
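
As a sketch of the subclassing contract described above, only `update_mask` has to be provided; the toy pruner and model below are illustrative assumptions:

    import torch
    from torch import nn

    class RandomPruner(BaseStructuredSparsifier):
        """Toy pruner: randomly keeps rows with probability 1 - sparsity_level."""
        def update_mask(self, module, tensor_name, sparsity_level, **kwargs):
            mask = getattr(module.parametrizations, tensor_name)[0].mask
            mask.data = torch.rand(mask.shape, device=mask.device) > sparsity_level

    model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
    pruner = RandomPruner({'sparsity_level': 0.5})
    pruner.prepare(model, config=None)  # config is built from the supported modules
    pruner.step()                       # calls update_mask for every group
    pruned = pruner.prune()             # matches (Linear, ReLU, Linear) and resizes both layers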
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/lstm_saliency_pruner.py ADDED
@@ -0,0 +1,48 @@
+from typing import cast
+
+import torch
+from .base_structured_sparsifier import BaseStructuredSparsifier, FakeStructuredSparsity
+
+class LSTMSaliencyPruner(BaseStructuredSparsifier):
+    """
+    Prune packed LSTM weights based on saliency.
+    For each layer {k} inside an LSTM, we have two packed weight matrices
+    - weight_ih_l{k}
+    - weight_hh_l{k}
+
+    These tensors pack the weights for the 4 linear layers together for efficiency.
+
+    [W_ii | W_if | W_ig | W_io]
+
+    Pruning this tensor directly will lead to weights being misassigned when unpacked.
+    To ensure that each packed linear layer is pruned the same amount:
+    1. We split the packed weight into the 4 constituent linear parts
+    2. We update the mask for each part individually, based on its own saliency
+
+    This applies to both weight_ih_l{k} and weight_hh_l{k}.
+    """
+
+    def update_mask(self, module, tensor_name, **kwargs):
+        weights = getattr(module, tensor_name)
+
+        for p in getattr(module.parametrizations, tensor_name):
+            if isinstance(p, FakeStructuredSparsity):
+                mask = cast(torch.Tensor, p.mask)
+
+                # select weights based on magnitude
+                if weights.dim() <= 1:
+                    raise Exception("Structured pruning can only be applied to a 2+dim weight tensor!")
+                # take norm over all but first dim
+                dims = tuple(range(1, weights.dim()))
+                saliency = weights.norm(dim=dims, p=1)
+
+                # handle weights in 4 groups
+                split_size = len(mask) // 4
+                masks = torch.split(mask, split_size)
+                saliencies = torch.split(saliency, split_size)
+
+                for keep_mask, sal in zip(masks, saliencies):
+                    # mask smallest k values to be removed
+                    k = int(len(keep_mask) * kwargs["sparsity_level"])
+                    prune = sal.topk(k, largest=False, sorted=False).indices
+                    keep_mask.data[prune] = False  # modifies underlying p.mask directly
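
A hedged usage sketch; the model shape and sparsity level are assumptions:

    from torch import nn

    class Model(nn.Module):
        def __init__(self):
            super().__init__()
            self.lstm = nn.LSTM(input_size=8, hidden_size=8)
            self.linear = nn.Linear(8, 2)

        def forward(self, x):
            out, _ = self.lstm(x)   # traces as getitem(0) on the LSTM output
            return self.linear(out)

    pruner = LSTMSaliencyPruner({'sparsity_level': 0.5})
    pruner.prepare(Model(), config=[
        {'tensor_fqn': 'lstm.weight_ih_l0'},
        {'tensor_fqn': 'lstm.weight_hh_l0'},
    ])
    pruner.step()  # each of the 4 packed gates is masked by its own saliency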
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/match_utils.py ADDED
@@ -0,0 +1,59 @@
+"""
+Contains utility functions to check if a pattern is in the graph and return the matching nodes
+"""
+import torch
+from torch import nn
+from torch.ao.quantization.utils import (
+    MatchAllNode,
+)
+from torch.fx import Node
+from torch.nn.utils import parametrize
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+def _match(modules: Dict[str, nn.ModuleDict], node: Node, current: Union[nn.Module, Any]) -> bool:
+    r"""
+    checks to see if a single node of a pattern matches
+    """
+    if isinstance(current, type) and issubclass(current, MatchAllNode):
+        return True
+    if not isinstance(node, Node):
+        return False
+    if isinstance(current, type) and issubclass(current, torch.nn.Module):
+        return (
+            node.op == "call_module"
+            and parametrize.type_before_parametrizations(modules[node.target])
+            == current
+        )
+    elif callable(current):
+        return node.op == "call_function" and node.target is current
+    elif isinstance(current, str):
+        return node.target == current
+    return False
+
+def apply_match(
+    modules: Dict[str, nn.ModuleDict],
+    pattern: Union[Tuple[Any], Any],
+    node: Node,
+    matched_node_pattern: List[Node],
+) -> Optional[List[Node]]:
+    r"""
+    This function will return the matched nodes if the pattern matches the node given
+    If there is no match, it will return None
+    """
+    if isinstance(pattern, tuple):
+        if len(pattern) == 1:
+            if _match(modules, node, pattern[0]):
+                return matched_node_pattern + [node]
+
+        first, *rest = pattern
+        if _match(modules, node, first):
+            if rest is None:
+                return matched_node_pattern + [node]
+
+            for user in node.users:
+                return apply_match(
+                    modules, tuple(rest), user, matched_node_pattern + [node]
+                )
+    elif _match(modules, node, pattern):
+        return [node]
+    return None
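
For intuition, a small sketch of matching a pattern tuple against a traced graph (the toy model is an assumption):

    from torch import nn
    from torch.fx import symbolic_trace

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 4, 3))
    traced = symbolic_trace(model)
    modules = dict(traced.named_modules())

    pattern = (nn.Conv2d, nn.ReLU, nn.Conv2d)
    for node in traced.graph.nodes:
        matched = apply_match(modules, pattern, node, [])
        if matched is not None:
            print([n.target for n in matched])  # e.g. ['0', '1', '2']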
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/parametrization.py ADDED
@@ -0,0 +1,59 @@
+import torch
+from torch import nn
+from torch.nn.utils.parametrize import is_parametrized
+
+
+def module_contains_param(module, parametrization):
+    if is_parametrized(module):
+        # see if any of the module tensors have a parametrization attached that matches the one passed in
+        return any(
+            any(isinstance(param, parametrization) for param in param_list)
+            for key, param_list in module.parametrizations.items()
+        )
+    return False
+
+
+# Structured Pruning Parameterizations
+class FakeStructuredSparsity(nn.Module):
+    r"""
+    Parametrization for Structured Pruning. Like FakeSparsity, this should be attached to
+    the 'weight' or any other parameter that requires a mask.
+
+    Instead of an element-wise bool mask, this parameterization uses a row-wise bool mask.
+    """
+
+    def __init__(self, mask):
+        super().__init__()
+        self.register_buffer("mask", mask)
+
+    def forward(self, x):
+        assert isinstance(self.mask, torch.Tensor)
+        assert self.mask.shape[0] == x.shape[0]
+        shape = [1] * len(x.shape)
+        shape[0] = -1
+        return self.mask.reshape(shape) * x
+
+    def state_dict(self, *args, **kwargs):
+        # avoid double saving masks
+        return {}
+
+
+class BiasHook:
+    def __init__(self, parametrization, prune_bias):
+        self.param = parametrization
+        self.prune_bias = prune_bias
+
+    def __call__(self, module, input, output):
+
+        if getattr(module, "_bias", None) is not None:
+            bias = module._bias.data
+            if self.prune_bias:
+                bias[~self.param.mask] = 0
+
+            # reshape bias to broadcast over output dimensions
+            idx = [1] * len(output.shape)
+            idx[1] = -1
+            bias = bias.reshape(idx)
+
+            output += bias
+        return output
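
A small sketch of attaching the parametrization by hand (the layer and mask are arbitrary):

    import torch
    from torch import nn
    from torch.nn.utils import parametrize

    linear = nn.Linear(4, 3)
    mask = torch.tensor([True, False, True])  # one flag per output row
    parametrize.register_parametrization(linear, "weight", FakeStructuredSparsity(mask))

    print(linear.weight[1])  # the masked row reads back as zeros
    print(module_contains_param(linear, FakeStructuredSparsity))  # True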
venv/lib/python3.10/site-packages/torch/ao/pruning/_experimental/pruner/prune_functions.py ADDED
@@ -0,0 +1,475 @@
+"""
+Collection of conversion functions for linear / conv2d structured pruning
+Also contains utilities for bias propagation
+"""
+from typing import cast, List, Optional, Callable, Tuple
+
+import torch
+from torch import nn, Tensor
+from torch.nn.utils import parametrize
+from torch.nn.utils.parametrize import ParametrizationList
+from .parametrization import FakeStructuredSparsity, BiasHook
+
+# BIAS PROPAGATION
+def _remove_bias_handles(module: nn.Module) -> None:
+    if hasattr(module, "_forward_hooks"):
+        bias_hooks: List[int] = []
+        for key, hook in module._forward_hooks.items():
+            if isinstance(hook, BiasHook):
+                bias_hooks.append(key)
+
+        for key in bias_hooks:
+            del module._forward_hooks[key]
+
+
+def _get_adjusted_next_layer_bias(
+    next_layer: nn.Module, pruned_biases: Tensor, mask: Tensor
+) -> nn.Parameter:
+    r"""Returns new adjusted bias for the second supported module"""
+    if parametrize.is_parametrized(next_layer):
+        # need to access original weight
+        parametrization_dict = cast(nn.ModuleDict, next_layer.parametrizations)
+        weight_parameterizations = cast(
+            ParametrizationList, parametrization_dict.weight
+        )
+        next_weight = weight_parameterizations.original
+    else:
+        next_weight = cast(Tensor, next_layer.weight)
+
+    scaling_weight = next_weight[:, ~mask]
+    if isinstance(next_layer, nn.Conv2d):  # checking for Conv2d
+        # Propagating first layer pruned biases and calculating the new second layer bias
+        # involves more steps since the Conv2d scaling weight has extra dimensions,
+        # so adding bias involves broadcasting, logically:
+        # for each channel k in range(oC):
+        #     scaled_biases = sum(first_bias[pruned_idx] @ next_weight[k, pruned_idx, :, :].T)
+        #     new_next_bias[k] = old_next_bias[k] + scaled_biases
+        scaling_product = torch.matmul(
+            pruned_biases.reshape(1, -1), torch.transpose(scaling_weight, 1, 2)
+        )
+        sum_range = list(range(len(scaling_product.shape)))[
+            1:
+        ]  # all but the first dimension
+        scaled_biases = torch.sum(scaling_product, sum_range)
+    elif isinstance(next_layer, nn.Linear):  # Linear
+        scaled_biases = torch.matmul(
+            pruned_biases, torch.transpose(scaling_weight, 0, 1)
+        )  # recall b2_new = b1 @ w2.T + b2
+    else:
+        raise NotImplementedError(f"Type {type(next_layer)} not supported yet.")
+
+    if (
+        parametrize.is_parametrized(next_layer)
+        and getattr(next_layer, "_bias", None) is not None
+    ):  # next_layer is parametrized & has original bias ._bias
+        adjusted_bias = nn.Parameter(scaled_biases + next_layer._bias)
+    elif (
+        not parametrize.is_parametrized(next_layer) and next_layer.bias is not None
+    ):  # next_layer not parametrized & has .bias
+        adjusted_bias = nn.Parameter(scaled_biases + next_layer.bias)
+    else:  # next_layer has no bias
+        adjusted_bias = nn.Parameter(scaled_biases)
+    return adjusted_bias
+
+
+def _prune_module_bias(module: nn.Module, mask: Tensor) -> None:
+    r"""Applies the mask to the given module's bias"""
+    # prune bias along with weights, discard pruned indices of bias
+    original_bias = cast(Tensor, getattr(module, "_bias", module.bias))
+    if original_bias is not None:
+        module.bias = nn.Parameter(original_bias[mask])
+
+    # remove _bias parameter
+    if hasattr(module, "_bias"):
+        delattr(module, "_bias")
+
+
+def _propogate_module_bias(module: nn.Module, mask: Tensor) -> Optional[Tensor]:
+    r"""
+    In the case that we need to propagate biases, this function will return the biases we need
+    """
+    # set current module bias
+    if module.bias is not None:
+        module.bias = nn.Parameter(cast(Tensor, module.bias)[mask])
+    elif getattr(module, "_bias", None) is not None:
+        module.bias = nn.Parameter(cast(Tensor, module._bias)[mask])
+
+    # get pruned biases to propagate to subsequent layer
+    if getattr(module, "_bias", None) is not None:
+        pruned_biases = cast(Tensor, module._bias)[~mask]
+    else:
+        pruned_biases = None
+
+    if hasattr(module, "_bias"):
+        delattr(module, "_bias")
+
+    return pruned_biases
+
+
+# LINEAR
+def _prune_linear_helper(linear: nn.Linear) -> Tensor:
+    # expects linear to be a parameterized linear module
+    parametrization_dict = cast(nn.ModuleDict, linear.parametrizations)
+    weight_parameterizations = cast(ParametrizationList, parametrization_dict.weight)
+    for p in weight_parameterizations:
+        if isinstance(p, FakeStructuredSparsity):
+            mask = cast(Tensor, p.mask)
+
+    with torch.no_grad():
+        parametrize.remove_parametrizations(linear, "weight", leave_parametrized=True)
+        linear.weight = nn.Parameter(linear.weight[mask])  # type: ignore[possibly-undefined]
+        linear.out_features = linear.weight.shape[0]
+    _remove_bias_handles(linear)
+
+    return mask
+
+
+def prune_linear(linear: nn.Linear) -> None:
+    mask = _prune_linear_helper(linear)
+    if getattr(linear, "prune_bias", False):
+        _prune_module_bias(linear, mask)
+
+
+def prune_linear_linear(linear1: nn.Linear, linear2: nn.Linear) -> None:
+    prune_linear_activation_linear(linear1, None, linear2)
+
+
+def prune_linear_activation_linear(
+    linear1: nn.Linear,
+    activation: Optional[Callable[[Tensor], Tensor]],
+    linear2: nn.Linear,
+):
+    mask = _prune_linear_helper(linear1)
+    if getattr(linear1, "prune_bias", False):
+        _prune_module_bias(linear1, mask)
+    else:
+        pruned_biases = _propogate_module_bias(linear1, mask)
+        if pruned_biases is not None:
+            if activation:
+                pruned_biases = activation(pruned_biases)
+            linear2.bias = _get_adjusted_next_layer_bias(linear2, pruned_biases, mask)
+
+    with torch.no_grad():
+        if parametrize.is_parametrized(linear2):
+            parametrization_dict = cast(nn.ModuleDict, linear2.parametrizations)
+            weight_parameterizations = cast(
+                ParametrizationList, parametrization_dict.weight
+            )
+
+            weight_parameterizations.original = nn.Parameter(
+                weight_parameterizations.original[:, mask]
+            )
+            linear2.in_features = weight_parameterizations.original.shape[1]
+        else:
+            linear2.weight = nn.Parameter(linear2.weight[:, mask])
+            linear2.in_features = linear2.weight.shape[1]
+
+
+# CONV2D
+def _prune_conv2d_helper(conv2d: nn.Conv2d) -> Tensor:
+    parametrization_dict = cast(nn.ModuleDict, conv2d.parametrizations)
+    weight_parameterizations = cast(ParametrizationList, parametrization_dict.weight)
+    for p in weight_parameterizations:
+        if isinstance(p, FakeStructuredSparsity):
+            mask = cast(Tensor, p.mask)
+
+    with torch.no_grad():
+        parametrize.remove_parametrizations(conv2d, "weight", leave_parametrized=True)
+        conv2d.weight = nn.Parameter(conv2d.weight[mask])  # type: ignore[possibly-undefined]
+        conv2d.out_channels = conv2d.weight.shape[0]
+
+    _remove_bias_handles(conv2d)
+    return mask
+
+
+def prune_conv2d_padded(conv2d_1: nn.Conv2d) -> None:
+    parametrization_dict = cast(nn.ModuleDict, conv2d_1.parametrizations)
+    weight_parameterizations = cast(ParametrizationList, parametrization_dict.weight)
+    for p in weight_parameterizations:
+        if isinstance(p, FakeStructuredSparsity):
+            mask = cast(Tensor, p.mask)
+
+    with torch.no_grad():
+        parametrize.remove_parametrizations(conv2d_1, "weight", leave_parametrized=True)
+
+        if getattr(conv2d_1, "_bias", None) is not None:
+            if (
+                conv2d_1.bias is not None
+            ):  # conv2d_1 has original bias and bias propagated from previous layer
+                new_bias = torch.zeros(conv2d_1.bias.shape)
+                new_bias[mask] = conv2d_1.bias[mask]  # type: ignore[possibly-undefined]
+                # adjusted bias to keep in conv2d_1
+                new_bias[~mask] = cast(Tensor, conv2d_1._bias)[~mask]
+                # pruned biases that are kept instead of propagated
+                conv2d_1.bias = nn.Parameter(new_bias)
+            else:  # conv2d_1 has only original bias
+                conv2d_1.bias = nn.Parameter(cast(Tensor, conv2d_1._bias))
+        else:
+            # no original bias, only propagated bias
+            if (
+                conv2d_1.bias is not None
+            ):  # conv2d_1 has bias propagated from previous layer
+                conv2d_1.bias.data[~mask] = 0  # type: ignore[possibly-undefined]
+
+        if hasattr(conv2d_1, "_bias"):
+            delattr(conv2d_1, "_bias")
+
+
+def prune_conv2d(conv2d: nn.Conv2d) -> None:
+    mask = _prune_conv2d_helper(conv2d)
+    if getattr(conv2d, "prune_bias", False):
+        _prune_module_bias(conv2d, mask)
+
+
+def prune_conv2d_conv2d(conv2d_1: nn.Conv2d, conv2d_2: nn.Conv2d) -> None:
+    prune_conv2d_activation_conv2d(conv2d_1, None, conv2d_2)
+
+
+def prune_conv2d_activation_conv2d(
+    conv2d_1: nn.Conv2d,
+    activation: Optional[Callable[[Tensor], Tensor]],
+    conv2d_2: nn.Conv2d,
+):
+    r"""
+    Fusion Pattern for conv2d -> some activation module / function -> conv2d layers
+    """
+    parametrization_dict = cast(nn.ModuleDict, conv2d_1.parametrizations)
+    weight_parameterizations = cast(ParametrizationList, parametrization_dict.weight)
+    for p in weight_parameterizations:
+        if isinstance(p, FakeStructuredSparsity):
+            mask = cast(Tensor, p.mask)
+
+    prune_bias = getattr(conv2d_1, "prune_bias", False)
+    if (
+        hasattr(conv2d_2, "padding")
+        and cast(Tuple[int], conv2d_2.padding) > (0, 0)
+        and (conv2d_1.bias is not None or getattr(conv2d_1, "_bias", None) is not None)
+    ):
+        prune_conv2d_padded(conv2d_1)
+    else:
+        mask = _prune_conv2d_helper(conv2d_1)
+        if prune_bias:
+            _prune_module_bias(conv2d_1, mask)
+        else:
+            pruned_biases = _propogate_module_bias(conv2d_1, mask)
+            if pruned_biases is not None:
+                if activation:
+                    pruned_biases = activation(pruned_biases)
+                conv2d_2.bias = _get_adjusted_next_layer_bias(
+                    conv2d_2, pruned_biases, mask
+                )
+
+        if (
+            not (
+                hasattr(conv2d_2, "padding")
+                and cast(Tuple[int], conv2d_2.padding) > (0, 0)
+            )
+            or conv2d_1.bias is None
+        ):
+            with torch.no_grad():
+                if parametrize.is_parametrized(conv2d_2):
+                    parametrization_dict = cast(
+                        nn.ModuleDict, conv2d_2.parametrizations
+                    )
+                    weight_parameterizations = cast(
+                        ParametrizationList, parametrization_dict.weight
+                    )
+                    weight_parameterizations.original = nn.Parameter(
+                        weight_parameterizations.original[:, mask]
+                    )
+                    conv2d_2.in_channels = weight_parameterizations.original.shape[1]
+                else:
+                    conv2d_2.weight = nn.Parameter(conv2d_2.weight[:, mask])
+                    conv2d_2.in_channels = conv2d_2.weight.shape[1]
+
+
+def prune_conv2d_pool_activation_conv2d(
+    c1: nn.Conv2d,
+    pool: nn.Module,
+    activation: Optional[Callable[[Tensor], Tensor]],
+    c2: nn.Conv2d,
+) -> None:
+    prune_conv2d_activation_conv2d(c1, activation, c2)
+
+
+def prune_conv2d_activation_pool_conv2d(
+    c1: nn.Conv2d,
+    activation: Optional[Callable[[Tensor], Tensor]],
+    pool: nn.Module,
+    c2: nn.Conv2d,
+) -> None:
+    prune_conv2d_activation_conv2d(c1, activation, c2)
+
+
+def prune_conv2d_pool_flatten_linear(
+    conv2d: nn.Conv2d,
+    pool: nn.Module,
+    flatten: Optional[Callable[[Tensor], Tensor]],
+    linear: nn.Linear,
+) -> None:
+    mask = _prune_conv2d_helper(conv2d)
+
+    # We map the pruned indices of the Conv2d output to the flattened indices of the Linear following the Flatten layer.
+    # We determine the flattening scale (h * w), and readjust `first_pruned_indices`
+    # (each idx maps to range idx * h * w to (idx+1) * h * w), `first_valid_indices`,
+    # and `pruned_biases` (repeat each bias by h * w).
+    if parametrize.is_parametrized(linear):
+        parametrization_dict = cast(nn.ModuleDict, linear.parametrizations)
+        weight_parameterizations = cast(
+            ParametrizationList, parametrization_dict.weight
+        )
+        linear_ic = weight_parameterizations.original.shape[1]
+    else:
+        linear_ic = linear.weight.shape[1]
+
+    conv2d_oc = len(mask)
+    assert (
+        linear_ic % conv2d_oc == 0
+    ), f"Flattening from dimensions {conv2d_oc} to {linear_ic} not supported"
+
+    flatten_scale = linear_ic // conv2d_oc
+    flattened_mask = torch.tensor(
+        [[val] * flatten_scale for val in mask], dtype=torch.bool, device=mask.device
+    ).flatten()
+
+    if getattr(conv2d, "prune_bias", False):
+        _prune_module_bias(conv2d, mask)
+    else:
+        pruned_biases = cast(Tensor, _propogate_module_bias(conv2d, mask))
+        flattened_pruned_biases = torch.tensor(
+            [[bias] * flatten_scale for bias in pruned_biases], device=mask.device
+        ).flatten()
+        linear.bias = _get_adjusted_next_layer_bias(
+            linear, flattened_pruned_biases, flattened_mask
+        )
+
+    with torch.no_grad():
+        if parametrize.is_parametrized(linear):
+            parametrization_dict = cast(nn.ModuleDict, linear.parametrizations)
+            weight_parameterizations = cast(
+                ParametrizationList, parametrization_dict.weight
+            )
+            weight_parameterizations.original = nn.Parameter(
+                weight_parameterizations.original[:, flattened_mask]
+            )
+            linear.in_features = weight_parameterizations.original.shape[1]
+        else:
+            linear.weight = nn.Parameter(linear.weight[:, flattened_mask])
+            linear.in_features = linear.weight.shape[1]
+
+
+def prune_lstm_output_linear(
+    lstm: nn.LSTM, getitem: Callable, linear: nn.Linear
+) -> None:
+    prune_lstm_output_layernorm_linear(lstm, getitem, None, linear)
+
+
+def prune_lstm_output_layernorm_linear(
+    lstm: nn.LSTM,
+    getitem: Callable,
+    layernorm: Optional[nn.LayerNorm],
+    linear: nn.Linear,
+) -> None:
+    for i in range(lstm.num_layers):
+        if parametrize.is_parametrized(lstm, f"weight_ih_l{i}"):
+            parametrization_dict = cast(nn.ModuleDict, lstm.parametrizations)
+            weight_parameterizations = cast(
+                ParametrizationList, parametrization_dict[f"weight_ih_l{i}"]
+            )
+            mask = weight_parameterizations[0].mask
+
+            with torch.no_grad():
+                parametrize.remove_parametrizations(
+                    lstm, f"weight_ih_l{i}", leave_parametrized=True
+                )
+                setattr(
+                    lstm,
+                    f"weight_ih_l{i}",
+                    nn.Parameter(getattr(lstm, f"weight_ih_l{i}")[mask]),
+                )
+                setattr(
+                    lstm,
+                    f"bias_ih_l{i}",
+                    nn.Parameter(getattr(lstm, f"bias_ih_l{i}")[mask]),
+                )
+
+        if parametrize.is_parametrized(lstm, f"weight_hh_l{i}"):
+            parametrization_dict = cast(nn.ModuleDict, lstm.parametrizations)
+            weight_parameterizations = cast(
+                ParametrizationList, parametrization_dict[f"weight_hh_l{i}"]
+            )
+            mask = weight_parameterizations[0].mask
+
+            with torch.no_grad():
+                parametrize.remove_parametrizations(
+                    lstm, f"weight_hh_l{i}", leave_parametrized=True
+                )
+                # splitting out hidden-hidden masks
+                W_hi, W_hf, W_hg, W_ho = torch.split(
+                    getattr(lstm, f"weight_hh_l{i}"), lstm.hidden_size
+                )
+                M_hi, M_hf, M_hg, M_ho = torch.split(mask, lstm.hidden_size)
+
+                # resize each individual weight separately
+                W_hi = W_hi[M_hi][:, M_hi]
+                W_hf = W_hf[M_hf][:, M_hf]
+                W_hg = W_hg[M_hg][:, M_hg]
+                W_ho = W_ho[M_ho][:, M_ho]
+
+                # concat, use this as new weight
+                new_weight = torch.cat((W_hi, W_hf, W_hg, W_ho))
+                setattr(lstm, f"weight_hh_l{i}", nn.Parameter(new_weight))
+                setattr(
+                    lstm,
+                    f"bias_hh_l{i}",
+                    nn.Parameter(getattr(lstm, f"bias_hh_l{i}")[mask]),
+                )
+
+            # If this is the final layer, then we need to prune linear layer columns
+            if i + 1 == lstm.num_layers:
+                lstm.hidden_size = int(M_hi.sum())
+                with torch.no_grad():
+                    if parametrize.is_parametrized(linear):
+                        parametrization_dict = cast(
+                            nn.ModuleDict, linear.parametrizations
+                        )
+                        weight_parameterizations = cast(
+                            ParametrizationList, parametrization_dict.weight
+                        )
+
+                        weight_parameterizations.original = nn.Parameter(
+                            weight_parameterizations.original[:, M_ho]
+                        )
+                        linear.in_features = weight_parameterizations.original.shape[1]
+                    else:
+                        linear.weight = nn.Parameter(linear.weight[:, M_ho])
+                        linear.in_features = linear.weight.shape[1]
+
+                    # if layernorm module, prune weight and bias
+                    if layernorm is not None:
+                        layernorm.normalized_shape = (linear.in_features,)
+                        layernorm.weight = nn.Parameter(layernorm.weight[M_ho])
+                        layernorm.bias = nn.Parameter(layernorm.bias[M_ho])
+
+            # otherwise need to prune the columns of the input of the next LSTM layer
+            else:
+                with torch.no_grad():
+                    if parametrize.is_parametrized(lstm, f"weight_ih_l{i+1}"):
+                        parametrization_dict = cast(
+                            nn.ModuleDict, lstm.parametrizations
+                        )
+                        weight_parameterizations = cast(
+                            ParametrizationList,
+                            getattr(parametrization_dict, f"weight_ih_l{i+1}"),
+                        )
+
+                        weight_parameterizations.original = nn.Parameter(
+                            weight_parameterizations.original[:, M_ho]
+                        )
+                    else:
+                        next_layer_weight = getattr(lstm, f"weight_ih_l{i+1}")
+                        setattr(
+                            lstm,
+                            f"weight_ih_l{i+1}",
+                            nn.Parameter(next_layer_weight[:, M_ho]),
+                        )
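
To illustrate the contract of the linear helpers, a sketch that prunes two chained linears directly; the manual setup mirrors what `BaseStructuredSparsifier._prepare` does and is an assumption here:

    import torch
    from torch import nn
    from torch.nn.utils import parametrize

    linear1, linear2 = nn.Linear(4, 6), nn.Linear(6, 2)
    mask = torch.tensor([True, True, False, True, False, True])

    # mimic _prepare: attach the parametrization and stash the bias as _bias
    parametrize.register_parametrization(linear1, "weight", FakeStructuredSparsity(mask))
    linear1.register_parameter("_bias", nn.Parameter(linear1.bias.detach()))
    linear1.bias = None
    linear1.prune_bias = True

    prune_linear_linear(linear1, linear2)
    print(linear1.out_features, linear2.in_features)  # 4 4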
venv/lib/python3.10/site-packages/torch/ao/pruning/_mappings.py ADDED
@@ -0,0 +1,18 @@
+__all__ = [
+    "get_static_sparse_quantized_mapping",
+    "get_dynamic_sparse_quantized_mapping",
+]
+
+def get_static_sparse_quantized_mapping():
+    import torch.ao.nn.sparse
+    _static_sparse_quantized_mapping = {
+        torch.nn.Linear: torch.ao.nn.sparse.quantized.Linear,
+    }
+    return _static_sparse_quantized_mapping
+
+def get_dynamic_sparse_quantized_mapping():
+    import torch.ao.nn.sparse
+    _dynamic_sparse_quantized_mapping = {
+        torch.nn.Linear: torch.ao.nn.sparse.quantized.dynamic.Linear,
+    }
+    return _dynamic_sparse_quantized_mapping
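
These mappings are intended for `BaseSparsifier.convert`; a hedged sketch, assuming `model` has already been prepared by a `sparsifier` so its Linear weights carry FakeSparsity parametrizations:

    mapping = get_static_sparse_quantized_mapping()
    sparse_quantized_model = sparsifier.convert(model, mapping=mapping)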
venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes)
venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/base_sparsifier.cpython-310.pyc ADDED
Binary file (11.8 kB)
venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/nearly_diagonal_sparsifier.cpython-310.pyc ADDED
Binary file (2.46 kB)
venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.53 kB)
venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/weight_norm_sparsifier.cpython-310.pyc ADDED
Binary file (7.36 kB)
venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/base_sparsifier.py ADDED
@@ -0,0 +1,353 @@
+import abc
+import copy
+from collections import defaultdict
+from typing import Any, Dict, Optional, Set, Tuple, List, Type
+
+import torch
+from torch import nn
+from torch.nn.utils import parametrize
+from torch.nn.utils.parametrize import type_before_parametrizations
+
+from .utils import (
+    module_contains_param,
+    swap_module,
+    FakeSparsity,
+    get_arg_info_from_tensor_fqn,
+    module_to_fqn,
+)
+
+__all__ = ["BaseSparsifier"]
+
+SUPPORTED_MODULES = {nn.Linear}
+
+KEYS_NOT_IN_STATE_DICT = ["module", "module_fqn", "tensor_name"]
+
+
+# TODO update desc with new config args
+class BaseSparsifier(abc.ABC):
+    r"""Base class for all sparsifiers.
+
+    Abstract methods that need to be implemented:
+
+    - update_mask: Function to compute a new mask for all keys in the
+        `groups`.
+
+    Args:
+        - model [nn.Module]: model to configure. The model itself is not saved
+            but used for the state_dict saving / loading.
+        - config [list]: configuration elements should be a dict map that includes
+            `tensor_fqn` of tensors to sparsify
+        - defaults [dict]: default configurations will be attached to the
+            configuration. Only the keys that don't exist in the `config` will
+            be updated.
+
+    Example::
+
+        >>> # xdoctest: +SKIP("Can't instantiate abstract class BaseSparsifier with abstract method update_mask")
+        >>> config = [{'tensor_fqn': 'layer1.weight'}, {'tensor_fqn': 'linear2.weight2', 'sparsity_level': 0.5}]
+        >>> defaults = {'sparsity_level': 0.7}
+        >>> # model.layer1.weight will have `sparsity_level` = 0.7 (getting default)
+        >>> sparsifier = BaseSparsifier(config, defaults)
+    """
+
+    def __init__(self, defaults: Optional[Dict[str, Any]] = None):
+        super().__init__()
+        self.defaults: Dict[str, Any] = defaults or {}
+
+        self.state: Dict[str, Dict] = defaultdict(dict)
+        self.groups: List[Dict[str, Any]] = []
+        self.enable_mask_update = True
+
+    def __getstate__(self) -> Dict[str, Any]:
+        return {
+            "defaults": self.defaults,
+            "state": self.state,
+            "groups": self.groups,
+        }
+
+    def __setstate__(self, state: Dict[str, Dict[str, Any]]) -> None:
+        self.__dict__.update(state)
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + " ("
+        for i, sparse_args in enumerate(self.groups):
+            module = sparse_args["module"]
+            format_string += "\n"
+            format_string += f"\tGroup {i}\n"
+            format_string += f"\t    module: {module}\n"
+            for key in sorted(sparse_args.keys()):
+                if key == "module":
+                    continue
+                format_string += f"\t    {key}: {sparse_args[key]}\n"
+        format_string += ")"
+        return format_string
+
+    def state_dict(self) -> Dict[str, Any]:
+        r"""Returns the state of the sparsifier as a :class:`dict`.
+
+        It contains:
+        * state - current state of the sparsification.
+        * groups - a list containing all sparsity configuration groups
+            with the key 'tensor_fqn' specifying the path to the sparsified tensor within a model
+
+        TODO: Need a clean way of loading the state of the "prepared" module
+        """
+
+        groups: List[Dict[str, Any]] = [
+            dict(
+                filter(
+                    lambda key_value: key_value[0] not in KEYS_NOT_IN_STATE_DICT,
+                    mg.items(),
+                )
+            )
+            for mg in self.groups
+        ]
+
+        return {
+            "state": self.state,
+            "groups": groups,
+        }
+
+    def load_state_dict(self, state_dict: Dict[str, Any], strict: bool = True):
+        groups = copy.deepcopy(state_dict["groups"])
+        states = state_dict["state"]
+        for tensor_fqn, s in states.items():
+            arg_info = get_arg_info_from_tensor_fqn(self.model, tensor_fqn)
+            module = arg_info["module"]
+            tensor_name = arg_info["tensor_name"]
+            if strict and module is None:
+                raise RuntimeError(f"Error loading {tensor_fqn} into the model")
+
+            found = False
+            for p in module.parametrizations[tensor_name]:
+                if isinstance(p, FakeSparsity):
+                    found = True
+                    break
+            if not found:
+                p = FakeSparsity(torch.ones(getattr(module, tensor_name).shape))
+                parametrize.register_parametrization(module, tensor_name, p)
+            if s.get("mask", None) is not None:
+                mask = s.pop("mask")
+                p.mask = mask
+
+            for mg in groups:
+                if mg["tensor_fqn"] == tensor_fqn:
+                    mg.update(arg_info)
+        self.__setstate__({"state": states, "groups": groups})
+
+    def make_config_from_model(
+        self,
+        model: nn.Module,
+        SUPPORTED_MODULES: Set[Type] = SUPPORTED_MODULES,
+    ) -> None:
+        self.config = []
+        stack = [model]
+        while stack:
+            module = stack.pop()
+            for name, child in module.named_children():
+                if type(child) in SUPPORTED_MODULES:
+                    module_fqn = module_to_fqn(model, child)
+                    assert isinstance(module_fqn, str)  # for mypy
+                    self.config.append({"tensor_fqn": module_fqn + ".weight"})
+                else:
+                    stack.append(child)
+
+    def prepare(self, model, config):
+        r"""Prepares a model, by adding the parametrizations.
+
+        Note::
+
+            The model is modified inplace. If you need to preserve the original
+            model, use copy.deepcopy.
+        """
+        self.model = model  # TODO: Need to figure out how to load without this.
+        self.config = config
+
+        # If no config -- try getting all the supported layers
+        if self.config is None:
+            self.make_config_from_model(model)
+
+        # TODO: Remove the configuration by reference ('module')
+        for module_config in self.config:
+            assert isinstance(module_config, dict), (
+                "config elements should be dicts, not modules, i.e.: "
+                "[{`tensor_fqn`: `foo.bar.weight`}, {`tensor_fqn`: ... }, ...]"
+            )
+
+            assert isinstance(self.defaults, Dict)  # for mypy
+            local_args = copy.deepcopy(self.defaults)
+            local_args.update(module_config)
+
+            tensor_fqn = local_args.get("tensor_fqn", None)
+            assert tensor_fqn is not None, (
+                "tensor_fqn is a required argument in the sparsity config which "
+                "replaces previous `module` and [module]`fqn` arguments"
+            )
+
+            # populate all information from tensor_fqn
+            info_from_tensor_fqn = get_arg_info_from_tensor_fqn(model, tensor_fqn)
+
+            # check that whatever was put into local_args agrees with what was obtained
+            # from tensor_fqn
+            for key in info_from_tensor_fqn.keys():
+                if key in local_args:
+                    assert (
+                        info_from_tensor_fqn[key] == local_args[key]
+                        or (
+                            key == "tensor_fqn"
+                            and "." + info_from_tensor_fqn[key] == local_args[key]
+                        )
+                        # info_from_tensor_fqn will chop the leading '.' from tensor_fqn, so ignore that
+                    ), (
+                        f"Given both `{key}` and `tensor_fqn` in the config, they are expected to agree!"
+                    )
+            local_args.update(info_from_tensor_fqn)
+            self.groups.append(local_args)
+        self._prepare()
+
+    def _prepare(self, *args, **kwargs):
+        r"""Adds mask parametrization to the layer weight"""
+        for config in self.groups:
+            module = config["module"]
+            tensor_name = config["tensor_name"]
+            parametrization = config.get("parametrization", FakeSparsity)
+            mask = config.get("mask", torch.ones_like(getattr(module, tensor_name)))
+            self.state[config["tensor_fqn"]]["mask"] = mask
+            parametrize.register_parametrization(
+                module, tensor_name, parametrization(mask)
+            )
+
+    def squash_mask(
+        self,
+        params_to_keep: Optional[Tuple[str, ...]] = None,
+        params_to_keep_per_layer: Optional[Dict[str, Tuple[str, ...]]] = None,
+        *args,
+        **kwargs,
+    ):
+        r"""Squashes the sparse masks into the appropriate tensors.
+
+        If either the `params_to_keep` or `params_to_keep_per_layer` is set,
+        the module will have a `sparse_params` dict attached to it.
+
+        Args:
+            params_to_keep: List of keys to save in the module or a dict
+                representing the modules and keys that will have
+                sparsity parameters saved
+            params_to_keep_per_layer: Dict to specify the params that should be
+                saved for specific layers. The keys in the dict
+                should be the module fqn, while the values should
+                be a list of strings with the names of the variables
+                to save in the `sparse_params`
+
+        Examples:
+            >>> # xdoctest: +SKIP("locals are undefined")
+            >>> # Don't save any sparse params
+            >>> sparsifier.squash_mask()
+            >>> hasattr(model.submodule1, 'sparse_params')
+            False
+
+            >>> # Keep sparse params per layer
+            >>> sparsifier.squash_mask(
+            ...     params_to_keep_per_layer={
+            ...         'submodule1.linear1': ('foo', 'bar'),
+            ...         'submodule2.linear42': ('baz',)
+            ...     })
+            >>> print(model.submodule1.linear1.sparse_params)
+            {'foo': 42, 'bar': 24}
+            >>> print(model.submodule2.linear42.sparse_params)
+            {'baz': 0.1}
+
+            >>> # Keep sparse params for all layers
+            >>> sparsifier.squash_mask(params_to_keep=('foo', 'bar'))
+            >>> print(model.submodule1.linear1.sparse_params)
+            {'foo': 42, 'bar': 24}
+            >>> print(model.submodule2.linear42.sparse_params)
+            {'foo': 42, 'bar': 24}
+
+            >>> # Keep some sparse params for all layers, and specific ones for
+            >>> # some other layers
+            >>> sparsifier.squash_mask(
+            ...     params_to_keep=('foo', 'bar'),
+            ...     params_to_keep_per_layer={
+            ...         'submodule2.linear42': ('baz',)
+            ...     })
+            >>> print(model.submodule1.linear1.sparse_params)
+            {'foo': 42, 'bar': 24}
+            >>> print(model.submodule2.linear42.sparse_params)
+            {'foo': 42, 'bar': 24, 'baz': 0.1}
+        """
+        for config in self.groups:
+            module = config["module"]
+            tensor_name = config["tensor_name"]
+            parametrize.remove_parametrizations(
+                module, tensor_name, leave_parametrized=True
+            )
+            sparse_params = {}
+            if params_to_keep is not None:
+                global_params = {k: config[k] for k in params_to_keep}
+                sparse_params.update(global_params)
+            if params_to_keep_per_layer is not None:
+                params = params_to_keep_per_layer.get(config["module_fqn"], None)
+                if params is not None:
+                    per_layer_params = {k: config[k] for k in params}
+                    sparse_params.update(per_layer_params)
+            if sparse_params:
+                # TODO handle multiple tensors being quantized on a single module, where to store sparse_params?
+                module.sparse_params = sparse_params
+
+    def convert(
+        self,
+        module: nn.Module,
+        mapping: Optional[Dict[Type[nn.Module], Type[nn.Module]]] = None,
+        inplace: bool = False,
+        parameterization: Type[nn.Module] = FakeSparsity,
+    ):
+        r"""Converts submodules in the input module to a different module according to `mapping`
+        by calling the `from_dense` method on the target module class
+        Args:
+            module: input module
+            mapping: a dictionary that maps from source module type to target
+                module type; can be overwritten to allow swapping user-defined
+                Modules
+            inplace: carry out model transformations in-place; the original module
+                is mutated
+        """
+        if mapping is None:
+            raise NotImplementedError("Need to auto generate mapping ")
+        if not inplace:
+            module = copy.deepcopy(module)
+
+        reassign = {}
+        for name, mod in module.named_children():
+            # leaf node
+            if (
+                module_contains_param(mod, parameterization)
+                and type_before_parametrizations(mod) in mapping
+            ):
+                reassign[name] = swap_module(mod, mapping)
+            else:
+                # recurse
+                reassign[name] = self.convert(
+                    mod,
+                    mapping=mapping,
+                    inplace=True,
+                    parameterization=parameterization,
+                )
+
+        for key, value in reassign.items():
+            module._modules[key] = value
+
+        return module
+
+    def step(self, use_path: bool = True) -> None:
+        if not self.enable_mask_update:
+            return
+        with torch.no_grad():
+            for config in self.groups:
+                self.update_mask(**config)
+
+    @abc.abstractmethod
352
+ def update_mask(self, module: nn.Module, tensor_name: str, **kwargs):
353
+ pass
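
For orientation, here is a minimal sketch of the workflow these methods implement (prepare → step → squash_mask), using the WeightNormSparsifier added later in this diff. The toy two-layer model and the top-level import path are assumptions for illustration, not part of the diff:

    import torch
    from torch import nn
    from torch.ao.pruning import WeightNormSparsifier  # assumed public export

    # Hypothetical toy model; any nn.Module with 2-D weight tensors works.
    model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 8))

    sparsifier = WeightNormSparsifier(sparsity_level=0.5)
    # Each config entry names one tensor by its fully qualified name (tensor_fqn).
    sparsifier.prepare(model, config=[{"tensor_fqn": "0.weight"}])

    sparsifier.step()         # recompute masks; calls update_mask once per group
    sparsifier.squash_mask()  # fold masks into weights, drop the parametrizations

    print((model[0].weight == 0).float().mean())  # ~0.5 after squashing

Between prepare and squash_mask the weight stays parametrized, so training can continue while step() periodically refreshes the masks.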
venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/nearly_diagonal_sparsifier.py ADDED
@@ -0,0 +1,55 @@
+ import torch
+ 
+ from . import base_sparsifier
+ 
+ 
+ class NearlyDiagonalSparsifier(base_sparsifier.BaseSparsifier):
+     r"""Nearly Diagonal Sparsifier
+ 
+     This sparsifier creates a nearly diagonal mask to be applied to the weight matrix.
+     A nearly diagonal matrix contains non-zero elements near the diagonal; the rest are zero.
+     Examples of nearly diagonal matrices with degree (or nearliness) 3 and 5 follow, respectively:
+         1 1 0 0        1 1 1 0
+         1 1 1 0        1 1 1 1
+         0 1 1 1        1 1 1 1
+         0 0 1 1        0 1 1 1
+     Note that a nearly diagonal matrix with degree 1 is just a matrix with only the main diagonal populated.
+ 
+     This sparsifier is controlled by one variable:
+     1. `nearliness` defines the number of non-zero diagonal lines that are closest to the main diagonal.
+        Currently, only odd numbers are supported.
+ 
+     Note:
+         This can be accelerated (vectorized) once the Spdiagonal feature (PR: #78439) lands, or once a
+         banded-matrix feature lands: https://stackoverflow.com/questions/52463972/generating-banded-matrices-using-numpy
+ 
+     Args:
+         nearliness: The degree of nearliness (default = 1)
+ 
+     """
+     def __init__(self, nearliness: int = 1):
+         defaults = {'nearliness': nearliness}
+         super().__init__(defaults=defaults)
+ 
+     def update_mask(self, module, tensor_name, nearliness,
+                     **kwargs):
+         mask = getattr(module.parametrizations, tensor_name)[0].mask
+         mask.data = torch.zeros_like(mask)
+         if nearliness <= 0:
+             return
+ 
+         tensor = getattr(module, tensor_name)
+         height, width = tensor.shape
+ 
+         if nearliness % 2 == 0:
+             raise ValueError("nearliness can only be an odd number")
+         dist_to_diagonal = nearliness // 2
+         # check
+         if dist_to_diagonal >= min(height, width):
+             raise ValueError("nearliness cannot be larger than the dimensions of the tensor.")
+ 
+         for row in range(0, height):
+             # Bounds of the entries that need to be set to 1
+             low = max(0, row - dist_to_diagonal)
+             high = min(width, row + dist_to_diagonal + 1)
+             mask[row, low:high].fill_(1)
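
To make the banded pattern concrete, a small usage sketch; the 4×4 Linear layer and the top-level import path are illustrative assumptions:

    import torch
    from torch import nn
    from torch.ao.pruning import NearlyDiagonalSparsifier  # assumed public export

    model = nn.Sequential(nn.Linear(4, 4))
    sparsifier = NearlyDiagonalSparsifier(nearliness=3)
    sparsifier.prepare(model, config=[{"tensor_fqn": "0.weight"}])
    sparsifier.step()

    # The mask keeps only entries within nearliness // 2 == 1 of the main
    # diagonal, matching the degree-3 example in the docstring above.
    print(model[0].parametrizations.weight[0].mask)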
venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/utils.py ADDED
@@ -0,0 +1,136 @@
+ from typing import Any, Dict, Optional, Type
+ from torch.nn.utils.parametrize import type_before_parametrizations, is_parametrized
+ from itertools import chain
+ 
+ from torch import nn
+ 
+ __all__ = [
+     "module_contains_param",
+     "swap_module",
+     "module_to_fqn",
+     "fqn_to_module",
+     "get_arg_info_from_tensor_fqn",
+     "FakeSparsity",
+ ]
+ 
+ 
+ def module_contains_param(module: nn.Module, parametrization: Type[nn.Module]) -> bool:
+     if is_parametrized(module):
+         # see if any of the module tensors have a parametrization attached that matches the one passed in
+         return any(
+             any(isinstance(param, parametrization) for param in param_list)
+             for key, param_list in module.parametrizations.items()  # type: ignore[union-attr,operator]
+         )
+     return False
+ 
+ 
+ def swap_module(
+     mod: nn.Module, mapping: Dict[Type[nn.Module], Type[nn.Module]]
+ ) -> nn.Module:
+     r"""Swaps the module using from_dense according to the mapping passed in.
+     Args:
+         mod: input module
+         mapping: a dictionary that maps from an nn module to a sparse nn module
+     Return:
+         The corresponding sparse module of `mod` according to mapping, created using from_dense
+     """
+     if type_before_parametrizations(mod) in mapping:
+         sparse_mod = mapping[type_before_parametrizations(mod)]
+ 
+         # TODO: Fix this typing, as Type[Module] has no attribute "from_dense"
+         new_mod = sparse_mod.from_dense(mod)  # type: ignore[attr-defined]
+ 
+         # Preserve the module's pre-forward hooks; they'll be called on the new module's input
+         for pre_hook_fn in mod._forward_pre_hooks.values():
+             new_mod.register_forward_pre_hook(pre_hook_fn)
+         # Preserve the module's post-forward hooks except _observer_forward_hook;
+         # after convert they'll work with the new module's output
+         for hook_fn in mod._forward_hooks.values():
+             new_mod.register_forward_hook(hook_fn)
+ 
+         # respect device affinity when swapping modules
+         devices = {p.device for p in chain(mod.parameters(), mod.buffers())}
+         assert len(devices) <= 1, (
+             f"swap_module only works with cpu or single-device CUDA modules, but got devices {devices}"
+         )
+         device = next(iter(devices)) if len(devices) > 0 else None
+         if device:
+             new_mod.to(device)
+ 
+         return new_mod
+ 
+     else:
+         return mod
+ 
+ 
+ def module_to_fqn(
+     model: nn.Module, module: nn.Module, prefix: str = ""
+ ) -> Optional[str]:
+     """
+     Returns the fqn for a module, or None if `module` is not a descendant of `model`.
+     """
+     if module is model:
+         return ""
+     for name, child in model.named_children():
+         fqn = module_to_fqn(child, module, ".")
+         if isinstance(fqn, str):
+             return prefix + name + fqn
+     return None
+ 
+ 
+ def fqn_to_module(model: Optional[nn.Module], path: str) -> Optional[nn.Module]:
+     """
+     Given an fqn, returns the corresponding module or tensor, or None if the fqn given by `path`
+     doesn't correspond to anything. Similar to model.get_submodule(path) but works for tensors.
+     """
+     if path != "":
+         for name in path.split("."):
+             model = getattr(model, name, None)
+     return model
+ 
+ 
+ def get_arg_info_from_tensor_fqn(model: nn.Module, tensor_fqn: str) -> Dict[str, Any]:
+     """
+     Uses tensor_fqn to obtain a dict containing module_fqn, module and tensor_name.
+     """
+     # string manipulation to split tensor_fqn into module_fqn and tensor_name
+     # if tensor_fqn is 'weight' then module_fqn and tensor_name are '' and 'weight'
+     # if tensor_fqn is 'linear.weight' then module_fqn and tensor_name are 'linear' and 'weight'
+     tensor_name = tensor_fqn.split(".")[-1]
+     module_fqn = tensor_fqn[: -len(tensor_name) - ("." in tensor_fqn)]
+ 
+     module = fqn_to_module(model, module_fqn)
+ 
+     return {
+         "module_fqn": module_fqn,
+         "module": module,
+         "tensor_name": tensor_name,
+         "tensor_fqn": tensor_fqn,
+     }
+ 
+ 
+ # Parametrizations
+ class FakeSparsity(nn.Module):
+     r"""Parametrization for the weights. Should be attached to the 'weight' or
+     any other parameter that requires a mask applied to it.
+ 
+     Note::
+ 
+         Once the mask is passed in, its id should not change. The
+         contents of the mask can change, but the mask reference itself should
+         not.
+     """
+ 
+     def __init__(self, mask):
+         super().__init__()
+         self.register_buffer("mask", mask)
+ 
+     def forward(self, x):
+         assert self.mask.shape == x.shape
+         return self.mask * x
+ 
+     def state_dict(self, *args, **kwargs):
+         # We don't want the parametrizations to save the mask.
+         # This makes sure that the linear module doesn't store the masks
+         # alongside their parametrizations.
+         return {}
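
The fqn helpers above are easiest to see on a tiny model. A sketch, assuming this module is importable from the file path shown in the diff header:

    from torch import nn
    from torch.ao.pruning.sparsifier.utils import (
        fqn_to_module,
        get_arg_info_from_tensor_fqn,
        module_to_fqn,
    )

    model = nn.Sequential(nn.Linear(2, 2))

    info = get_arg_info_from_tensor_fqn(model, "0.weight")
    # info == {'module_fqn': '0', 'module': model[0],
    #          'tensor_name': 'weight', 'tensor_fqn': '0.weight'}

    assert module_to_fqn(model, model[0]) == "0"
    assert fqn_to_module(model, "0") is model[0]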
venv/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/weight_norm_sparsifier.py ADDED
@@ -0,0 +1,200 @@
+ from functools import reduce
+ from typing import Callable, Optional, Tuple, Union
+ 
+ import torch
+ import torch.nn.functional as F
+ 
+ from .base_sparsifier import BaseSparsifier
+ import operator
+ 
+ __all__ = ["WeightNormSparsifier"]
+ 
+ 
+ def _flat_idx_to_2d(idx, shape):
+     rows = idx // shape[1]
+     cols = idx % shape[1]
+     return rows, cols
+ 
+ 
+ class WeightNormSparsifier(BaseSparsifier):
+     r"""Weight-Norm Sparsifier
+ 
+     This sparsifier computes the norm of every sparse block and "zeroes out" the
+     ones with the lowest norm. The level of sparsity defines how many of the
+     blocks are removed.
+ 
+     This sparsifier is controlled by three variables:
+     1. `sparsity_level` defines the number of *sparse blocks* that are zeroed out;
+     2. `sparse_block_shape` defines the shape of the sparse blocks. Note that
+        the sparse blocks originate at the zero-index of the tensor;
+     3. `zeros_per_block` is the number of zeros that we are expecting in each
+        sparse block. By default we assume that all elements within a block are
+        zeroed out. However, setting this variable changes the target number of
+        zeros per block. The zeros within each block are chosen as the *smallest
+        absolute values*.
+ 
+     Args:
+         sparsity_level: The target level of sparsity
+         sparse_block_shape: The shape of a sparse block (see note below)
+         zeros_per_block: Number of zeros in a sparse block
+         norm: Norm to use. Could be either an `int` or a callable.
+             If `int`, only L1 and L2 are implemented.
+ 
+     Note::
+         The `sparse_block_shape` is a tuple representing (block_ROWS, block_COLS),
+         irrespective of what the rows / cols mean in the data tensor. That means,
+         if you were to sparsify a weight tensor in the nn.Linear, which has a
+         weight shape `(Cout, Cin)`, the `block_ROWS` would refer to the output
+         channels, while the `block_COLS` would refer to the input channels.
+ 
+     Note::
+         All arguments to the WeightNormSparsifier constructor are "default"
+         arguments and could be overridden by the configuration provided in the
+         `prepare` step.
+     """
+     def __init__(self,
+                  sparsity_level: float = 0.5,
+                  sparse_block_shape: Tuple[int, int] = (1, 4),
+                  zeros_per_block: Optional[int] = None,
+                  norm: Optional[Union[Callable, int]] = None):
+         if zeros_per_block is None:
+             zeros_per_block = reduce(operator.mul, sparse_block_shape)
+         defaults = {
+             "sparsity_level": sparsity_level,
+             "sparse_block_shape": sparse_block_shape,
+             "zeros_per_block": zeros_per_block,
+         }
+         if norm is None:
+             norm = 2
+         if callable(norm):
+             self.norm_fn = norm
+         elif norm == 1:
+             self.norm_fn = lambda T: T.abs()
+         elif norm == 2:
+             self.norm_fn = lambda T: T * T
+         else:
+             raise NotImplementedError(f"L-{norm} is not yet implemented.")
+         super().__init__(defaults=defaults)
+ 
+     def _scatter_fold_block_mask(self, output_shape, dim, indices, block_shape,
+                                  mask=None, input_shape=None, device=None):
+         r"""Creates patches of size `block_shape` after scattering the indices."""
+         if mask is None:
+             assert input_shape is not None
+             mask = torch.ones(input_shape, device=device)
+         mask.scatter_(dim=dim, index=indices, value=0)
+         mask.data = F.fold(mask, output_size=output_shape, kernel_size=block_shape, stride=block_shape)
+         return mask
+ 
+     def _make_tensor_mask(self, data, input_shape, sparsity_level, sparse_block_shape, mask=None):
+         r"""Creates a tensor-level mask.
+ 
+         A tensor-level mask is a mask where the granularity of sparsification of the
+         smallest patch is the sparse_block_shape. That means that for a given mask and a
+         sparse_block_shape, the smallest "patch" of zeros/ones could be the sparse_block_shape.
+ 
+         In this context, `sparsity_level` describes the fraction of sparse patches.
+         """
+         h, w = data.shape[-2:]
+         block_h, block_w = sparse_block_shape
+         dh = (block_h - h % block_h) % block_h
+         dw = (block_w - w % block_w) % block_w
+ 
+         if mask is None:
+             mask = torch.ones(h + dh, w + dw, device=data.device)
+ 
+         if sparsity_level >= 1.0:
+             mask.data = torch.zeros_like(mask)
+             return mask
+         elif sparsity_level <= 0.0:
+             mask.data = torch.ones_like(mask)
+             return mask
+ 
+         values_per_block = reduce(operator.mul, sparse_block_shape)
+         if values_per_block > 1:
+             # Reduce the data
+             data = F.avg_pool2d(
+                 data[None, None, :], kernel_size=sparse_block_shape, stride=sparse_block_shape, ceil_mode=True
+             )
+         data = data.flatten()
+         num_blocks = len(data)
+ 
+         data = data.repeat(1, values_per_block, 1)
+ 
+         threshold_idx = int(round(sparsity_level * num_blocks))
+         threshold_idx = max(0, min(num_blocks - 1, threshold_idx))  # Sanity check
+         _, sorted_idx = torch.topk(data, k=threshold_idx, dim=2, largest=False)
+ 
+         # Temp reshape for mask
+         mask_reshape = mask.reshape(data.shape)  # data might be reshaped
+         self._scatter_fold_block_mask(
+             dim=2, output_shape=(h + dh, w + dw),
+             indices=sorted_idx, block_shape=sparse_block_shape, mask=mask_reshape
+         )
+         mask.data = mask_reshape.squeeze().reshape(mask.shape)[:h, :w].contiguous()
+         return mask
+ 
+     def _make_block_mask(self, data, sparse_block_shape, zeros_per_block, mask=None):
+         r"""Creates a block-level mask.
+ 
+         A block-level mask is a mask where the granularity of sparsification of the
+         largest patch is the sparse_block_shape. That means that for a given mask and a
+         sparse_block_shape, the sparsity is computed only within a patch of size sparse_block_shape.
+ 
+         In this context, `zeros_per_block` describes the number of zeroed-out elements within a patch.
+         """
+         h, w = data.shape[-2:]
+         block_h, block_w = sparse_block_shape
+         dh = (block_h - h % block_h) % block_h
+         dw = (block_w - w % block_w) % block_w
+         values_per_block = reduce(operator.mul, sparse_block_shape)
+ 
+         if mask is None:
+             mask = torch.ones((h + dh, w + dw), device=data.device)
+ 
+         if values_per_block == zeros_per_block:
+             # Everything should be sparsified
+             mask.data = torch.zeros_like(mask)
+             return mask
+ 
+         # create a new padded tensor like data (to match the block_shape)
+         padded_data = torch.ones(h + dh, w + dw, dtype=data.dtype, device=data.device)
+         padded_data.fill_(torch.nan)
+         padded_data[:h, :w] = data
+         unfolded_data = F.unfold(padded_data[None, None, :], kernel_size=sparse_block_shape, stride=sparse_block_shape)
+ 
+         # Temp reshape for mask
+         mask_reshape = mask.reshape(unfolded_data.shape)
+         _, sorted_idx = torch.topk(unfolded_data, k=zeros_per_block, dim=1, largest=False)
+ 
+         self._scatter_fold_block_mask(
+             dim=1, indices=sorted_idx, output_shape=padded_data.shape, block_shape=sparse_block_shape, mask=mask_reshape
+         )
+ 
+         mask.data = mask_reshape.squeeze().reshape(mask.shape).contiguous()
+         return mask
+ 
+     def update_mask(self, module, tensor_name, sparsity_level, sparse_block_shape,
+                     zeros_per_block, **kwargs):
+         values_per_block = reduce(operator.mul, sparse_block_shape)
+         if zeros_per_block > values_per_block:
+             raise ValueError(
+                 "Number of zeros per block cannot be more than the total number of elements in that block."
+             )
+         if zeros_per_block < 0:
+             raise ValueError("Number of zeros per block should be positive.")
+ 
+         mask = getattr(module.parametrizations, tensor_name)[0].mask
+         if sparsity_level <= 0 or zeros_per_block == 0:
+             mask.data = torch.ones_like(mask)
+         elif sparsity_level >= 1.0 and (zeros_per_block == values_per_block):
+             mask.data = torch.zeros_like(mask)
+         else:
+             ww = self.norm_fn(getattr(module, tensor_name))
+             tensor_mask = self._make_tensor_mask(
+                 data=ww, input_shape=ww.shape, sparsity_level=sparsity_level, sparse_block_shape=sparse_block_shape
+             )
+             if values_per_block != zeros_per_block:
+                 block_mask = self._make_block_mask(data=ww, sparse_block_shape=sparse_block_shape,
+                                                    zeros_per_block=zeros_per_block)
+                 tensor_mask = torch.logical_or(tensor_mask, block_mask)
+             mask.data = tensor_mask
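
A short sketch of how `sparse_block_shape` and `zeros_per_block` interact; the 4×8 Linear layer is an illustrative assumption, and it divides evenly into 1×4 blocks so no NaN padding is triggered:

    import torch
    from torch import nn
    from torch.ao.pruning import WeightNormSparsifier  # assumed public export

    model = nn.Sequential(nn.Linear(8, 4))  # weight shape (4, 8)

    # sparsity_level=1.0 selects every 1x4 block; zeros_per_block=2 then zeroes
    # only the 2 smallest-norm entries inside each block.
    sparsifier = WeightNormSparsifier(
        sparsity_level=1.0, sparse_block_shape=(1, 4), zeros_per_block=2
    )
    sparsifier.prepare(model, config=[{"tensor_fqn": "0.weight"}])
    sparsifier.step()

    mask = model[0].parametrizations.weight[0].mask
    print(mask.sum().item() / mask.numel())  # 0.5: half of each block survives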
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (279 Bytes)
venv/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc ADDED
Binary file (26.7 kB)