applied-ai-018 committed
Commit c75e8d3 · verified · 1 parent: f50f36b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step20/zero/15.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. lm-evaluation-harness/tests/testdata/arithmetic_3ds-v0-loglikelihood +1 -0
  3. lm-evaluation-harness/tests/testdata/arithmetic_5ds-v0-res.json +1 -0
  4. lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-loglikelihood +1 -0
  5. lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_2-v0-loglikelihood +1 -0
  6. lm-evaluation-harness/tests/testdata/coqa-v0-res.json +1 -0
  7. lm-evaluation-harness/tests/testdata/coqa-v1-greedy_until +1 -0
  8. lm-evaluation-harness/tests/testdata/ethics_deontology-v0-res.json +1 -0
  9. lm-evaluation-harness/tests/testdata/ethics_utilitarianism_original-v0-loglikelihood +1 -0
  10. lm-evaluation-harness/tests/testdata/ethics_utilitarianism_original-v0-res.json +1 -0
  11. lm-evaluation-harness/tests/testdata/headqa_es-v0-loglikelihood +1 -0
  12. lm-evaluation-harness/tests/testdata/hendrycksTest-formal_logic-v0-res.json +1 -0
  13. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_physics-v0-res.json +1 -0
  14. lm-evaluation-harness/tests/testdata/hendrycksTest-international_law-v0-loglikelihood +1 -0
  15. lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-res.json +1 -0
  16. lm-evaluation-harness/tests/testdata/multirc-v0-loglikelihood +1 -0
  17. lm-evaluation-harness/tests/testdata/pile_gutenberg-v1-res.json +1 -0
  18. lm-evaluation-harness/tests/testdata/pile_nih-exporter-v0-loglikelihood_rolling +1 -0
  19. lm-evaluation-harness/tests/testdata/truthfulqa_mc-v0-loglikelihood +1 -0
  20. lm-evaluation-harness/tests/testdata/truthfulqa_mc-v0-res.json +1 -0
  21. lm-evaluation-harness/tests/testdata/wmt20-ta-en-v0-greedy_until +1 -0
  22. venv/lib/python3.10/site-packages/transformers/models/ctrl/__init__.py +89 -0
  23. venv/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/configuration_ctrl.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_ctrl.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_tf_ctrl.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/tokenization_ctrl.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/ctrl/configuration_ctrl.py +116 -0
  29. venv/lib/python3.10/site-packages/transformers/models/ctrl/modeling_ctrl.py +841 -0
  30. venv/lib/python3.10/site-packages/transformers/models/ctrl/modeling_tf_ctrl.py +931 -0
  31. venv/lib/python3.10/site-packages/transformers/models/ctrl/tokenization_ctrl.py +249 -0
  32. venv/lib/python3.10/site-packages/transformers/models/gemma/__init__.py +121 -0
  33. venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/configuration_gemma.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/convert_gemma_weights_to_hf.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/modeling_flax_gemma.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/modeling_gemma.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/tokenization_gemma.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/tokenization_gemma_fast.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/gemma/configuration_gemma.py +153 -0
  41. venv/lib/python3.10/site-packages/transformers/models/gemma/convert_gemma_weights_to_hf.py +206 -0
  42. venv/lib/python3.10/site-packages/transformers/models/gemma/modeling_flax_gemma.py +773 -0
  43. venv/lib/python3.10/site-packages/transformers/models/gemma/modeling_gemma.py +1372 -0
  44. venv/lib/python3.10/site-packages/transformers/models/gemma/tokenization_gemma.py +326 -0
  45. venv/lib/python3.10/site-packages/transformers/models/gemma/tokenization_gemma_fast.py +199 -0
  46. venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/__init__.py +43 -0
  47. venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/__init__.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/tokenization_gpt_sw3.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py +197 -0
  50. venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/tokenization_gpt_sw3.py +318 -0
ckpts/universal/global_step20/zero/15.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ecfab5416ce2aeb6ea50892b9d3d0344a1185f04618fc51a844ccbdc6cf2e51f
+ size 50332828
lm-evaluation-harness/tests/testdata/arithmetic_3ds-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d3d8bad8827d4530945a1d8b3c7589c0235bbed0bc89e7561a6fdac678f6ce5c
lm-evaluation-harness/tests/testdata/arithmetic_5ds-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arithmetic_5ds": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_5ds": 0}}
lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7c2ed82612af9175052cd44d8e178b6dd084c04eb462a3d88fcacfad2df8be8e
lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ f69d9891f59872538962221fccc425b07df7cfbd83cdc546ce83e6b0e9a93f7c
lm-evaluation-harness/tests/testdata/coqa-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"coqa": {"em": 0.0, "em_stderr": 0.0, "f1": 0.0, "f1_stderr": 0.0}}, "versions": {"coqa": 0}}
lm-evaluation-harness/tests/testdata/coqa-v1-greedy_until ADDED
@@ -0,0 +1 @@
+ 57581470b921435d40da97872bb1cfda6ecf963ccc4b0240a3b04e3fea8c8e3a
lm-evaluation-harness/tests/testdata/ethics_deontology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"ethics_deontology": {"acc": 0.503615127919911, "acc_stderr": 0.008338908432085105, "em": 0.07119021134593993}}, "versions": {"ethics_deontology": 0}}
lm-evaluation-harness/tests/testdata/ethics_utilitarianism_original-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 5b42ba1faf5ece6a6ec9a3976ce79c1fac8df5b98272aab85457188c2142693c
lm-evaluation-harness/tests/testdata/ethics_utilitarianism_original-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"ethics_utilitarianism_original": {"acc": 0.5214226289517471, "acc_stderr": 0.007204999520618661}}, "versions": {"ethics_utilitarianism_original": 0}}
lm-evaluation-harness/tests/testdata/headqa_es-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 767ca34d9714edd9fb030ddbcc35a64e5180d1e247b0cb557fbb22fdf971ad1f
lm-evaluation-harness/tests/testdata/hendrycksTest-formal_logic-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-formal_logic": {"acc": 0.25396825396825395, "acc_norm": 0.2698412698412698, "acc_norm_stderr": 0.03970158273235172, "acc_stderr": 0.03893259610604674}}, "versions": {"hendrycksTest-formal_logic": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_physics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_physics": {"acc": 0.2582781456953642, "acc_norm": 0.271523178807947, "acc_norm_stderr": 0.03631329803969653, "acc_stderr": 0.035737053147634576}}, "versions": {"hendrycksTest-high_school_physics": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-international_law-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ ea9b2cefd27959db564168f6ad1169a5eaa012fc5a5d5b8faf9e34d94e335dc1
lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_intermediate_algebra": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_intermediate_algebra": 1}}
lm-evaluation-harness/tests/testdata/multirc-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ cdb026c027437a8b4653212d0944d36fc16f49921dcb8e4bef899d15a55e9f80
lm-evaluation-harness/tests/testdata/pile_gutenberg-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_gutenberg": {"bits_per_byte": 1.7952329146458065e-06, "byte_perplexity": 1.0000012443614075, "word_perplexity": 1.0000072174665404}}, "versions": {"pile_gutenberg": 1}}
lm-evaluation-harness/tests/testdata/pile_nih-exporter-v0-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 520ea6e04e8a39dc0b5f63a837429a78a40e63d39d109096101feb8c5b2cf8d8
lm-evaluation-harness/tests/testdata/truthfulqa_mc-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 226a6783976177dc9ceda5688623ff37023242eff30ddf270b886bf7b9b32228
lm-evaluation-harness/tests/testdata/truthfulqa_mc-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"truthfulqa_mc": {"mc1": 0.2141982864137087, "mc1_stderr": 0.01436214815569045, "mc2": 0.465436996173817, "mc2_stderr": 0.0048422530880316405}}, "versions": {"truthfulqa_mc": 0}}
lm-evaluation-harness/tests/testdata/wmt20-ta-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 111ea3efdc08f1cf536631b9426c3a20e482c575d009d2a8c71f59c027578eec
venv/lib/python3.10/site-packages/transformers/models/ctrl/__init__.py ADDED
@@ -0,0 +1,89 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+
+
+ _import_structure = {
+     "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
+     "tokenization_ctrl": ["CTRLTokenizer"],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_ctrl"] = [
+         "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "CTRLForSequenceClassification",
+         "CTRLLMHeadModel",
+         "CTRLModel",
+         "CTRLPreTrainedModel",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_ctrl"] = [
+         "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "TFCTRLForSequenceClassification",
+         "TFCTRLLMHeadModel",
+         "TFCTRLModel",
+         "TFCTRLPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
+     from .tokenization_ctrl import CTRLTokenizer
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_ctrl import (
+             CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
+             CTRLForSequenceClassification,
+             CTRLLMHeadModel,
+             CTRLModel,
+             CTRLPreTrainedModel,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_ctrl import (
+             TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
+             TFCTRLForSequenceClassification,
+             TFCTRLLMHeadModel,
+             TFCTRLModel,
+             TFCTRLPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
venv/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.35 kB).
 
venv/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/configuration_ctrl.cpython-310.pyc ADDED
Binary file (4.22 kB).
 
venv/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_ctrl.cpython-310.pyc ADDED
Binary file (26.6 kB).
 
venv/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_tf_ctrl.cpython-310.pyc ADDED
Binary file (29 kB).
 
venv/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/tokenization_ctrl.cpython-310.pyc ADDED
Binary file (7.53 kB).
 
venv/lib/python3.10/site-packages/transformers/models/ctrl/configuration_ctrl.py ADDED
@@ -0,0 +1,116 @@
+ # coding=utf-8
+ # Copyright 2018 Salesforce and HuggingFace Inc. team.
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Salesforce CTRL configuration"""
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class CTRLConfig(PretrainedConfig):
+     """
+     This is the configuration class to store the configuration of a [`CTRLModel`] or a [`TFCTRLModel`]. It is used to
+     instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the
+     [Salesforce/ctrl](https://huggingface.co/Salesforce/ctrl) architecture from SalesForce.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 246534):
+             Vocabulary size of the CTRL model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`CTRLModel`] or [`TFCTRLModel`].
+         n_positions (`int`, *optional*, defaults to 256):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         n_embd (`int`, *optional*, defaults to 1280):
+             Dimensionality of the embeddings and hidden states.
+         dff (`int`, *optional*, defaults to 8192):
+             Dimensionality of the inner dimension of the feed forward networks (FFN).
+         n_layer (`int`, *optional*, defaults to 48):
+             Number of hidden layers in the Transformer encoder.
+         n_head (`int`, *optional*, defaults to 16):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         resid_pdrop (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         embd_pdrop (`int`, *optional*, defaults to 0.1):
+             The dropout ratio for the embeddings.
+         layer_norm_epsilon (`float`, *optional*, defaults to 1e-06):
+             The epsilon to use in the layer normalization layers
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models).
+
+
+     Examples:
+
+     ```python
+     >>> from transformers import CTRLConfig, CTRLModel
+
+     >>> # Initializing a CTRL configuration
+     >>> configuration = CTRLConfig()
+
+     >>> # Initializing a model (with random weights) from the configuration
+     >>> model = CTRLModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "ctrl"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     attribute_map = {
+         "max_position_embeddings": "n_positions",
+         "hidden_size": "n_embd",
+         "num_attention_heads": "n_head",
+         "num_hidden_layers": "n_layer",
+     }
+
+     def __init__(
+         self,
+         vocab_size=246534,
+         n_positions=256,
+         n_embd=1280,
+         dff=8192,
+         n_layer=48,
+         n_head=16,
+         resid_pdrop=0.1,
+         embd_pdrop=0.1,
+         layer_norm_epsilon=1e-6,
+         initializer_range=0.02,
+         use_cache=True,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.n_positions = n_positions
+         self.n_embd = n_embd
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.dff = dff
+         self.resid_pdrop = resid_pdrop
+         self.embd_pdrop = embd_pdrop
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+
+         self.use_cache = use_cache
+
+         super().__init__(**kwargs)
venv/lib/python3.10/site-packages/transformers/models/ctrl/modeling_ctrl.py ADDED
@@ -0,0 +1,841 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Salesforce and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch CTRL model."""
17
+
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+ import torch
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutput
26
+ from ...modeling_utils import PreTrainedModel
27
+ from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_linear_layer
28
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
29
+ from .configuration_ctrl import CTRLConfig
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ _CONFIG_FOR_DOC = "CTRLConfig"
35
+
36
+
37
+ from ..deprecated._archive_maps import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
38
+
39
+
40
+ def angle_defn(pos, i, d_model_size):
41
+ angle_rates = 1 / torch.pow(10000, (2 * (i // 2)) / d_model_size)
42
+ return pos * angle_rates
43
+
44
+
45
+ def positional_encoding(position, d_model_size, dtype):
46
+ # create the sinusoidal pattern for the positional encoding
47
+ angle_rads = angle_defn(
48
+ torch.arange(position, dtype=torch.int64).to(dtype).unsqueeze(1),
49
+ torch.arange(d_model_size, dtype=torch.int64).to(dtype).unsqueeze(0),
50
+ d_model_size,
51
+ )
52
+
53
+ sines = torch.sin(angle_rads[:, 0::2])
54
+ cosines = torch.cos(angle_rads[:, 1::2])
55
+
56
+ pos_encoding = torch.cat([sines, cosines], dim=-1)
57
+ return pos_encoding
58
+
59
+
60
+ def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
61
+ # calculate attention
62
+ matmul_qk = torch.matmul(q, k.permute(0, 1, 3, 2))
63
+
64
+ dk = k.shape[-1]
65
+ scaled_attention_logits = matmul_qk / np.sqrt(dk)
66
+
67
+ if mask is not None:
68
+ nd, ns = scaled_attention_logits.size(-2), scaled_attention_logits.size(-1)
69
+ scaled_attention_logits += mask[ns - nd : ns, :ns] * -1e4
70
+
71
+ if attention_mask is not None:
72
+ # Apply the attention mask
73
+ scaled_attention_logits = scaled_attention_logits + attention_mask
74
+
75
+ attention_weights = torch.softmax(scaled_attention_logits, dim=-1)
76
+
77
+ # Mask heads if we want to
78
+ if head_mask is not None:
79
+ attention_weights = attention_weights * head_mask
80
+
81
+ output = torch.matmul(attention_weights, v)
82
+
83
+ return output, attention_weights
84
+
85
+
86
+ class MultiHeadAttention(nn.Module):
87
+ def __init__(self, d_model_size, num_heads):
88
+ super().__init__()
89
+ self.num_heads = num_heads
90
+ self.d_model_size = d_model_size
91
+
92
+ self.depth = int(d_model_size / self.num_heads)
93
+
94
+ self.Wq = nn.Linear(d_model_size, d_model_size)
95
+ self.Wk = nn.Linear(d_model_size, d_model_size)
96
+ self.Wv = nn.Linear(d_model_size, d_model_size)
97
+
98
+ self.dense = nn.Linear(d_model_size, d_model_size)
99
+ self.pruned_heads = set()
100
+
101
+ def prune_heads(self, heads):
102
+ attention_head_size = self.d_model_size // self.num_heads
103
+ if len(heads) == 0:
104
+ return
105
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, attention_head_size, self.pruned_heads)
106
+
107
+ # Prune linear layers
108
+ self.Wq = prune_linear_layer(self.Wq, index)
109
+ self.Wk = prune_linear_layer(self.Wk, index)
110
+ self.Wv = prune_linear_layer(self.Wv, index)
111
+ self.dense = prune_linear_layer(self.dense, index, dim=1)
112
+
113
+ # Update hyper params
114
+ self.num_heads = self.num_heads - len(heads)
115
+ self.d_model_size = attention_head_size * self.num_heads
116
+ self.pruned_heads = self.pruned_heads.union(heads)
117
+
118
+ def split_into_heads(self, x, batch_size):
119
+ x = x.reshape(batch_size, -1, self.num_heads, self.depth)
120
+ return x.permute([0, 2, 1, 3])
121
+
122
+ def forward(
123
+ self,
124
+ v,
125
+ k,
126
+ q,
127
+ mask,
128
+ layer_past=None,
129
+ attention_mask=None,
130
+ head_mask=None,
131
+ use_cache=False,
132
+ output_attentions=False,
133
+ ):
134
+ batch_size = q.shape[0]
135
+
136
+ q = self.Wq(q)
137
+ k = self.Wk(k)
138
+ v = self.Wv(v)
139
+
140
+ q = self.split_into_heads(q, batch_size)
141
+ k = self.split_into_heads(k, batch_size)
142
+ v = self.split_into_heads(v, batch_size)
143
+ if layer_past is not None:
144
+ past_key, past_value = layer_past[0], layer_past[1]
145
+ k = torch.cat((past_key, k), dim=-2)
146
+ v = torch.cat((past_value, v), dim=-2)
147
+
148
+ if use_cache is True:
149
+ present = torch.stack((k, v))
150
+ else:
151
+ present = (None,)
152
+
153
+ output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
154
+ scaled_attention = output[0].permute([0, 2, 1, 3])
155
+ attn = output[1]
156
+ original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size)
157
+ output = self.dense(original_size_attention)
158
+
159
+ outputs = (output, present)
160
+ if output_attentions:
161
+ outputs = outputs + (attn,)
162
+ return outputs
163
+
164
+
165
+ def point_wise_feed_forward_network(d_model_size, dff):
166
+ return nn.Sequential(nn.Linear(d_model_size, dff), nn.ReLU(), nn.Linear(dff, d_model_size))
167
+
168
+
169
+ class EncoderLayer(nn.Module):
170
+ def __init__(self, d_model_size, num_heads, dff, rate=0.1):
171
+ super().__init__()
172
+
173
+ self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads)
174
+ self.ffn = point_wise_feed_forward_network(d_model_size, dff)
175
+
176
+ self.layernorm1 = nn.LayerNorm(d_model_size, eps=1e-6)
177
+ self.layernorm2 = nn.LayerNorm(d_model_size, eps=1e-6)
178
+
179
+ self.dropout1 = nn.Dropout(rate)
180
+ self.dropout2 = nn.Dropout(rate)
181
+
182
+ def forward(
183
+ self, x, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False
184
+ ):
185
+ normed = self.layernorm1(x)
186
+ attn_outputs = self.multi_head_attention(
187
+ normed,
188
+ normed,
189
+ normed,
190
+ mask,
191
+ layer_past=layer_past,
192
+ attention_mask=attention_mask,
193
+ head_mask=head_mask,
194
+ use_cache=use_cache,
195
+ output_attentions=output_attentions,
196
+ )
197
+ attn_output = attn_outputs[0]
198
+ attn_output = self.dropout1(attn_output)
199
+ out1 = x + attn_output
200
+
201
+ out2 = self.layernorm2(out1)
202
+ ffn_output = self.ffn(out2)
203
+ ffn_output = self.dropout2(ffn_output)
204
+ out2 = out1 + ffn_output
205
+
206
+ outputs = (out2,) + attn_outputs[1:]
207
+ return outputs
208
+
209
+
210
+ class CTRLPreTrainedModel(PreTrainedModel):
211
+ """
212
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
213
+ models.
214
+ """
215
+
216
+ config_class = CTRLConfig
217
+ base_model_prefix = "transformer"
218
+
219
+ def _init_weights(self, module):
220
+ """Initialize the weights."""
221
+ if isinstance(module, (nn.Linear, Conv1D)):
222
+ # Slightly different from the TF version which uses truncated_normal for initialization
223
+ # cf https://github.com/pytorch/pytorch/pull/5617
224
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
225
+ if module.bias is not None:
226
+ module.bias.data.zero_()
227
+ elif isinstance(module, nn.Embedding):
228
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
229
+ if module.padding_idx is not None:
230
+ module.weight.data[module.padding_idx].zero_()
231
+ elif isinstance(module, nn.LayerNorm):
232
+ module.bias.data.zero_()
233
+ module.weight.data.fill_(1.0)
234
+
235
+
236
+ CTRL_START_DOCSTRING = r"""
237
+
238
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
239
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
240
+ etc.)
241
+
242
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
243
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
244
+ and behavior.
245
+
246
+ Parameters:
247
+ config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.
248
+ Initializing with a config file does not load the weights associated with the model, only the
249
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
250
+ """
251
+
252
+ CTRL_INPUTS_DOCSTRING = r"""
253
+ Args:
254
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
255
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`
256
+ (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
257
+
258
+ If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as
259
+ `input_ids`.
260
+
261
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
262
+ [`PreTrainedTokenizer.encode`] for details.
263
+
264
+ [What are input IDs?](../glossary#input-ids)
265
+ past_key_values (`Tuple[Tuple[torch.FloatTensor]]` of length `config.n_layers`):
266
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
267
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
268
+ their past given to this model should not be passed as input ids as they have already been computed.
269
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
270
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
271
+
272
+ - 1 for tokens that are **not masked**,
273
+ - 0 for tokens that are **masked**.
274
+
275
+ [What are attention masks?](../glossary#attention-mask)
276
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
277
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
278
+ 1]`:
279
+
280
+ - 0 corresponds to a *sentence A* token,
281
+ - 1 corresponds to a *sentence B* token.
282
+
283
+ [What are token type IDs?](../glossary#token-type-ids)
284
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
285
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
286
+ config.max_position_embeddings - 1]`.
287
+
288
+ [What are position IDs?](../glossary#position-ids)
289
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
290
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
291
+
292
+ - 1 indicates the head is **not masked**,
293
+ - 0 indicates the head is **masked**.
294
+
295
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
296
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
297
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
298
+ model's internal embedding lookup matrix.
299
+ use_cache (`bool`, *optional*):
300
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
301
+ `past_key_values`).
302
+ output_attentions (`bool`, *optional*):
303
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
304
+ tensors for more detail.
305
+ output_hidden_states (`bool`, *optional*):
306
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
307
+ more detail.
308
+ return_dict (`bool`, *optional*):
309
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
310
+ """
311
+
312
+
313
+ @add_start_docstrings(
314
+ "The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
315
+ CTRL_START_DOCSTRING,
316
+ )
317
+ class CTRLModel(CTRLPreTrainedModel):
318
+ def __init__(self, config):
319
+ super().__init__(config)
320
+
321
+ self.d_model_size = config.n_embd
322
+ self.num_layers = config.n_layer
323
+
324
+ self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float)
325
+
326
+ self.w = nn.Embedding(config.vocab_size, config.n_embd)
327
+
328
+ self.dropout = nn.Dropout(config.embd_pdrop)
329
+ self.h = nn.ModuleList(
330
+ [EncoderLayer(config.n_embd, config.n_head, config.dff, config.resid_pdrop) for _ in range(config.n_layer)]
331
+ )
332
+ self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
333
+
334
+ # Initialize weights and apply final processing
335
+ self.post_init()
336
+
337
+ def get_input_embeddings(self):
338
+ return self.w
339
+
340
+ def set_input_embeddings(self, new_embeddings):
341
+ self.w = new_embeddings
342
+
343
+ def _prune_heads(self, heads_to_prune):
344
+ """
345
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
346
+ """
347
+ for layer, heads in heads_to_prune.items():
348
+ self.h[layer].multi_head_attention.prune_heads(heads)
349
+
350
+ @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
351
+ @replace_return_docstrings(output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC)
352
+ def forward(
353
+ self,
354
+ input_ids: Optional[torch.LongTensor] = None,
355
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
356
+ attention_mask: Optional[torch.FloatTensor] = None,
357
+ token_type_ids: Optional[torch.LongTensor] = None,
358
+ position_ids: Optional[torch.LongTensor] = None,
359
+ head_mask: Optional[torch.FloatTensor] = None,
360
+ inputs_embeds: Optional[torch.FloatTensor] = None,
361
+ use_cache: Optional[bool] = None,
362
+ output_attentions: Optional[bool] = None,
363
+ output_hidden_states: Optional[bool] = None,
364
+ return_dict: Optional[bool] = None,
365
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
366
+ r"""
367
+ Returns:
368
+
369
+ Example:
370
+
371
+ ```python
372
+ >>> from transformers import AutoTokenizer, CTRLModel
373
+ >>> import torch
374
+
375
+ >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
376
+ >>> model = CTRLModel.from_pretrained("Salesforce/ctrl")
377
+
378
+ >>> # CTRL was trained with control codes as the first token
379
+ >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
380
+ >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
381
+
382
+ >>> outputs = model(**inputs)
383
+
384
+ >>> last_hidden_states = outputs.last_hidden_state
385
+ >>> list(last_hidden_states.shape)
386
+ [1, 5, 1280]
387
+ ```"""
388
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
389
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
390
+ output_hidden_states = (
391
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
392
+ )
393
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
394
+
395
+ if input_ids is not None and inputs_embeds is not None:
396
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
397
+ elif input_ids is not None:
398
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
399
+ input_shape = input_ids.size()
400
+ input_ids = input_ids.view(-1, input_shape[-1])
401
+ batch_size = input_ids.shape[0]
402
+ elif inputs_embeds is not None:
403
+ input_shape = inputs_embeds.size()[:-1]
404
+ batch_size = inputs_embeds.shape[0]
405
+ else:
406
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
407
+
408
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
409
+
410
+ if past_key_values is None:
411
+ past_length = 0
412
+ past_key_values = tuple([None] * len(self.h))
413
+ else:
414
+ past_length = past_key_values[0][0].size(-2)
415
+ if position_ids is None:
416
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
417
+ position_ids = position_ids.unsqueeze(0)
418
+
419
+ # Attention mask.
420
+ if attention_mask is not None:
421
+ if batch_size <= 0:
422
+ raise ValueError("batch_size has to be defined and > 0")
423
+ attention_mask = attention_mask.view(batch_size, -1)
424
+ # We create a 3D attention mask from a 2D tensor mask.
425
+ # Sizes are [batch_size, 1, 1, to_seq_length]
426
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
427
+ # this attention mask is more simple than the triangular masking of causal attention
428
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
429
+ attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
430
+
431
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
432
+ # masked positions, this operation will create a tensor which is 0.0 for
433
+ # positions we want to attend and the dtype's smallest value for masked positions.
434
+ # Since we are adding it to the raw scores before the softmax, this is
435
+ # effectively the same as removing these entirely.
436
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
437
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
438
+
439
+ # Prepare head mask if needed
440
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
441
+
442
+ if token_type_ids is not None:
443
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
444
+ token_type_embeds = self.w(token_type_ids)
445
+ token_type_embeds *= np.sqrt(self.d_model_size)
446
+ else:
447
+ token_type_embeds = 0
448
+
449
+ if inputs_embeds is None:
450
+ inputs_embeds = self.w(input_ids)
451
+ # inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded
452
+ seq_len = input_shape[-1]
453
+ mask = torch.triu(torch.ones(seq_len + past_length, seq_len + past_length), 1).to(device)
454
+
455
+ inputs_embeds *= np.sqrt(self.d_model_size)
456
+
457
+ # `self.pos_encoding` won't be sent to the correct device along the model, so we do it manually.
458
+ self.pos_encoding = self.pos_encoding.to(device)
459
+ pos_embeds = self.pos_encoding[position_ids, :]
460
+
461
+ hidden_states = inputs_embeds + pos_embeds + token_type_embeds
462
+
463
+ hidden_states = self.dropout(hidden_states)
464
+
465
+ presents = () if use_cache else None
466
+ all_hidden_states = () if output_hidden_states else None
467
+ all_attentions = () if output_attentions else None
468
+ for i, (h, layer_past) in enumerate(zip(self.h, past_key_values)):
469
+ if output_hidden_states:
470
+ all_hidden_states = all_hidden_states + (hidden_states,)
471
+ outputs = h(
472
+ hidden_states,
473
+ mask,
474
+ layer_past=layer_past,
475
+ attention_mask=attention_mask,
476
+ head_mask=head_mask[i],
477
+ use_cache=use_cache,
478
+ output_attentions=output_attentions,
479
+ )
480
+ hidden_states, present = outputs[:2]
481
+ if use_cache is True:
482
+ presents = presents + (present,)
483
+
484
+ if output_attentions:
485
+ all_attentions += (outputs[2],)
486
+
487
+ hidden_states = self.layernorm(hidden_states)
488
+ if output_hidden_states:
489
+ all_hidden_states = all_hidden_states + (hidden_states,)
490
+
491
+ if not return_dict:
492
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
493
+
494
+ return BaseModelOutputWithPast(
495
+ last_hidden_state=hidden_states,
496
+ past_key_values=presents,
497
+ hidden_states=all_hidden_states,
498
+ attentions=all_attentions,
499
+ )
500
+
501
+
502
+ @add_start_docstrings(
503
+ """
504
+ The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input
505
+ embeddings).
506
+ """,
507
+ CTRL_START_DOCSTRING,
508
+ )
509
+ class CTRLLMHeadModel(CTRLPreTrainedModel):
510
+ _tied_weights_keys = ["lm_head.weight"]
511
+
512
+ def __init__(self, config):
513
+ super().__init__(config)
514
+ self.transformer = CTRLModel(config)
515
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True)
516
+
517
+ # Initialize weights and apply final processing
518
+ self.post_init()
519
+
520
+ def get_output_embeddings(self):
521
+ return self.lm_head
522
+
523
+ def set_output_embeddings(self, new_embeddings):
524
+ self.lm_head = new_embeddings
525
+
526
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_cache=None, **kwargs):
527
+ # only last tokens for inputs_ids if past is defined in kwargs
528
+ if past_key_values is not None:
529
+ past_length = past_key_values[0][0].shape[2]
530
+
531
+ # Some generation methods already pass only the last input ID
532
+ if input_ids.shape[1] > past_length:
533
+ remove_prefix_length = past_length
534
+ else:
535
+ # Default to old behavior: keep only final ID
536
+ remove_prefix_length = input_ids.shape[1] - 1
537
+
538
+ input_ids = input_ids[:, remove_prefix_length:]
539
+
540
+ return {"input_ids": input_ids, "past_key_values": past_key_values, "use_cache": use_cache}
541
+
542
+ @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
543
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
544
+ def forward(
545
+ self,
546
+ input_ids: Optional[torch.LongTensor] = None,
547
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
548
+ attention_mask: Optional[torch.FloatTensor] = None,
549
+ token_type_ids: Optional[torch.LongTensor] = None,
550
+ position_ids: Optional[torch.LongTensor] = None,
551
+ head_mask: Optional[torch.FloatTensor] = None,
552
+ inputs_embeds: Optional[torch.FloatTensor] = None,
553
+ labels: Optional[torch.LongTensor] = None,
554
+ use_cache: Optional[bool] = None,
555
+ output_attentions: Optional[bool] = None,
556
+ output_hidden_states: Optional[bool] = None,
557
+ return_dict: Optional[bool] = None,
558
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
559
+ r"""
560
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
561
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
562
+ `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
563
+ are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
564
+
565
+ Returns:
566
+
567
+ Example:
568
+
569
+ ```python
570
+ >>> import torch
571
+ >>> from transformers import AutoTokenizer, CTRLLMHeadModel
572
+
573
+ >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
574
+ >>> model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl")
575
+
576
+ >>> # CTRL was trained with control codes as the first token
577
+ >>> inputs = tokenizer("Wikipedia The llama is", return_tensors="pt")
578
+ >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
579
+
580
+ >>> sequence_ids = model.generate(inputs["input_ids"])
581
+ >>> sequences = tokenizer.batch_decode(sequence_ids)
582
+ >>> sequences
583
+ ['Wikipedia The llama is a member of the family Bovidae. It is native to the Andes of Peru,']
584
+
585
+ >>> outputs = model(**inputs, labels=inputs["input_ids"])
586
+ >>> round(outputs.loss.item(), 2)
587
+ 9.21
588
+
589
+ >>> list(outputs.logits.shape)
590
+ [1, 5, 246534]
591
+ ```"""
592
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
593
+
594
+ transformer_outputs = self.transformer(
595
+ input_ids,
596
+ past_key_values=past_key_values,
597
+ attention_mask=attention_mask,
598
+ token_type_ids=token_type_ids,
599
+ position_ids=position_ids,
600
+ head_mask=head_mask,
601
+ inputs_embeds=inputs_embeds,
602
+ use_cache=use_cache,
603
+ output_attentions=output_attentions,
604
+ output_hidden_states=output_hidden_states,
605
+ return_dict=return_dict,
606
+ )
607
+
608
+ hidden_states = transformer_outputs[0]
609
+
610
+ lm_logits = self.lm_head(hidden_states)
611
+
612
+ loss = None
613
+ if labels is not None:
614
+ # Shift so that tokens < n predict n
615
+ shift_logits = lm_logits[..., :-1, :].contiguous()
616
+ shift_labels = labels[..., 1:].contiguous()
617
+ # Flatten the tokens
618
+ loss_fct = CrossEntropyLoss()
619
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
620
+
621
+ if not return_dict:
622
+ output = (lm_logits,) + transformer_outputs[1:]
623
+ return ((loss,) + output) if loss is not None else output
624
+
625
+ return CausalLMOutputWithPast(
626
+ loss=loss,
627
+ logits=lm_logits,
628
+ past_key_values=transformer_outputs.past_key_values,
629
+ hidden_states=transformer_outputs.hidden_states,
630
+ attentions=transformer_outputs.attentions,
631
+ )
632
+
633
+ @staticmethod
634
+ def _reorder_cache(
635
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
636
+ ) -> Tuple[Tuple[torch.Tensor]]:
637
+ """
638
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
639
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
640
+ beam_idx at every generation step.
641
+ """
642
+ return tuple(
643
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
644
+ for layer_past in past_key_values
645
+ )
646
+
647
+
648
+ @add_start_docstrings(
649
+ """
650
+ The CTRL Model transformer with a sequence classification head on top (linear layer).
651
+ [`CTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models
652
+ (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last
653
+ token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in
654
+ each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
655
+ guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last
656
+ value in each row of the batch).
657
+ """,
658
+ CTRL_START_DOCSTRING,
659
+ )
660
+ class CTRLForSequenceClassification(CTRLPreTrainedModel):
661
+ def __init__(self, config):
662
+ super().__init__(config)
663
+ self.num_labels = config.num_labels
664
+ self.transformer = CTRLModel(config)
665
+ self.classifier = nn.Linear(config.n_embd, self.num_labels, bias=False)
666
+
667
+ # Initialize weights and apply final processing
668
+ self.post_init()
669
+
670
+ @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
671
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
672
+ def forward(
673
+ self,
674
+ input_ids: Optional[torch.LongTensor] = None,
675
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
676
+ attention_mask: Optional[torch.FloatTensor] = None,
677
+ token_type_ids: Optional[torch.LongTensor] = None,
678
+ position_ids: Optional[torch.LongTensor] = None,
679
+ head_mask: Optional[torch.FloatTensor] = None,
680
+ inputs_embeds: Optional[torch.FloatTensor] = None,
681
+ labels: Optional[torch.LongTensor] = None,
682
+ use_cache: Optional[bool] = None,
683
+ output_attentions: Optional[bool] = None,
684
+ output_hidden_states: Optional[bool] = None,
685
+ return_dict: Optional[bool] = None,
686
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
687
+ r"""
688
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
689
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
690
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
691
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
692
+
693
+ Returns:
694
+
695
+ Example of single-label classification:
696
+
697
+ ```python
698
+ >>> import torch
699
+ >>> from transformers import AutoTokenizer, CTRLForSequenceClassification
700
+
701
+ >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
702
+ >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl")
703
+
704
+ >>> # CTRL was trained with control codes as the first token
705
+ >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
706
+ >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
707
+
708
+ >>> with torch.no_grad():
709
+ ... logits = model(**inputs).logits
710
+
711
+ >>> predicted_class_id = logits.argmax().item()
712
+ >>> model.config.id2label[predicted_class_id]
713
+ 'LABEL_0'
714
+ ```
715
+
716
+ ```python
717
+ >>> import torch
718
+
719
+ >>> torch.manual_seed(42) # doctest: +IGNORE_RESULT
720
+ >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
721
+ >>> num_labels = len(model.config.id2label)
722
+ >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=num_labels)
723
+
724
+ >>> labels = torch.tensor(1)
725
+ >>> loss = model(**inputs, labels=labels).loss
726
+ >>> round(loss.item(), 2)
727
+ 0.93
728
+ ```
729
+
730
+ Example of multi-label classification:
731
+
732
+ ```python
733
+ >>> import torch
734
+ >>> from transformers import AutoTokenizer, CTRLForSequenceClassification
735
+
736
+ >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
737
+ >>> model = CTRLForSequenceClassification.from_pretrained(
738
+ ... "Salesforce/ctrl", problem_type="multi_label_classification"
739
+ ... )
740
+
741
+ >>> # CTRL was trained with control codes as the first token
742
+ >>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
743
+ >>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
744
+
745
+ >>> with torch.no_grad():
746
+ ... logits = model(**inputs).logits
747
+
748
+ >>> predicted_class_id = logits.argmax().item()
749
+ >>> model.config.id2label[predicted_class_id]
750
+ 'LABEL_0'
751
+ ```
752
+
753
+ ```python
754
+ >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
755
+ >>> num_labels = len(model.config.id2label)
756
+ >>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=num_labels)
757
+
758
+ >>> num_labels = len(model.config.id2label)
759
+ >>> labels = torch.nn.functional.one_hot(torch.tensor([predicted_class_id]), num_classes=num_labels).to(
760
+ ... torch.float
761
+ ... )
762
+ >>> loss = model(**inputs, labels=labels).loss
763
+ >>> loss.backward() # doctest: +IGNORE_RESULT
764
+ ```"""
765
+
766
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
767
+
768
+ transformer_outputs = self.transformer(
769
+ input_ids,
770
+ past_key_values=past_key_values,
771
+ attention_mask=attention_mask,
772
+ token_type_ids=token_type_ids,
773
+ position_ids=position_ids,
774
+ head_mask=head_mask,
775
+ inputs_embeds=inputs_embeds,
776
+ use_cache=use_cache,
777
+ output_attentions=output_attentions,
778
+ output_hidden_states=output_hidden_states,
779
+ return_dict=return_dict,
780
+ )
781
+
782
+ hidden_states = transformer_outputs[0]
783
+ logits = self.classifier(hidden_states)
784
+
785
+ if input_ids is not None:
786
+ batch_size, sequence_length = input_ids.shape[:2]
787
+ else:
788
+ batch_size, sequence_length = inputs_embeds.shape[:2]
789
+
790
+ if self.config.pad_token_id is None and batch_size != 1:
791
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
792
+
793
+ if self.config.pad_token_id is None:
794
+ sequence_lengths = -1
795
+ else:
796
+ if input_ids is not None:
797
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
798
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
799
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
800
+ sequence_lengths = sequence_lengths.to(logits.device)
801
+ else:
802
+ sequence_lengths = -1
803
+ logger.warning(
804
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
805
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
806
+ )
807
+
808
+ pooled_logits = logits[range(batch_size), sequence_lengths]
809
+
810
+ loss = None
811
+ if labels is not None:
812
+ if self.config.problem_type is None:
813
+ if self.num_labels == 1:
814
+ self.config.problem_type = "regression"
815
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
816
+ self.config.problem_type = "single_label_classification"
817
+ else:
818
+ self.config.problem_type = "multi_label_classification"
819
+
820
+ if self.config.problem_type == "regression":
821
+ loss_fct = MSELoss()
822
+ if self.num_labels == 1:
823
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
824
+ else:
825
+ loss = loss_fct(pooled_logits, labels)
826
+ elif self.config.problem_type == "single_label_classification":
827
+ loss_fct = CrossEntropyLoss()
828
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
829
+ elif self.config.problem_type == "multi_label_classification":
830
+ loss_fct = BCEWithLogitsLoss()
831
+ loss = loss_fct(pooled_logits, labels)
832
+ if not return_dict:
833
+ output = (pooled_logits,) + transformer_outputs[2:]
834
+ return ((loss,) + output) if loss is not None else output
835
+
836
+ return SequenceClassifierOutput(
837
+ loss=loss,
838
+ logits=pooled_logits,
839
+ hidden_states=transformer_outputs.hidden_states,
840
+ attentions=transformer_outputs.attentions,
841
+ )
venv/lib/python3.10/site-packages/transformers/models/ctrl/modeling_tf_ctrl.py ADDED
@@ -0,0 +1,931 @@
+ # coding=utf-8
+ # Copyright 2018 Salesforce and HuggingFace Inc. team.
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ TF 2.0 CTRL model."""
+
+ from __future__ import annotations
+
+ from typing import Optional, Tuple, Union
+
+ import numpy as np
+ import tensorflow as tf
+
+ from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFSequenceClassifierOutput
+ from ...modeling_tf_utils import (
+     TFCausalLanguageModelingLoss,
+     TFModelInputType,
+     TFPreTrainedModel,
+     TFSequenceClassificationLoss,
+     get_initializer,
+     keras,
+     keras_serializable,
+     unpack_inputs,
+ )
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+ from .configuration_ctrl import CTRLConfig
+
+
+ logger = logging.get_logger(__name__)
+
+ _CHECKPOINT_FOR_DOC = "Salesforce/ctrl"
+ _CONFIG_FOR_DOC = "CTRLConfig"
+
+
+ from ..deprecated._archive_maps import TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402
+
+
+ def angle_defn(pos, i, d_model_size):
+     angle_rates = 1 / np.power(10000, (2 * (i // 2)) / d_model_size)
+     return pos * angle_rates
+
+
+ def positional_encoding(position, d_model_size):
+     # create the sinusoidal pattern for the positional encoding
+     angle_rads = angle_defn(np.arange(position)[:, np.newaxis], np.arange(d_model_size)[np.newaxis, :], d_model_size)
+
+     sines = np.sin(angle_rads[:, 0::2])
+     cosines = np.cos(angle_rads[:, 1::2])
+     pos_encoding = tf.convert_to_tensor(np.concatenate([sines, cosines], axis=-1))
+
+     return pos_encoding
+
+
+ def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
+     # calculate attention
+     matmul_qk = tf.matmul(q, k, transpose_b=True)
+
+     dk = tf.cast(shape_list(k)[-1], dtype=matmul_qk.dtype)
+     scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
+
+     if mask is not None:
+         scaled_attention_logits += tf.cast(mask * -1e4, dtype=scaled_attention_logits.dtype)
+
+     if attention_mask is not None:
+         # Apply the attention mask
+         attention_mask = tf.cast(attention_mask, dtype=scaled_attention_logits.dtype)
+         scaled_attention_logits = scaled_attention_logits + attention_mask
+
+     attention_weights = stable_softmax(scaled_attention_logits, axis=-1)
+
+     # Mask heads if we want to
+     if head_mask is not None:
+         attention_weights = attention_weights * head_mask
+
+     output = tf.matmul(attention_weights, v)
+
+     return output, attention_weights
+
+
+ class TFMultiHeadAttention(keras.layers.Layer):
+     def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):
+         super().__init__(**kwargs)
+         self.num_heads = num_heads
+         self.d_model_size = d_model_size
+         self.output_attentions = output_attentions
98
+
99
+ self.depth = int(d_model_size / self.num_heads)
100
+
101
+ self.Wq = keras.layers.Dense(d_model_size, name="Wq")
102
+ self.Wk = keras.layers.Dense(d_model_size, name="Wk")
103
+ self.Wv = keras.layers.Dense(d_model_size, name="Wv")
104
+
105
+ self.dense = keras.layers.Dense(d_model_size, name="dense")
106
+
107
+ def split_into_heads(self, x, batch_size):
108
+ x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
109
+ return tf.transpose(x, perm=[0, 2, 1, 3])
110
+
111
+ def call(self, v, k, q, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):
112
+ batch_size = shape_list(q)[0]
113
+
114
+ q = self.Wq(q)
115
+ k = self.Wk(k)
116
+ v = self.Wv(v)
117
+
118
+ q = self.split_into_heads(q, batch_size)
119
+ k = self.split_into_heads(k, batch_size)
120
+ v = self.split_into_heads(v, batch_size)
121
+
122
+ if layer_past is not None:
123
+ past_key, past_value = tf.unstack(layer_past, axis=0)
124
+ k = tf.concat((past_key, k), axis=-2)
125
+ v = tf.concat((past_value, v), axis=-2)
126
+
127
+ if use_cache:
128
+ present = tf.stack((k, v), axis=0)
129
+ else:
130
+ present = (None,)
131
+
132
+ output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
133
+ scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])
134
+ attn = output[1]
135
+ original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))
136
+ output = self.dense(original_size_attention)
137
+ outputs = (output, present)
138
+
139
+ if output_attentions:
140
+ outputs = outputs + (attn,)
141
+
142
+ return outputs
143
+
144
+ def build(self, input_shape=None):
145
+ if self.built:
146
+ return
147
+ self.built = True
148
+ if getattr(self, "Wq", None) is not None:
149
+ with tf.name_scope(self.Wq.name):
150
+ self.Wq.build([None, None, self.d_model_size])
151
+ if getattr(self, "Wk", None) is not None:
152
+ with tf.name_scope(self.Wk.name):
153
+ self.Wk.build([None, None, self.d_model_size])
154
+ if getattr(self, "Wv", None) is not None:
155
+ with tf.name_scope(self.Wv.name):
156
+ self.Wv.build([None, None, self.d_model_size])
157
+ if getattr(self, "dense", None) is not None:
158
+ with tf.name_scope(self.dense.name):
159
+ self.dense.build([None, None, self.d_model_size])
160
+
161
+
162
+ class TFPointWiseFeedForwardLayer(keras.layers.Layer):
163
+ def __init__(self, d_model_size, dff, **kwargs):
164
+ super().__init__(**kwargs)
165
+
166
+ self.dense_0 = keras.layers.Dense(dff, activation="relu", name="0")
167
+ self.dense_2 = keras.layers.Dense(d_model_size, name="2")
168
+ self.d_model_size = d_model_size
169
+ self.dff = dff
170
+
171
+ def call(self, inputs, trainable=False):
172
+ dense_0_output = self.dense_0(inputs)
173
+ dense_2_output = self.dense_2(dense_0_output)
174
+
175
+ return dense_2_output
176
+
177
+ def build(self, input_shape=None):
178
+ if self.built:
179
+ return
180
+ self.built = True
181
+ if getattr(self, "dense_0", None) is not None:
182
+ with tf.name_scope(self.dense_0.name):
183
+ self.dense_0.build([None, None, self.d_model_size])
184
+ if getattr(self, "dense_2", None) is not None:
185
+ with tf.name_scope(self.dense_2.name):
186
+ self.dense_2.build([None, None, self.dff])
187
+
188
+
189
+ class TFEncoderLayer(keras.layers.Layer):
190
+ def __init__(
191
+ self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs
192
+ ):
193
+ super().__init__(**kwargs)
194
+
195
+ self.output_attentions = output_attentions
196
+
197
+ self.multi_head_attention = TFMultiHeadAttention(
198
+ d_model_size, num_heads, output_attentions=self.output_attentions, name="multi_head_attention"
199
+ )
200
+ self.ffn = TFPointWiseFeedForwardLayer(d_model_size, dff, name="ffn")
201
+
202
+ self.layernorm1 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm1")
203
+ self.layernorm2 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm2")
204
+
205
+ self.dropout1 = keras.layers.Dropout(rate)
206
+ self.dropout2 = keras.layers.Dropout(rate)
207
+ self.d_model_size = d_model_size
208
+
209
+ def call(self, x, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):
210
+ normed = self.layernorm1(x)
211
+ attn_outputs = self.multi_head_attention(
212
+ normed,
213
+ normed,
214
+ normed,
215
+ mask,
216
+ layer_past,
217
+ attention_mask,
218
+ head_mask,
219
+ use_cache,
220
+ output_attentions,
221
+ training=training,
222
+ )
223
+ attn_output = attn_outputs[0]
224
+ attn_output = self.dropout1(attn_output, training=training)
225
+ out1 = x + attn_output
226
+
227
+ out2 = self.layernorm2(out1)
228
+ ffn_output = self.ffn(out2)
229
+ ffn_output = self.dropout2(ffn_output, training=training)
230
+ out2 = out1 + ffn_output
231
+
232
+ outputs = (out2,) + attn_outputs[1:]
233
+ return outputs
234
+
235
+ def build(self, input_shape=None):
236
+ if self.built:
237
+ return
238
+ self.built = True
239
+ if getattr(self, "multi_head_attention", None) is not None:
240
+ with tf.name_scope(self.multi_head_attention.name):
241
+ self.multi_head_attention.build(None)
242
+ if getattr(self, "ffn", None) is not None:
243
+ with tf.name_scope(self.ffn.name):
244
+ self.ffn.build(None)
245
+ if getattr(self, "layernorm1", None) is not None:
246
+ with tf.name_scope(self.layernorm1.name):
247
+ self.layernorm1.build([None, None, self.d_model_size])
248
+ if getattr(self, "layernorm2", None) is not None:
249
+ with tf.name_scope(self.layernorm2.name):
250
+ self.layernorm2.build([None, None, self.d_model_size])
251
+
252
+
253
+ @keras_serializable
254
+ class TFCTRLMainLayer(keras.layers.Layer):
255
+ config_class = CTRLConfig
256
+
257
+ def __init__(self, config, **kwargs):
258
+ super().__init__(**kwargs)
259
+
260
+ self.config = config
261
+ self.output_hidden_states = config.output_hidden_states
262
+ self.output_attentions = config.output_attentions
263
+ self.use_cache = config.use_cache
264
+ self.return_dict = config.use_return_dict
265
+
266
+ self.d_model_size = config.n_embd
267
+ self.num_layers = config.n_layer
268
+
269
+ self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)
270
+
271
+ self.w = keras.layers.Embedding(
272
+ input_dim=config.vocab_size,
273
+ output_dim=config.n_embd,
274
+ embeddings_initializer=get_initializer(config.initializer_range),
275
+ name="w",
276
+ )
277
+
278
+ self.dropout = keras.layers.Dropout(config.embd_pdrop)
279
+ self.h = [
280
+ TFEncoderLayer(
281
+ config.n_embd,
282
+ config.n_head,
283
+ config.dff,
284
+ config.resid_pdrop,
285
+ config.layer_norm_epsilon,
286
+ self.output_attentions,
287
+ name=f"h_._{i}",
288
+ )
289
+ for i in range(config.n_layer)
290
+ ]
291
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="layernorm")
292
+
293
+ def get_input_embeddings(self):
294
+ return self.w
295
+
296
+ def set_input_embeddings(self, new_embeddings):
297
+ self.w = new_embeddings
298
+
299
+ def _prune_heads(self, heads_to_prune):
300
+ """
301
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
302
+ """
303
+ raise NotImplementedError
304
+
305
+ @unpack_inputs
306
+ def call(
307
+ self,
308
+ input_ids: TFModelInputType | None = None,
309
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
310
+ attention_mask: np.ndarray | tf.Tensor | None = None,
311
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
312
+ position_ids: np.ndarray | tf.Tensor | None = None,
313
+ head_mask: np.ndarray | tf.Tensor | None = None,
314
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
315
+ use_cache: Optional[bool] = None,
316
+ output_attentions: Optional[bool] = None,
317
+ output_hidden_states: Optional[bool] = None,
318
+ return_dict: Optional[bool] = None,
319
+ training: Optional[bool] = False,
320
+ ) -> Union[Tuple, TFBaseModelOutputWithPast]:
321
+ # If using past key value states, only the last tokens
322
+ # should be given as an input
323
+ if past_key_values is not None:
324
+ if input_ids is not None:
325
+ input_ids = input_ids[:, -1:]
326
+ if inputs_embeds is not None:
327
+ inputs_embeds = inputs_embeds[:, -1:]
328
+ if token_type_ids is not None:
329
+ token_type_ids = token_type_ids[:, -1:]
330
+
331
+ if input_ids is not None and inputs_embeds is not None:
332
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
333
+ elif input_ids is not None:
334
+ input_shape = shape_list(input_ids)
335
+ input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
336
+ elif inputs_embeds is not None:
337
+ input_shape = shape_list(inputs_embeds)[:-1]
338
+ else:
339
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
340
+
341
+ if past_key_values is None:
342
+ past_length = 0
343
+ past_key_values = [None] * len(self.h)
344
+ else:
345
+ past_length = shape_list(past_key_values[0][0])[-2]
346
+ if position_ids is None:
347
+ position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32), axis=0)
348
+ position_ids = tf.tile(position_ids, [input_shape[0], 1])
349
+
350
+ # Attention mask.
351
+ if attention_mask is not None:
352
+ # We create a 3D attention mask from a 2D tensor mask.
353
+ # Sizes are [batch_size, 1, 1, to_seq_length]
354
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
355
+ # this attention mask is more simple than the triangular masking of causal attention
356
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
357
+ attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1] + past_length))
358
+
359
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
360
+ # masked positions, this operation will create a tensor which is 0.0 for
361
+ # positions we want to attend and -10000.0 for masked positions.
362
+ # Since we are adding it to the raw scores before the softmax, this is
363
+ # effectively the same as removing these entirely.
364
+
365
+ one_cst = tf.constant(1.0)
366
+ ten_thousand_cst = tf.constant(-10000.0)
367
+ attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
368
+ attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), ten_thousand_cst)
369
+
370
+ # Prepare head mask if needed
371
+ # 1.0 in head_mask indicate we keep the head
372
+ # attention_probs has shape bsz x n_heads x N x N
373
+ # head_mask has shape n_layer x batch x n_heads x N x N
374
+ if head_mask is not None:
375
+ raise NotImplementedError
376
+ else:
377
+ head_mask = [None] * self.num_layers
378
+
379
+ if token_type_ids is not None:
380
+ token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
381
+ token_type_embeds = self.w(token_type_ids)
382
+ token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, dtype=token_type_embeds.dtype))
383
+ else:
384
+ token_type_embeds = tf.constant(0.0)
385
+ position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
386
+
387
+ if inputs_embeds is None:
388
+ check_embeddings_within_bounds(input_ids, self.w.input_dim)
389
+ inputs_embeds = self.w(input_ids)
390
+ seq_len = input_shape[-1]
391
+ mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
392
+
393
+ inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, inputs_embeds.dtype))
394
+
395
+ pos_embeds = tf.gather(self.pos_encoding, position_ids)
396
+ pos_embeds = tf.cast(pos_embeds, dtype=token_type_embeds.dtype)
397
+ hidden_states = inputs_embeds + pos_embeds + token_type_embeds
398
+
399
+ hidden_states = self.dropout(hidden_states, training=training)
400
+
401
+ output_shape = input_shape + [shape_list(hidden_states)[-1]]
402
+ presents = () if use_cache else None
403
+ all_hidden_states = () if output_hidden_states else None
404
+ all_attentions = () if output_attentions else None
405
+ for i, (h, layer_past) in enumerate(zip(self.h, past_key_values)):
406
+ if output_hidden_states:
407
+ all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
408
+ outputs = h(
409
+ hidden_states,
410
+ mask,
411
+ layer_past,
412
+ attention_mask,
413
+ head_mask[i],
414
+ use_cache,
415
+ output_attentions,
416
+ training=training,
417
+ )
418
+ hidden_states, present = outputs[:2]
419
+
420
+ if use_cache:
421
+ presents = presents + (present,)
422
+
423
+ if output_attentions:
424
+ all_attentions = all_attentions + (outputs[2],)
425
+
426
+ hidden_states = self.layernorm(hidden_states)
427
+ hidden_states = tf.reshape(hidden_states, output_shape)
428
+ if output_hidden_states:
429
+ all_hidden_states = all_hidden_states + (hidden_states,)
430
+
431
+ if output_attentions:
432
+ # let the number of heads free (-1) so we can extract attention even after head pruning
433
+ attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
434
+ all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
435
+
436
+ if not return_dict:
437
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
438
+
439
+ return TFBaseModelOutputWithPast(
440
+ last_hidden_state=hidden_states,
441
+ past_key_values=presents,
442
+ hidden_states=all_hidden_states,
443
+ attentions=all_attentions,
444
+ )
445
+
446
+ def build(self, input_shape=None):
447
+ if self.built:
448
+ return
449
+ self.built = True
450
+ if getattr(self, "w", None) is not None:
451
+ with tf.name_scope(self.w.name):
452
+ self.w.build(None)
453
+ if getattr(self, "layernorm", None) is not None:
454
+ with tf.name_scope(self.layernorm.name):
455
+ self.layernorm.build([None, None, self.config.n_embd])
456
+ if getattr(self, "h", None) is not None:
457
+ for layer in self.h:
458
+ with tf.name_scope(layer.name):
459
+ layer.build(None)
460
+
461
+
462
+ class TFCTRLPreTrainedModel(TFPreTrainedModel):
463
+ """
464
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
465
+ models.
466
+ """
467
+
468
+ config_class = CTRLConfig
469
+ base_model_prefix = "transformer"
470
+
471
+
472
+ CTRL_START_DOCSTRING = r"""
473
+
474
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
475
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
476
+ etc.)
477
+
478
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
479
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
480
+ behavior.
481
+
482
+ <Tip>
483
+
484
+ TensorFlow models and layers in `transformers` accept two formats as input:
485
+
486
+ - having all inputs as keyword arguments (like PyTorch models), or
487
+ - having all inputs as a list, tuple or dict in the first positional argument.
488
+
489
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
490
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
491
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
492
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
493
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
494
+ positional argument:
495
+
496
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
497
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
498
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
499
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
500
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
501
+
502
+ Note that when creating models and layers with
503
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
504
+ about any of this, as you can just pass inputs like you would to any other Python function!
505
+
506
+ </Tip>
507
+
508
+ Parameters:
509
+ config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.
510
+ Initializing with a config file does not load the weights associated with the model, only the
511
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
512
+ """
513
+
514
+ CTRL_INPUTS_DOCSTRING = r"""
515
+ Args:
516
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
517
+ `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
518
+ input past key value states).
519
+
520
+ Indices of input sequence tokens in the vocabulary.
521
+
522
+ If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.
523
+
524
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
525
+ [`PreTrainedTokenizer.encode`] for details.
526
+
527
+ [What are input IDs?](../glossary#input-ids)
528
+ past (`List[tf.Tensor]` of length `config.n_layers`):
529
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
530
+ `past` output below). Can be used to speed up sequential decoding. The token ids which have their past
531
+ given to this model should not be passed as input ids as they have already been computed.
532
+ attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
533
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
534
+
535
+ - 1 for tokens that are **not masked**,
536
+ - 0 for tokens that are **masked**.
537
+
538
+ [What are attention masks?](../glossary#attention-mask)
539
+ token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
540
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
541
+ 1]`:
542
+
543
+ - 0 corresponds to a *sentence A* token,
544
+ - 1 corresponds to a *sentence B* token.
545
+
546
+ [What are token type IDs?](../glossary#token-type-ids)
547
+ position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
548
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
549
+ config.max_position_embeddings - 1]`.
550
+
551
+ [What are position IDs?](../glossary#position-ids)
552
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
553
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
554
+
555
+ - 1 indicates the head is **not masked**,
556
+ - 0 indicates the head is **masked**.
557
+
558
+ inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
559
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
560
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
561
+ model's internal embedding lookup matrix.
562
+ use_cache (`bool`, *optional*):
563
+ If set to `True`, `past` key value states are returned and can be used to speed up decoding (see `past`).
564
+ output_attentions (`bool`, *optional*):
565
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
566
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
567
+ config will be used instead.
568
+ output_hidden_states (`bool`, *optional*):
569
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
570
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
571
+ used instead.
572
+ return_dict (`bool`, *optional*):
573
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
574
+ eager mode, in graph mode the value will always be set to True.
575
+ training (`bool`, *optional*, defaults to `False`):
576
+ Whether or not to use the model in training mode (some modules like dropout modules have different
577
+ behaviors between training and evaluation).
578
+ """
579
+
580
+
581
+ @add_start_docstrings(
582
+ "The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
583
+ CTRL_START_DOCSTRING,
584
+ )
585
+ class TFCTRLModel(TFCTRLPreTrainedModel):
586
+ def __init__(self, config, *inputs, **kwargs):
587
+ super().__init__(config, *inputs, **kwargs)
588
+ self.transformer = TFCTRLMainLayer(config, name="transformer")
589
+
590
+ @unpack_inputs
591
+ @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
592
+ @add_code_sample_docstrings(
593
+ checkpoint=_CHECKPOINT_FOR_DOC,
594
+ output_type=TFBaseModelOutputWithPast,
595
+ config_class=_CONFIG_FOR_DOC,
596
+ )
597
+ def call(
598
+ self,
599
+ input_ids: TFModelInputType | None = None,
600
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
601
+ attention_mask: np.ndarray | tf.Tensor | None = None,
602
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
603
+ position_ids: np.ndarray | tf.Tensor | None = None,
604
+ head_mask: np.ndarray | tf.Tensor | None = None,
605
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
606
+ use_cache: Optional[bool] = None,
607
+ output_attentions: Optional[bool] = None,
608
+ output_hidden_states: Optional[bool] = None,
609
+ return_dict: Optional[bool] = None,
610
+ training: Optional[bool] = False,
611
+ ) -> Union[Tuple, TFBaseModelOutputWithPast]:
612
+ outputs = self.transformer(
613
+ input_ids=input_ids,
614
+ past_key_values=past_key_values,
615
+ attention_mask=attention_mask,
616
+ token_type_ids=token_type_ids,
617
+ position_ids=position_ids,
618
+ head_mask=head_mask,
619
+ inputs_embeds=inputs_embeds,
620
+ use_cache=use_cache,
621
+ output_attentions=output_attentions,
622
+ output_hidden_states=output_hidden_states,
623
+ return_dict=return_dict,
624
+ training=training,
625
+ )
626
+ return outputs
627
+
628
+ def build(self, input_shape=None):
629
+ if self.built:
630
+ return
631
+ self.built = True
632
+ if getattr(self, "transformer", None) is not None:
633
+ with tf.name_scope(self.transformer.name):
634
+ self.transformer.build(None)
635
+
636
+
637
+ class TFCTRLBiasLayer(keras.layers.Layer):
638
+ """
639
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
640
+ so all weights have to be registered in a layer.
641
+ """
642
+
643
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
644
+ super().__init__(name=name, **kwargs)
645
+ self.shape = shape
646
+ self.initializer = initializer
647
+ self.trainable = trainable
648
+
649
+ def build(self, input_shape):
650
+ self.bias = self.add_weight(
651
+ name="bias", shape=self.shape, initializer=self.initializer, trainable=self.trainable
652
+ )
653
+ super().build(input_shape)
654
+
655
+ def call(self, x):
656
+ return x + self.bias
657
+
658
+
659
+ @add_start_docstrings(
660
+ """
661
+ The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input
662
+ embeddings).
663
+ """,
664
+ CTRL_START_DOCSTRING,
665
+ )
666
+ class TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss):
667
+ def __init__(self, config, *inputs, **kwargs):
668
+ super().__init__(config, *inputs, **kwargs)
669
+ self.transformer = TFCTRLMainLayer(config, name="transformer")
670
+ self.bias_layer = TFCTRLBiasLayer(
671
+ name="lm_head", shape=[1, config.vocab_size], initializer="zeros", trainable=True
672
+ )
673
+
674
+ def get_output_embeddings(self):
675
+ return self.get_input_embeddings()
676
+
677
+ def set_output_embeddings(self, value):
678
+ self.set_input_embeddings(value)
679
+
680
+ def get_bias(self):
681
+ return {"lm_head.bias": self.bias_layer.bias}
682
+
683
+ def set_bias(self, value):
684
+ # Replaces the existing layers containing bias for correct (de)serialization.
685
+ vocab_size = value["lm_head.bias"].shape[-1]
686
+ self.bias_layer = TFCTRLBiasLayer(
687
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=True
688
+ )
689
+ self.bias_layer.build(None)
690
+ self.bias_layer.bias.assign(value["lm_head.bias"])
691
+
692
+ # Copied from transformers.models.gpt2.modeling_tf_gpt2.TFGPT2LMHeadModel.prepare_inputs_for_generation
693
+ def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
694
+ token_type_ids = kwargs.get("token_type_ids", None)
695
+ # only last token for inputs_ids if past is defined in kwargs
696
+ if past_key_values:
697
+ inputs = tf.expand_dims(inputs[:, -1], -1)
698
+ if token_type_ids is not None:
699
+ token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)
700
+
701
+ position_ids = kwargs.get("position_ids", None)
702
+ attention_mask = kwargs.get("attention_mask", None)
703
+
704
+ if attention_mask is not None and position_ids is None:
705
+ position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
706
+ if past_key_values:
707
+ position_ids = tf.expand_dims(position_ids[:, -1], -1)
708
+
709
+ return {
710
+ "input_ids": inputs,
711
+ "attention_mask": attention_mask,
712
+ "position_ids": position_ids,
713
+ "past_key_values": past_key_values,
714
+ "use_cache": use_cache,
715
+ "token_type_ids": token_type_ids,
716
+ }
717
+
718
+ @unpack_inputs
719
+ @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
720
+ @add_code_sample_docstrings(
721
+ checkpoint=_CHECKPOINT_FOR_DOC,
722
+ output_type=TFCausalLMOutputWithPast,
723
+ config_class=_CONFIG_FOR_DOC,
724
+ )
725
+ def call(
726
+ self,
727
+ input_ids: TFModelInputType | None = None,
728
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
729
+ attention_mask: np.ndarray | tf.Tensor | None = None,
730
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
731
+ position_ids: np.ndarray | tf.Tensor | None = None,
732
+ head_mask: np.ndarray | tf.Tensor | None = None,
733
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
734
+ use_cache: Optional[bool] = None,
735
+ output_attentions: Optional[bool] = None,
736
+ output_hidden_states: Optional[bool] = None,
737
+ return_dict: Optional[bool] = None,
738
+ labels: np.ndarray | tf.Tensor | None = None,
739
+ training: Optional[bool] = False,
740
+ ) -> Union[Tuple, TFCausalLMOutputWithPast]:
741
+ r"""
742
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
743
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
744
+ config.vocab_size - 1]`.
745
+ """
746
+ transformer_outputs = self.transformer(
747
+ input_ids=input_ids,
748
+ past_key_values=past_key_values,
749
+ attention_mask=attention_mask,
750
+ token_type_ids=token_type_ids,
751
+ position_ids=position_ids,
752
+ head_mask=head_mask,
753
+ inputs_embeds=inputs_embeds,
754
+ use_cache=use_cache,
755
+ output_attentions=output_attentions,
756
+ output_hidden_states=output_hidden_states,
757
+ return_dict=return_dict,
758
+ training=training,
759
+ )
760
+ hidden_states = transformer_outputs[0]
761
+ logits = tf.matmul(hidden_states, self.transformer.w.weights, transpose_b=True)
762
+ logits = self.bias_layer(logits)
763
+
764
+ loss = None
765
+ if labels is not None:
766
+ # shift labels to the left and cut last logit token
767
+ shifted_logits = logits[:, :-1]
768
+ labels = labels[:, 1:]
769
+ loss = self.hf_compute_loss(labels, shifted_logits)
770
+
771
+ if not return_dict:
772
+ output = (logits,) + transformer_outputs[1:]
773
+ return ((loss,) + output) if loss is not None else output
774
+
775
+ return TFCausalLMOutputWithPast(
776
+ loss=loss,
777
+ logits=logits,
778
+ past_key_values=transformer_outputs.past_key_values,
779
+ hidden_states=transformer_outputs.hidden_states,
780
+ attentions=transformer_outputs.attentions,
781
+ )
782
+
783
+ def build(self, input_shape=None):
784
+ if self.built:
785
+ return
786
+ self.built = True
787
+ if getattr(self, "transformer", None) is not None:
788
+ with tf.name_scope(self.transformer.name):
789
+ self.transformer.build(None)
790
+ if getattr(self, "bias_layer", None) is not None:
791
+ with tf.name_scope(self.bias_layer.name):
792
+ self.bias_layer.build(None)
793
+
794
+
795
+ @add_start_docstrings(
796
+ """
797
+ The CTRL Model transformer with a sequence classification head on top (linear layer).
798
+
799
+ [`TFCTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models
800
+ (e.g. GPT-1, GPT-2) do.
801
+
802
+ Since it does classification on the last token, it requires to know the position of the last token. If a
803
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
804
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
805
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
806
+ each row of the batch).
807
+ """,
808
+ CTRL_START_DOCSTRING,
809
+ )
810
+ class TFCTRLForSequenceClassification(TFCTRLPreTrainedModel, TFSequenceClassificationLoss):
811
+ def __init__(self, config, *inputs, **kwargs):
812
+ super().__init__(config, *inputs, **kwargs)
813
+ self.num_labels = config.num_labels
814
+ self.classifier = keras.layers.Dense(
815
+ config.num_labels,
816
+ kernel_initializer=get_initializer(config.initializer_range),
817
+ name="classifier",
818
+ use_bias=False,
819
+ )
820
+ self.transformer = TFCTRLMainLayer(config, name="transformer")
821
+ self.config = config
822
+
823
+ def get_output_embeddings(self):
824
+ # Remove after transformers v4.32. Fix this model's `test_model_common_attributes` test too.
825
+ logger.warning(
826
+ "Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed "
827
+ "in transformers v4.32."
828
+ )
829
+ return self.transformer.w
830
+
831
+ @unpack_inputs
832
+ @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
833
+ @add_code_sample_docstrings(
834
+ checkpoint=_CHECKPOINT_FOR_DOC,
835
+ output_type=TFSequenceClassifierOutput,
836
+ config_class=_CONFIG_FOR_DOC,
837
+ )
838
+ def call(
839
+ self,
840
+ input_ids: TFModelInputType | None = None,
841
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
842
+ attention_mask: np.ndarray | tf.Tensor | None = None,
843
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
844
+ position_ids: np.ndarray | tf.Tensor | None = None,
845
+ head_mask: np.ndarray | tf.Tensor | None = None,
846
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
847
+ use_cache: Optional[bool] = None,
848
+ output_attentions: Optional[bool] = None,
849
+ output_hidden_states: Optional[bool] = None,
850
+ return_dict: Optional[bool] = None,
851
+ labels: np.ndarray | tf.Tensor | None = None,
852
+ training: Optional[bool] = False,
853
+ ) -> Union[Tuple, TFSequenceClassifierOutput]:
854
+ r"""
855
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
856
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
857
+ config.vocab_size - 1]`.
858
+ """
859
+
860
+ transformer_outputs = self.transformer(
861
+ input_ids=input_ids,
862
+ past_key_values=past_key_values,
863
+ attention_mask=attention_mask,
864
+ token_type_ids=token_type_ids,
865
+ position_ids=position_ids,
866
+ head_mask=head_mask,
867
+ inputs_embeds=inputs_embeds,
868
+ use_cache=use_cache,
869
+ output_attentions=output_attentions,
870
+ output_hidden_states=output_hidden_states,
871
+ return_dict=return_dict,
872
+ training=training,
873
+ )
874
+
875
+ hidden_states = transformer_outputs[0]
876
+ logits = self.classifier(hidden_states)
877
+ in_logits = None
878
+ if self.config.pad_token_id is None:
879
+ sequence_lengths = -1
880
+ else:
881
+ if input_ids is not None:
882
+ sequence_lengths = (
883
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
884
+ - 1
885
+ )
886
+ sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
887
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
888
+ else:
889
+ sequence_lengths = -1
890
+ logger.warning(
891
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
892
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
893
+ )
894
+ loss = None
895
+
896
+ if labels is not None:
897
+ if input_ids is not None:
898
+ batch_size, sequence_length = shape_list(input_ids)[:2]
899
+ else:
900
+ batch_size, sequence_length = shape_list(inputs_embeds)[:2]
901
+ if self.config.pad_token_id is None and batch_size != 1:
902
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
903
+
904
+ if not tf.is_tensor(sequence_lengths):
905
+ in_logits = logits[0:batch_size, sequence_lengths]
906
+
907
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
908
+
909
+ pooled_logits = in_logits if in_logits is not None else logits
910
+
911
+ if not return_dict:
912
+ output = (pooled_logits,) + transformer_outputs[1:]
913
+ return ((loss,) + output) if loss is not None else output
914
+
915
+ return TFSequenceClassifierOutput(
916
+ loss=loss,
917
+ logits=pooled_logits,
918
+ hidden_states=transformer_outputs.hidden_states,
919
+ attentions=transformer_outputs.attentions,
920
+ )
921
+
922
+ def build(self, input_shape=None):
923
+ if self.built:
924
+ return
925
+ self.built = True
926
+ if getattr(self, "classifier", None) is not None:
927
+ with tf.name_scope(self.classifier.name):
928
+ self.classifier.build([None, None, self.config.n_embd])
929
+ if getattr(self, "transformer", None) is not None:
930
+ with tf.name_scope(self.transformer.name):
931
+ self.transformer.build(None)
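The TF main layer above builds its causal mask with `tf.linalg.band_part` and adds a sinusoidal positional encoding before scaling the embeddings by `sqrt(d_model)`. A small self-contained sketch of those two pieces with toy sizes (no model weights involved):

import numpy as np
import tensorflow as tf

seq_len, d_model = 4, 8

# causal mask: ones mark future positions that must not be attended to
mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
print(mask.numpy())  # strictly upper-triangular ones

# sinusoidal positional encoding, mirroring positional_encoding() above
pos = np.arange(seq_len)[:, np.newaxis]
i = np.arange(d_model)[np.newaxis, :]
angle_rads = pos / np.power(10000, (2 * (i // 2)) / d_model)
pos_encoding = np.concatenate([np.sin(angle_rads[:, 0::2]), np.cos(angle_rads[:, 1::2])], axis=-1)
print(pos_encoding.shape)  # (4, 8)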
venv/lib/python3.10/site-packages/transformers/models/ctrl/tokenization_ctrl.py ADDED
@@ -0,0 +1,249 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Salesforce and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Salesforce CTRL."""
16
+
17
+
18
+ import json
19
+ import os
20
+ from typing import Optional, Tuple
21
+
22
+ import regex as re
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {
31
+ "vocab_file": "vocab.json",
32
+ "merges_file": "merges.txt",
33
+ }
34
+
35
+
36
+ CONTROL_CODES = {
37
+ "Pregnancy": 168629,
38
+ "Christianity": 7675,
39
+ "Explain": 106423,
40
+ "Fitness": 63440,
41
+ "Saving": 63163,
42
+ "Ask": 27171,
43
+ "Ass": 95985,
44
+ "Joke": 163509,
45
+ "Questions": 45622,
46
+ "Thoughts": 49605,
47
+ "Retail": 52342,
48
+ "Feminism": 164338,
49
+ "Writing": 11992,
50
+ "Atheism": 192263,
51
+ "Netflix": 48616,
52
+ "Computing": 39639,
53
+ "Opinion": 43213,
54
+ "Alone": 44967,
55
+ "Funny": 58917,
56
+ "Gaming": 40358,
57
+ "Human": 4088,
58
+ "India": 1331,
59
+ "Joker": 77138,
60
+ "Diet": 36206,
61
+ "Legal": 11859,
62
+ "Norman": 4939,
63
+ "Tip": 72689,
64
+ "Weight": 52343,
65
+ "Movies": 46273,
66
+ "Running": 23425,
67
+ "Science": 2090,
68
+ "Horror": 37793,
69
+ "Confession": 60572,
70
+ "Finance": 12250,
71
+ "Politics": 16360,
72
+ "Scary": 191985,
73
+ "Support": 12654,
74
+ "Technologies": 32516,
75
+ "Teenage": 66160,
76
+ "Event": 32769,
77
+ "Learned": 67460,
78
+ "Notion": 182770,
79
+ "Wikipedia": 37583,
80
+ "Books": 6665,
81
+ "Extract": 76050,
82
+ "Confessions": 102701,
83
+ "Conspiracy": 75932,
84
+ "Links": 63674,
85
+ "Narcissus": 150425,
86
+ "Relationship": 54766,
87
+ "Relationships": 134796,
88
+ "Reviews": 41671,
89
+ "News": 4256,
90
+ "Translation": 26820,
91
+ "multilingual": 128406,
92
+ }
93
+
94
+
95
+ def get_pairs(word):
96
+ """
97
+ Return set of symbol pairs in a word.
98
+
99
+ Word is represented as tuple of symbols (symbols being variable-length strings).
100
+ """
101
+ pairs = set()
102
+ prev_char = word[0]
103
+ for char in word[1:]:
104
+ pairs.add((prev_char, char))
105
+ prev_char = char
106
+
107
+ pairs = set(pairs)
108
+ return pairs
109
+
110
+
111
+ class CTRLTokenizer(PreTrainedTokenizer):
112
+ """
113
+ Construct a CTRL tokenizer. Based on Byte-Pair-Encoding.
114
+
115
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
116
+ this superclass for more information regarding those methods.
117
+
118
+ Args:
119
+ vocab_file (`str`):
120
+ Path to the vocabulary file.
121
+ merges_file (`str`):
122
+ Path to the merges file.
123
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
124
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
125
+ token instead.
126
+ """
127
+
128
+ vocab_files_names = VOCAB_FILES_NAMES
129
+ control_codes = CONTROL_CODES
130
+
131
+ def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
132
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
133
+ self.encoder = json.load(vocab_handle)
134
+ self.decoder = {v: k for k, v in self.encoder.items()}
135
+ with open(merges_file, encoding="utf-8") as merges_handle:
136
+ merges = merges_handle.read().split("\n")[1:-1]
137
+ merges = [tuple(merge.split()) for merge in merges]
138
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
139
+ self.cache = {}
140
+ super().__init__(unk_token=unk_token, **kwargs)
141
+
142
+ @property
143
+ def vocab_size(self):
144
+ return len(self.encoder)
145
+
146
+ def get_vocab(self):
147
+ return dict(self.encoder, **self.added_tokens_encoder)
148
+
149
+ def bpe(self, token):
150
+ if token in self.cache:
151
+ return self.cache[token]
152
+ word = tuple(token)
153
+ word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
154
+ pairs = get_pairs(word)
155
+
156
+ if not pairs:
157
+ return token
158
+
159
+ while True:
160
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
161
+ if bigram not in self.bpe_ranks:
162
+ break
163
+ first, second = bigram
164
+ new_word = []
165
+ i = 0
166
+ while i < len(word):
167
+ try:
168
+ j = word.index(first, i)
169
+ except ValueError:
170
+ new_word.extend(word[i:])
171
+ break
172
+ else:
173
+ new_word.extend(word[i:j])
174
+ i = j
175
+
176
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
177
+ new_word.append(first + second)
178
+ i += 2
179
+ else:
180
+ new_word.append(word[i])
181
+ i += 1
182
+ new_word = tuple(new_word)
183
+ word = new_word
184
+ if len(word) == 1:
185
+ break
186
+ else:
187
+ pairs = get_pairs(word)
188
+ word = "@@ ".join(word)
189
+ word = word[:-4]
190
+ self.cache[token] = word
191
+ return word
192
+
193
+ def _tokenize(self, text):
194
+ """Tokenize a string."""
195
+ split_tokens = []
196
+
197
+ words = re.findall(r"\S+\n?", text)
198
+
199
+ for token in words:
200
+ split_tokens.extend(list(self.bpe(token).split(" ")))
201
+ return split_tokens
202
+
203
+ def _convert_token_to_id(self, token):
204
+ """Converts a token (str) in an id using the vocab."""
205
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
206
+
207
+ def _convert_id_to_token(self, index):
208
+ """Converts an index (integer) in a token (str) using the vocab."""
209
+ return self.decoder.get(index, self.unk_token)
210
+
211
+ def convert_tokens_to_string(self, tokens):
212
+ """Converts a sequence of tokens (string) in a single string."""
213
+ out_string = " ".join(tokens).replace("@@ ", "").strip()
214
+ return out_string
215
+
216
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
217
+ if not os.path.isdir(save_directory):
218
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
219
+ return
220
+ vocab_file = os.path.join(
221
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
222
+ )
223
+ merge_file = os.path.join(
224
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
225
+ )
226
+
227
+ with open(vocab_file, "w", encoding="utf-8") as f:
228
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
229
+
230
+ index = 0
231
+ with open(merge_file, "w", encoding="utf-8") as writer:
232
+ writer.write("#version: 0.2\n")
233
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
234
+ if index != token_index:
235
+ logger.warning(
236
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
237
+ " Please check that the tokenizer is not corrupted!"
238
+ )
239
+ index = token_index
240
+ writer.write(" ".join(bpe_tokens) + "\n")
241
+ index += 1
242
+
243
+ return vocab_file, merge_file
244
+
245
+ # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
246
+ # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
247
+ # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
248
+ # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
249
+ # return ''.join(tokens_generated_so_far)
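The tokenizer above marks BPE continuation pieces with a trailing `@@`, and `convert_tokens_to_string` strips those markers back out when detokenizing. A tiny sketch of that convention with hypothetical pieces (no vocab or merges file is loaded):

# hypothetical BPE pieces: "controll@@" continues into "able"
tokens = ["Links", "controll@@", "able"]

# join the pieces and drop the "@@ " continuation markers, as convert_tokens_to_string() does
text = " ".join(tokens).replace("@@ ", "").strip()
print(text)  # Links controllable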
venv/lib/python3.10/site-packages/transformers/models/gemma/__init__.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_sentencepiece_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_gemma": ["GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "GemmaConfig"],
28
+ }
29
+
30
+ try:
31
+ if not is_sentencepiece_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["tokenization_gemma"] = ["GemmaTokenizer"]
37
+
38
+ try:
39
+ if not is_tokenizers_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["tokenization_gemma_fast"] = ["GemmaTokenizerFast"]
45
+
46
+
47
+ try:
48
+ if not is_torch_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ _import_structure["modeling_gemma"] = [
54
+ "GemmaForCausalLM",
55
+ "GemmaModel",
56
+ "GemmaPreTrainedModel",
57
+ "GemmaForSequenceClassification",
58
+ ]
59
+
60
+ try:
61
+ if not is_flax_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ _import_structure["modeling_flax_gemma"] = [
67
+ "FlaxGemmaForCausalLM",
68
+ "FlaxGemmaModel",
69
+ "FlaxGemmaPreTrainedModel",
70
+ ]
71
+
72
+
73
+ if TYPE_CHECKING:
74
+ from .configuration_gemma import GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP, GemmaConfig
75
+
76
+ try:
77
+ if not is_sentencepiece_available():
78
+ raise OptionalDependencyNotAvailable()
79
+ except OptionalDependencyNotAvailable:
80
+ pass
81
+ else:
82
+ from .tokenization_gemma import GemmaTokenizer
83
+
84
+ try:
85
+ if not is_tokenizers_available():
86
+ raise OptionalDependencyNotAvailable()
87
+ except OptionalDependencyNotAvailable:
88
+ pass
89
+ else:
90
+ from .tokenization_gemma_fast import GemmaTokenizerFast
91
+
92
+ try:
93
+ if not is_torch_available():
94
+ raise OptionalDependencyNotAvailable()
95
+ except OptionalDependencyNotAvailable:
96
+ pass
97
+ else:
98
+ from .modeling_gemma import (
99
+ GemmaForCausalLM,
100
+ GemmaForSequenceClassification,
101
+ GemmaModel,
102
+ GemmaPreTrainedModel,
103
+ )
104
+
105
+ try:
106
+ if not is_flax_available():
107
+ raise OptionalDependencyNotAvailable()
108
+ except OptionalDependencyNotAvailable:
109
+ pass
110
+ else:
111
+ from .modeling_flax_gemma import (
112
+ FlaxGemmaForCausalLM,
113
+ FlaxGemmaModel,
114
+ FlaxGemmaPreTrainedModel,
115
+ )
116
+
117
+
118
+ else:
119
+ import sys
120
+
121
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
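The `__init__` above registers each submodule only when its optional dependency is importable, then hands the whole mapping to `_LazyModule` so nothing is imported until first use. A stripped-down sketch of the dependency-guard half of that pattern, using only the standard library (the `sentencepiece` probe is illustrative):

import importlib.util

_import_structure = {"configuration_gemma": ["GemmaConfig"]}

# register the slow tokenizer only if its optional dependency is installed
if importlib.util.find_spec("sentencepiece") is not None:
    _import_structure["tokenization_gemma"] = ["GemmaTokenizer"]

# nothing has been imported yet; a lazy module resolves these names on first attribute access
print(_import_structure)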
venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.63 kB)
venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/configuration_gemma.cpython-310.pyc ADDED
Binary file (6.3 kB)
venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/convert_gemma_weights_to_hf.cpython-310.pyc ADDED
Binary file (4.57 kB)
venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/modeling_flax_gemma.cpython-310.pyc ADDED
Binary file (23.2 kB)
venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/modeling_gemma.cpython-310.pyc ADDED
Binary file (40.3 kB)
venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/tokenization_gemma.cpython-310.pyc ADDED
Binary file (11.1 kB)
venv/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/tokenization_gemma_fast.cpython-310.pyc ADDED
Binary file (6.94 kB)
venv/lib/python3.10/site-packages/transformers/models/gemma/configuration_gemma.py ADDED
@@ -0,0 +1,153 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Gemma model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class GemmaConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate an Gemma
30
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
31
+ defaults will yield a similar configuration to that of the Gemma-7B.
32
+
33
+ e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 256000):
41
+ Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`GemmaModel`]
43
+ hidden_size (`int`, *optional*, defaults to 3072):
44
+ Dimension of the hidden representations.
45
+ intermediate_size (`int`, *optional*, defaults to 24576):
46
+ Dimension of the MLP representations.
47
+ num_hidden_layers (`int`, *optional*, defaults to 28):
48
+ Number of hidden layers in the Transformer decoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 16):
50
+ Number of attention heads for each attention layer in the Transformer decoder.
51
+ num_key_value_heads (`int`, *optional*, defaults to 16):
52
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
53
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
54
+ `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
55
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
56
+ by meanpooling all the original heads within that group. For more details checkout [this
57
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
58
+ `num_attention_heads`.
59
+ head_dim (`int`, *optional*, defaults to 256):
60
+ The attention head dimension.
61
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
62
+ The legacy activation function. It is overwritten by the `hidden_activation`.
63
+ hidden_activation (`str` or `function`, *optional*):
64
+ The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
65
+ if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
66
+ max_position_embeddings (`int`, *optional*, defaults to 8192):
67
+ The maximum sequence length that this model might ever be used with.
68
+ initializer_range (`float`, *optional*, defaults to 0.02):
69
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
70
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
71
+ The epsilon used by the rms normalization layers.
72
+ use_cache (`bool`, *optional*, defaults to `True`):
73
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
74
+ relevant if `config.is_decoder=True`.
75
+ pad_token_id (`int`, *optional*, defaults to 0):
76
+ Padding token id.
77
+ eos_token_id (`int`, *optional*, defaults to 1):
78
+ End of stream token id.
79
+ bos_token_id (`int`, *optional*, defaults to 2):
80
+ Beginning of stream token id.
81
+ tie_word_embeddings (`bool`, *optional*, defaults to `True`):
82
+ Whether to tie weight embeddings
83
+ rope_theta (`float`, *optional*, defaults to 10000.0):
84
+ The base period of the RoPE embeddings.
85
+ attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
86
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
87
+ attention_dropout (`float`, *optional*, defaults to 0.0):
88
+ The dropout ratio for the attention probabilities.
89
+
90
+ ```python
91
+ >>> from transformers import GemmaModel, GemmaConfig
92
+
93
+ >>> # Initializing a Gemma gemma-7b style configuration
94
+ >>> configuration = GemmaConfig()
95
+
96
+ >>> # Initializing a model from the gemma-7b style configuration
97
+ >>> model = GemmaModel(configuration)
98
+
99
+ >>> # Accessing the model configuration
100
+ >>> configuration = model.config
101
+ ```"""
102
+
103
+ model_type = "gemma"
104
+ keys_to_ignore_at_inference = ["past_key_values"]
105
+
106
+ def __init__(
107
+ self,
108
+ vocab_size=256000,
109
+ hidden_size=3072,
110
+ intermediate_size=24576,
111
+ num_hidden_layers=28,
112
+ num_attention_heads=16,
113
+ num_key_value_heads=16,
114
+ head_dim=256,
115
+ hidden_act="gelu_pytorch_tanh",
116
+ hidden_activation=None,
117
+ max_position_embeddings=8192,
118
+ initializer_range=0.02,
119
+ rms_norm_eps=1e-6,
120
+ use_cache=True,
121
+ pad_token_id=0,
122
+ eos_token_id=1,
123
+ bos_token_id=2,
124
+ tie_word_embeddings=True,
125
+ rope_theta=10000.0,
126
+ attention_bias=False,
127
+ attention_dropout=0.0,
128
+ **kwargs,
129
+ ):
130
+ self.vocab_size = vocab_size
131
+ self.max_position_embeddings = max_position_embeddings
132
+ self.hidden_size = hidden_size
133
+ self.intermediate_size = intermediate_size
134
+ self.num_hidden_layers = num_hidden_layers
135
+ self.num_attention_heads = num_attention_heads
136
+ self.head_dim = head_dim
137
+ self.num_key_value_heads = num_key_value_heads
138
+ self.hidden_act = hidden_act
139
+ self.hidden_activation = hidden_activation
140
+ self.initializer_range = initializer_range
141
+ self.rms_norm_eps = rms_norm_eps
142
+ self.use_cache = use_cache
143
+ self.rope_theta = rope_theta
144
+ self.attention_bias = attention_bias
145
+ self.attention_dropout = attention_dropout
146
+
147
+ super().__init__(
148
+ pad_token_id=pad_token_id,
149
+ bos_token_id=bos_token_id,
150
+ eos_token_id=eos_token_id,
151
+ tie_word_embeddings=tie_word_embeddings,
152
+ **kwargs,
153
+ )
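The `num_key_value_heads` argument described in the docstring above is the single switch between MHA, MQA and GQA. Below is a minimal sketch of the three cases, assuming only that `GemmaConfig` is importable from `transformers` as this diff adds it; the 2B-style sizes mirror the conversion script further down, while the GQA sizes are purely illustrative and do not correspond to a released checkpoint.

```python
from transformers import GemmaConfig

# num_key_value_heads == num_attention_heads -> plain multi-head attention (MHA), the 7B default.
mha_config = GemmaConfig()

# A single key/value head -> multi-query attention (MQA), as in the 2B-style config
# used by convert_gemma_weights_to_hf.py below.
mqa_config = GemmaConfig(
    num_hidden_layers=18,
    num_attention_heads=8,
    num_key_value_heads=1,
    hidden_size=2048,
    intermediate_size=16384,
)

# Anything in between is grouped-query attention (GQA): 16 query heads sharing 4 key/value heads
# (illustrative values only).
gqa_config = GemmaConfig(num_attention_heads=16, num_key_value_heads=4)

print(mha_config.num_key_value_heads, mqa_config.num_key_value_heads, gqa_config.num_key_value_heads)
```

Note that the config only records the head counts; the grouping itself is carried out by `repeat_kv` in `modeling_gemma.py` below.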
venv/lib/python3.10/site-packages/transformers/models/gemma/convert_gemma_weights_to_hf.py ADDED
@@ -0,0 +1,206 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import argparse
15
+ import os
16
+ import warnings
17
+
18
+ import torch
19
+ from accelerate import init_empty_weights
20
+
21
+ from transformers import GemmaConfig, GemmaForCausalLM, GemmaTokenizer
22
+
23
+
24
+ try:
25
+ from transformers import GemmaTokenizerFast
26
+ except ImportError as e:
27
+ warnings.warn(e)
28
+ warnings.warn(
29
+ "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
30
+ )
31
+ GemmaTokenizerFast = None
32
+
33
+ """
34
+ Sample usage:
35
+
36
+ ```
37
+ python src/transformers/models/gemma/convert_gemma_weights_to_hf.py \
38
+ --input_dir /path/to/downloaded/gemma/weights --model_size 7B --output_dir /output/path
39
+ ```
40
+
41
+ Thereafter, models can be loaded via:
42
+
43
+ ```py
44
+ from transformers import GemmaForCausalLM, GemmaTokenizerFast
45
+
46
+ model = GemmaForCausalLM.from_pretrained("/output/path")
47
+ tokenizer = GemmaTokenizerFast.from_pretrained("/output/path")
48
+ ```
49
+
50
+ Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
51
+ come in several checkpoints, each of them contains a part of every weight of the model, so they all need to be loaded in RAM).
52
+ """
53
+
54
+ gemma_2b_config = GemmaConfig(
55
+ num_hidden_layers=18,
56
+ num_attention_heads=8,
57
+ num_key_value_heads=1,
58
+ hidden_size=2048,
59
+ intermediate_size=16384,
60
+ )
61
+
62
+ gemma_7b_config = GemmaConfig()
63
+
64
+ CONFIG_MAPPING = {"2B": gemma_2b_config, "7B": gemma_7b_config}
65
+ LAYER_NAME_MAPPING = {"embedder.weight": "model.embed_tokens.weight"}
66
+
67
+
68
+ def write_model(save_path, input_base_path, config, safe_serialization=True, push_to_hub=False, dtype=torch.float32):
69
+ num_attn_heads = config.num_attention_heads
70
+ hidden_size = config.hidden_size
71
+ num_kv_heads = config.num_key_value_heads
72
+ head_dim = config.head_dim
73
+
74
+ print(f"Fetching all parameters from the checkpoint at '{input_base_path}'")
75
+ model_state_dict = torch.load(input_base_path, map_location="cpu")["model_state_dict"]
76
+ model_state_dict.pop("freqs_cis")
77
+
78
+ state_dict = {}
79
+ for k, v in model_state_dict.items():
80
+ if "qkv_proj" in k:
81
+ if num_kv_heads == 1:
82
+ v = v.reshape(num_attn_heads + num_kv_heads * 2, head_dim, hidden_size)
83
+ q_proj = v[:num_attn_heads, ...]
84
+ k_proj = v[num_attn_heads : num_attn_heads + num_kv_heads, ...].repeat(num_kv_heads, 1, 1)
85
+ v_proj = v[-num_kv_heads:, ...].repeat(num_kv_heads, 1, 1)
86
+
87
+ state_dict[k.replace("qkv_proj", "q_proj")] = q_proj.reshape(
88
+ num_attn_heads * head_dim, hidden_size
89
+ ).clone()
90
+ state_dict[k.replace("qkv_proj", "k_proj")] = k_proj.reshape(
91
+ num_kv_heads * head_dim, hidden_size
92
+ ).clone()
93
+ state_dict[k.replace("qkv_proj", "v_proj")] = v_proj[0].clone()
94
+ else:
95
+ q_proj, k_proj, v_proj = torch.split(v, v.shape[0] // 3, 0)
96
+ state_dict[k.replace("qkv_proj", "q_proj")] = q_proj.reshape(
97
+ num_attn_heads * head_dim, hidden_size
98
+ ).clone()
99
+ state_dict[k.replace("qkv_proj", "k_proj")] = k_proj.reshape(
100
+ num_kv_heads * head_dim, hidden_size
101
+ ).clone()
102
+ state_dict[k.replace("qkv_proj", "v_proj")] = v_proj.clone()
103
+
104
+ elif k == "embedder.weight":
105
+ state_dict[LAYER_NAME_MAPPING[k]] = v
106
+ state_dict["lm_head.weight"] = v
107
+ else:
108
+ state_dict[k] = v
109
+
110
+ torch.set_default_dtype(dtype)
111
+
112
+ print("Loading the checkpoint in a Gemma model.")
113
+ with init_empty_weights():
114
+ model = GemmaForCausalLM(config)
115
+ model.load_state_dict(state_dict, assign=True, strict=False)
116
+
117
+ model.config.torch_dtype = torch.float32
118
+ del model.config._name_or_path
119
+ print("Saving in the Transformers format.")
120
+
121
+ if push_to_hub:
122
+ print(f"pushing the model to {save_path}")
123
+ model.push_to_hub(save_path, safe_serialization=safe_serialization, private=True)
124
+ else:
125
+ model.save_pretrained(save_path, safe_serialization=safe_serialization)
126
+
127
+
128
+ def write_tokenizer(input_tokenizer_path, save_path, push_to_hub=False):
129
+ # Initialize the tokenizer based on the `spm` model
130
+ tokenizer_class = GemmaTokenizer if GemmaTokenizerFast is None else GemmaTokenizerFast
131
+ print(f"Saving a {tokenizer_class.__name__} to {save_path}.")
132
+ tokenizer = tokenizer_class(input_tokenizer_path)
133
+ if push_to_hub:
134
+ tokenizer.push_to_hub(save_path)
135
+ else:
136
+ tokenizer.save_pretrained(save_path)
137
+
138
+
139
+ def main():
140
+ parser = argparse.ArgumentParser()
141
+ parser.add_argument(
142
+ "--input_checkpoint",
143
+ help="Absolute path to the target Gemma weights.",
144
+ required=True,
145
+ )
146
+ parser.add_argument(
147
+ "--tokenizer_checkpoint",
148
+ help="Location of Gemma tokenizer model",
149
+ )
150
+ parser.add_argument(
151
+ "--model_size",
152
+ default="7B",
153
+ choices=["2B", "7B", "tokenizer_only"],
154
+ help="'f' models correspond to the finetuned versions, and are specific to the Gemma2 official release. For more details on Gemma2, checkout the original repo: https://huggingface.co/google/gemma-7b",
155
+ )
156
+ parser.add_argument(
157
+ "--output_dir",
158
+ default="google/gemma-7b",
159
+ help="Location to write HF model and tokenizer",
160
+ )
161
+ parser.add_argument(
162
+ "--pickle_serialization",
163
+ help="Whether or not to save using `safetensors`.",
164
+ action="store_true",
165
+ default=False,
166
+ )
167
+ parser.add_argument(
168
+ "--convert_tokenizer",
169
+ help="Whether or not to convert the tokenizer as well.",
170
+ action="store_true",
171
+ default=False,
172
+ )
173
+ parser.add_argument(
174
+ "--push_to_hub",
175
+ help="Whether or not to push the model to the hub at `output_dir` instead of saving it locally.",
176
+ action="store_true",
177
+ default=False,
178
+ )
179
+ parser.add_argument(
180
+ "--dtype",
181
+ default="float32",
182
+ help="Target dtype of the converted model",
183
+ )
184
+ args = parser.parse_args()
185
+
186
+ if args.convert_tokenizer:
187
+ if args.tokenizer_checkpoint is None:
188
+ raise ValueError("Path to the tokenizer is required when passing --convert_tokenizer")
189
+
190
+ spm_path = os.path.join(args.tokenizer_checkpoint)
191
+ write_tokenizer(spm_path, args.output_dir, args.push_to_hub)
192
+
193
+ config = CONFIG_MAPPING[args.model_size]
194
+ dtype = getattr(torch, args.dtype)
195
+ write_model(
196
+ config=config,
197
+ input_base_path=args.input_checkpoint,
198
+ save_path=args.output_dir,
199
+ safe_serialization=not args.pickle_serialization,
200
+ push_to_hub=args.push_to_hub,
201
+ dtype=dtype,
202
+ )
203
+
204
+
205
+ if __name__ == "__main__":
206
+ main()
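As a reading aid for the `qkv_proj` handling in `write_model` above, here is a small self-contained sketch of the multi-head branch (`num_kv_heads == num_attn_heads`), using made-up tensor sizes rather than the real Gemma-7B dimensions.

```python
import torch

# Made-up sizes for illustration: 4 query heads, 4 key/value heads, head_dim 8, hidden_size 32.
num_attn_heads, num_kv_heads, head_dim, hidden_size = 4, 4, 8, 32

# In the original checkpoint, q, k and v are fused into a single projection matrix.
fused_qkv = torch.randn((num_attn_heads + 2 * num_kv_heads) * head_dim, hidden_size)

# Same split the script applies when num_kv_heads == num_attn_heads: three equal chunks.
q_proj, k_proj, v_proj = torch.split(fused_qkv, fused_qkv.shape[0] // 3, 0)

assert q_proj.shape == (num_attn_heads * head_dim, hidden_size)
assert k_proj.shape == (num_kv_heads * head_dim, hidden_size)
assert v_proj.shape == (num_kv_heads * head_dim, hidden_size)
```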
venv/lib/python3.10/site-packages/transformers/models/gemma/modeling_flax_gemma.py ADDED
@@ -0,0 +1,773 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Google Inc., and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Flax Gemma model."""
16
+ from typing import Optional, Tuple
17
+
18
+ import flax.linen as nn
19
+ import jax
20
+ import jax.numpy as jnp
21
+ import numpy as np
22
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
23
+ from flax.linen import combine_masks, make_causal_mask
24
+ from flax.linen.attention import dot_product_attention_weights
25
+ from flax.traverse_util import flatten_dict, unflatten_dict
26
+ from jax import lax
27
+
28
+ from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
29
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
30
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
31
+ from .configuration_gemma import GemmaConfig
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ _CONFIG_FOR_DOC = "GemmaConfig"
37
+ _CHECKPOINT_FOR_DOC = "google/gemma-2b"
38
+ _REAL_CHECKPOINT_FOR_DOC = "openlm-research/open_llama_3b_v2"
39
+
40
+ GEMMA_START_DOCSTRING = r"""
41
+
42
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
43
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
44
+ etc.)
45
+
46
+ This model is also a Flax Linen
47
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
48
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
49
+
50
+ Finally, this model supports inherent JAX features such as:
51
+
52
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
53
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
54
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
55
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
56
+
57
+ Parameters:
58
+ config ([`GemmaConfig`]): Model configuration class with all the parameters of the model.
59
+ Initializing with a config file does not load the weights associated with the model, only the
60
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
61
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
62
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16`, or
63
+ `jax.numpy.bfloat16`.
64
+
65
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
66
+ specified all the computation will be performed with the given `dtype`.
67
+
68
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
69
+ parameters.**
70
+
71
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
72
+ [`~FlaxPreTrainedModel.to_bf16`].
73
+ """
74
+
75
+ GEMMA_INPUTS_DOCSTRING = r"""
76
+ Args:
77
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
78
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
79
+ it.
80
+
81
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
82
+ [`PreTrainedTokenizer.__call__`] for details.
83
+
84
+ [What are input IDs?](../glossary#input-ids)
85
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
86
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
87
+
88
+ - 1 for tokens that are **not masked**,
89
+ - 0 for tokens that are **masked**.
90
+
91
+ [What are attention masks?](../glossary#attention-mask)
92
+
93
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
94
+ [`PreTrainedTokenizer.__call__`] for details.
95
+
96
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
97
+ `past_key_values`).
98
+
99
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
100
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
101
+ information on the default strategy.
102
+
103
+ - 1 indicates the head is **not masked**,
104
+ - 0 indicates the head is **masked**.
105
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
106
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
107
+ config.n_positions - 1]`.
108
+
109
+ [What are position IDs?](../glossary#position-ids)
110
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
111
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
112
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
113
+ output_attentions (`bool`, *optional*):
114
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
115
+ tensors for more detail.
116
+ output_hidden_states (`bool`, *optional*):
117
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
118
+ more detail.
119
+ return_dict (`bool`, *optional*):
120
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
121
+ """
122
+
123
+
124
+ def create_sinusoidal_positions(num_pos, dim):
125
+ inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2)[: (dim // 2)] / dim))
126
+ freqs = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
127
+
128
+ emb = np.concatenate((freqs, freqs), axis=-1)
129
+ out = np.concatenate((np.sin(emb)[:, None, :], np.cos(emb)[:, None, :]), axis=-1)
130
+ return jnp.array(out[:, :, :num_pos])
131
+
132
+
133
+ # Copied from transformers.models.llama.modeling_flax_llama.rotate_half
134
+ def rotate_half(tensor):
135
+ """Rotates half the hidden dims of the input."""
136
+ rotate_half_tensor = jnp.concatenate(
137
+ (-tensor[..., tensor.shape[-1] // 2 :], tensor[..., : tensor.shape[-1] // 2]), axis=-1
138
+ )
139
+ return rotate_half_tensor
140
+
141
+
142
+ # Copied from transformers.models.llama.modeling_flax_llama.apply_rotary_pos_emb
143
+ def apply_rotary_pos_emb(tensor, sin_pos, cos_pos):
144
+ return (tensor * cos_pos) + (rotate_half(tensor) * sin_pos)
145
+
146
+
147
+ class FlaxGemmaRMSNorm(nn.Module):
148
+ config: GemmaConfig
149
+ dtype: jnp.dtype = jnp.float32
150
+
151
+ def setup(self):
152
+ self.epsilon = self.config.rms_norm_eps
153
+ self.weight = self.param("weight", lambda _, shape: jnp.ones(shape), self.config.hidden_size)
154
+
155
+ def __call__(self, hidden_states):
156
+ variance = jnp.asarray(hidden_states, dtype=jnp.float32)
157
+ variance = jnp.power(variance, 2)
158
+ variance = variance.mean(-1, keepdims=True)
159
+ # use `jax.numpy.sqrt` as `jax.lax.rsqrt` does not match `torch.rsqrt`
160
+ hidden_states = hidden_states / jnp.sqrt(variance + self.epsilon)
161
+
162
+ return (1 + self.weight) * jnp.asarray(hidden_states, dtype=self.dtype)
163
+
164
+
165
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaRotaryEmbedding with Llama->Gemma
166
+ class FlaxGemmaRotaryEmbedding(nn.Module):
167
+ config: GemmaConfig
168
+ dtype: jnp.dtype = jnp.float32
169
+
170
+ # Ignore copy
171
+ def setup(self):
172
+ head_dim = self.config.head_dim
173
+ self.sincos = create_sinusoidal_positions(self.config.max_position_embeddings, head_dim)
174
+
175
+ def __call__(self, key, query, position_ids):
176
+ sincos = self.sincos[position_ids]
177
+ sin_pos, cos_pos = jnp.split(sincos, 2, axis=-1)
178
+
179
+ key = apply_rotary_pos_emb(key, sin_pos, cos_pos)
180
+ query = apply_rotary_pos_emb(query, sin_pos, cos_pos)
181
+
182
+ key = jnp.asarray(key, dtype=self.dtype)
183
+ query = jnp.asarray(query, dtype=self.dtype)
184
+
185
+ return key, query
186
+
187
+
188
+ class FlaxGemmaAttention(nn.Module):
189
+ config: GemmaConfig
190
+ dtype: jnp.dtype = jnp.float32
191
+ causal: bool = True
192
+ is_cross_attention: bool = False
193
+
194
+ def setup(self):
195
+ config = self.config
196
+ self.embed_dim = config.hidden_size
197
+ self.num_heads = config.num_attention_heads
198
+ self.head_dim = config.head_dim
199
+ self.attention_softmax_in_fp32 = self.dtype is not jnp.float32
200
+
201
+ self.num_key_value_heads = config.num_key_value_heads
202
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
203
+
204
+ kernel = jax.nn.initializers.normal(self.config.initializer_range)
205
+ self.q_proj = nn.Dense(
206
+ self.num_heads * self.head_dim, use_bias=config.attention_bias, dtype=self.dtype, kernel_init=kernel
207
+ )
208
+ self.k_proj = nn.Dense(
209
+ self.num_key_value_heads * self.head_dim,
210
+ use_bias=config.attention_bias,
211
+ dtype=self.dtype,
212
+ kernel_init=kernel,
213
+ )
214
+ self.v_proj = nn.Dense(
215
+ self.num_key_value_heads * self.head_dim,
216
+ use_bias=config.attention_bias,
217
+ dtype=self.dtype,
218
+ kernel_init=kernel,
219
+ )
220
+ self.o_proj = nn.Dense(self.embed_dim, use_bias=config.attention_bias, dtype=self.dtype, kernel_init=kernel)
221
+
222
+ self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")
223
+ self.rotary_emb = FlaxGemmaRotaryEmbedding(config, dtype=self.dtype)
224
+
225
+ def _split_heads(self, hidden_states, num_heads):
226
+ return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim))
227
+
228
+ def _merge_heads(self, hidden_states):
229
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads * self.head_dim,))
230
+
231
+ @nn.compact
232
+ # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoSelfAttention._concatenate_to_cache
233
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
234
+ """
235
+ This function takes projected key, value states from a single input token and concatenates the states to cached
236
+ states from previous steps. This function is slighly adapted from the official Flax repository:
237
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
238
+ """
239
+ # detect if we're initializing by absence of existing cache data.
240
+ is_initialized = self.has_variable("cache", "cached_key")
241
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
242
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
243
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
244
+
245
+ if is_initialized:
246
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
247
+ # update key, value caches with our new 1d spatial slices
248
+ cur_index = cache_index.value
249
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
250
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
251
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
252
+ cached_key.value = key
253
+ cached_value.value = value
254
+ num_updated_cache_vectors = query.shape[1]
255
+ cache_index.value = cache_index.value + num_updated_cache_vectors
256
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
257
+ pad_mask = jnp.broadcast_to(
258
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
259
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
260
+ )
261
+ attention_mask = combine_masks(pad_mask, attention_mask)
262
+ return key, value, attention_mask
263
+
264
+ def __call__(
265
+ self,
266
+ hidden_states,
267
+ attention_mask,
268
+ position_ids,
269
+ deterministic: bool = True,
270
+ init_cache: bool = False,
271
+ output_attentions: bool = False,
272
+ ):
273
+ query = self.q_proj(hidden_states)
274
+ key = self.k_proj(hidden_states)
275
+ value = self.v_proj(hidden_states)
276
+
277
+ query = self._split_heads(query, self.num_heads)
278
+ key = self._split_heads(key, self.num_key_value_heads)
279
+ value = self._split_heads(value, self.num_key_value_heads)
280
+
281
+ key, query = self.rotary_emb(key, query, position_ids)
282
+
283
+ query_length, key_length = query.shape[1], key.shape[1]
284
+
285
+ if self.has_variable("cache", "cached_key"):
286
+ mask_shift = self.variables["cache"]["cache_index"]
287
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
288
+ causal_mask = lax.dynamic_slice(
289
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
290
+ )
291
+ else:
292
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
293
+
294
+ batch_size = hidden_states.shape[0]
295
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
296
+
297
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
298
+ attention_mask = combine_masks(attention_mask, causal_mask)
299
+
300
+ dropout_rng = None
301
+ if not deterministic and self.config.attention_dropout > 0.0:
302
+ dropout_rng = self.make_rng("dropout")
303
+
304
+ # During fast autoregressive decoding, we feed one position at a time,
305
+ # and cache the keys and values step by step.
306
+ if self.has_variable("cache", "cached_key") or init_cache:
307
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
308
+
309
+ # transform boolean mask into float mask
310
+ attention_bias = lax.select(
311
+ attention_mask > 0,
312
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
313
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
314
+ )
315
+
316
+ key = jnp.repeat(key, repeats=self.num_key_value_groups, axis=2)
317
+ value = jnp.repeat(value, repeats=self.num_key_value_groups, axis=2)
318
+
319
+ # usual dot product attention
320
+ attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype
321
+ attn_weights = dot_product_attention_weights(
322
+ query,
323
+ key,
324
+ bias=attention_bias,
325
+ dropout_rng=dropout_rng,
326
+ dropout_rate=self.config.attention_dropout,
327
+ deterministic=deterministic,
328
+ dtype=attention_dtype,
329
+ )
330
+
331
+ if self.attention_softmax_in_fp32:
332
+ attn_weights = attn_weights.astype(self.dtype)
333
+
334
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
335
+ attn_output = self._merge_heads(attn_output)
336
+ attn_output = self.o_proj(attn_output)
337
+
338
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
339
+ return outputs
340
+
341
+
342
+ class FlaxGemmaMLP(nn.Module):
343
+ config: GemmaConfig
344
+ dtype: jnp.dtype = jnp.float32
345
+
346
+ def setup(self):
347
+ embed_dim = self.config.hidden_size
348
+ inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * embed_dim
349
+
350
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
351
+ if self.config.hidden_activation is None:
352
+ logger.warning_once(
353
+ "Gemma's activation function should be approximate GeLU and not exact GeLU. "
354
+ "Changing the activation function to `gelu_pytorch_tanh`."
355
+ f"if you want to use the legacy `{self.config.hidden_act}`, "
356
+ f"edit the `model.config` to set `hidden_activation={self.config.hidden_act}` "
357
+ " instead of `hidden_act`. See https://github.com/huggingface/transformers/pull/29402 for more details."
358
+ )
359
+ hidden_activation = "gelu_pytorch_tanh"
360
+ else:
361
+ hidden_activation = self.config.hidden_activation
362
+ self.act = ACT2FN[hidden_activation]
363
+
364
+ self.gate_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
365
+ self.down_proj = nn.Dense(embed_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
366
+ self.up_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
367
+
368
+ def __call__(self, hidden_states):
369
+ up_proj_states = self.up_proj(hidden_states)
370
+ gate_states = self.act(self.gate_proj(hidden_states))
371
+
372
+ hidden_states = self.down_proj(up_proj_states * gate_states)
373
+ return hidden_states
374
+
375
+
376
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaDecoderLayer with Llama->Gemma
377
+ class FlaxGemmaDecoderLayer(nn.Module):
378
+ config: GemmaConfig
379
+ dtype: jnp.dtype = jnp.float32
380
+
381
+ def setup(self):
382
+ self.input_layernorm = FlaxGemmaRMSNorm(self.config, dtype=self.dtype)
383
+ self.self_attn = FlaxGemmaAttention(self.config, dtype=self.dtype)
384
+ self.post_attention_layernorm = FlaxGemmaRMSNorm(self.config, dtype=self.dtype)
385
+ self.mlp = FlaxGemmaMLP(self.config, dtype=self.dtype)
386
+
387
+ def __call__(
388
+ self,
389
+ hidden_states,
390
+ attention_mask=None,
391
+ position_ids=None,
392
+ deterministic: bool = True,
393
+ init_cache: bool = False,
394
+ output_attentions: bool = False,
395
+ ):
396
+ residual = hidden_states
397
+ hidden_states = self.input_layernorm(hidden_states)
398
+ outputs = self.self_attn(
399
+ hidden_states,
400
+ attention_mask=attention_mask,
401
+ position_ids=position_ids,
402
+ deterministic=deterministic,
403
+ init_cache=init_cache,
404
+ output_attentions=output_attentions,
405
+ )
406
+ # residual connection
407
+ attn_output = outputs[0]
408
+ hidden_states = residual + attn_output
409
+
410
+ residual = hidden_states
411
+ hidden_states = self.post_attention_layernorm(hidden_states)
412
+ hidden_states = self.mlp(hidden_states)
413
+ # residual connection
414
+ hidden_states = residual + hidden_states
415
+
416
+ return (hidden_states,) + outputs[1:]
417
+
418
+
419
+ # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoPreTrainedModel with GPTNeo->Gemma, GPT_NEO->GEMMA, transformer->model
420
+ class FlaxGemmaPreTrainedModel(FlaxPreTrainedModel):
421
+ """
422
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
423
+ models.
424
+ """
425
+
426
+ config_class = GemmaConfig
427
+ base_model_prefix = "model"
428
+ module_class: nn.Module = None
429
+
430
+ def __init__(
431
+ self,
432
+ config: GemmaConfig,
433
+ input_shape: Tuple = (1, 1),
434
+ seed: int = 0,
435
+ dtype: jnp.dtype = jnp.float32,
436
+ _do_init: bool = True,
437
+ **kwargs,
438
+ ):
439
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
440
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
441
+
442
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
443
+ # init input tensors
444
+ input_ids = jnp.zeros(input_shape, dtype="i4")
445
+ attention_mask = jnp.ones_like(input_ids)
446
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
447
+ params_rng, dropout_rng = jax.random.split(rng)
448
+ rngs = {"params": params_rng, "dropout": dropout_rng}
449
+
450
+ random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"]
451
+
452
+ if params is not None:
453
+ random_params = flatten_dict(unfreeze(random_params))
454
+ params = flatten_dict(unfreeze(params))
455
+ for missing_key in self._missing_keys:
456
+ params[missing_key] = random_params[missing_key]
457
+ self._missing_keys = set()
458
+ return freeze(unflatten_dict(params))
459
+ else:
460
+ return random_params
461
+
462
+ def init_cache(self, batch_size, max_length):
463
+ r"""
464
+ Args:
465
+ batch_size (`int`):
466
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
467
+ max_length (`int`):
468
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
469
+ cache.
470
+ """
471
+ # init input variables to retrieve cache
472
+ input_ids = jnp.ones((batch_size, max_length))
473
+ attention_mask = jnp.ones_like(input_ids)
474
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
475
+
476
+ init_variables = self.module.init(
477
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
478
+ )
479
+ return unfreeze(init_variables["cache"])
480
+
481
+ @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)
482
+ def __call__(
483
+ self,
484
+ input_ids,
485
+ attention_mask=None,
486
+ position_ids=None,
487
+ params: dict = None,
488
+ past_key_values: dict = None,
489
+ dropout_rng: jax.random.PRNGKey = None,
490
+ train: bool = False,
491
+ output_attentions: Optional[bool] = None,
492
+ output_hidden_states: Optional[bool] = None,
493
+ return_dict: Optional[bool] = None,
494
+ ):
495
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
496
+ output_hidden_states = (
497
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
498
+ )
499
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
500
+
501
+ batch_size, sequence_length = input_ids.shape
502
+
503
+ if position_ids is None:
504
+ if past_key_values is not None:
505
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
506
+
507
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
508
+
509
+ if attention_mask is None:
510
+ attention_mask = jnp.ones((batch_size, sequence_length))
511
+
512
+ # Handle any PRNG if needed
513
+ rngs = {}
514
+ if dropout_rng is not None:
515
+ rngs["dropout"] = dropout_rng
516
+
517
+ inputs = {"params": params or self.params}
518
+
519
+ # If past_key_values are passed, the cache is already initialized and the private flag init_cache has to be passed down to make sure it is used. The cache also has to be marked as mutable so that it can be changed by the FlaxGemmaAttention module.
520
+ if past_key_values:
521
+ inputs["cache"] = past_key_values
522
+ mutable = ["cache"]
523
+ else:
524
+ mutable = False
525
+
526
+ outputs = self.module.apply(
527
+ inputs,
528
+ jnp.array(input_ids, dtype="i4"),
529
+ jnp.array(attention_mask, dtype="i4"),
530
+ jnp.array(position_ids, dtype="i4"),
531
+ not train,
532
+ False,
533
+ output_attentions,
534
+ output_hidden_states,
535
+ return_dict,
536
+ rngs=rngs,
537
+ mutable=mutable,
538
+ )
539
+
540
+ # add updated cache to model output
541
+ if past_key_values is not None and return_dict:
542
+ outputs, past_key_values = outputs
543
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
544
+ return outputs
545
+ elif past_key_values is not None and not return_dict:
546
+ outputs, past_key_values = outputs
547
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
548
+
549
+ return outputs
550
+
551
+
552
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaLayerCollection with Llama->Gemma
553
+ class FlaxGemmaLayerCollection(nn.Module):
554
+ config: GemmaConfig
555
+ dtype: jnp.dtype = jnp.float32
556
+
557
+ def setup(self):
558
+ self.blocks = [
559
+ FlaxGemmaDecoderLayer(self.config, dtype=self.dtype, name=str(i))
560
+ for i in range(self.config.num_hidden_layers)
561
+ ]
562
+
563
+ def __call__(
564
+ self,
565
+ hidden_states,
566
+ attention_mask=None,
567
+ position_ids=None,
568
+ deterministic: bool = True,
569
+ init_cache: bool = False,
570
+ output_attentions: bool = False,
571
+ output_hidden_states: bool = False,
572
+ return_dict: bool = False,
573
+ ):
574
+ all_attentions = () if output_attentions else None
575
+ all_hidden_states = () if output_hidden_states else None
576
+
577
+ for block in self.blocks:
578
+ if output_hidden_states:
579
+ all_hidden_states += (hidden_states,)
580
+ layer_outputs = block(
581
+ hidden_states,
582
+ attention_mask=attention_mask,
583
+ position_ids=position_ids,
584
+ deterministic=deterministic,
585
+ init_cache=init_cache,
586
+ output_attentions=output_attentions,
587
+ )
588
+ hidden_states = layer_outputs[0]
589
+
590
+ if output_attentions:
591
+ all_attentions += (layer_outputs[1],)
592
+
593
+ # this contains possible `None` values - `FlaxGemmaModule` will filter them out
594
+ outputs = (hidden_states, all_hidden_states, all_attentions)
595
+
596
+ return outputs
597
+
598
+
599
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaModule with Llama->Gemma
600
+ class FlaxGemmaModule(nn.Module):
601
+ config: GemmaConfig
602
+ dtype: jnp.dtype = jnp.float32
603
+
604
+ def setup(self):
605
+ self.hidden_size = self.config.hidden_size
606
+ embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range)
607
+ self.embed_tokens = nn.Embed(
608
+ self.config.vocab_size,
609
+ self.hidden_size,
610
+ embedding_init=embedding_init,
611
+ dtype=self.dtype,
612
+ )
613
+ self.layers = FlaxGemmaLayerCollection(self.config, dtype=self.dtype)
614
+ self.norm = FlaxGemmaRMSNorm(self.config, dtype=self.dtype)
615
+
616
+ # Ignore copy
617
+ def __call__(
618
+ self,
619
+ input_ids,
620
+ attention_mask=None,
621
+ position_ids=None,
622
+ deterministic=True,
623
+ init_cache: bool = False,
624
+ output_attentions: bool = False,
625
+ output_hidden_states: bool = False,
626
+ return_dict: bool = True,
627
+ ):
628
+ input_embeds = self.embed_tokens(input_ids.astype("i4"))
629
+
630
+ input_embeds = input_embeds * (self.config.hidden_size**0.5)
631
+
632
+ outputs = self.layers(
633
+ input_embeds,
634
+ position_ids=position_ids,
635
+ attention_mask=attention_mask,
636
+ deterministic=deterministic,
637
+ init_cache=init_cache,
638
+ output_attentions=output_attentions,
639
+ output_hidden_states=output_hidden_states,
640
+ return_dict=return_dict,
641
+ )
642
+
643
+ hidden_states = outputs[0]
644
+ hidden_states = self.norm(hidden_states)
645
+
646
+ if output_hidden_states:
647
+ all_hidden_states = outputs[1] + (hidden_states,)
648
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
649
+ else:
650
+ outputs = (hidden_states,) + outputs[1:]
651
+
652
+ if not return_dict:
653
+ return tuple(v for v in outputs if v is not None)
654
+
655
+ return FlaxBaseModelOutput(
656
+ last_hidden_state=hidden_states,
657
+ hidden_states=outputs[1],
658
+ attentions=outputs[-1],
659
+ )
660
+
661
+
662
+ @add_start_docstrings(
663
+ "The bare Gemma Model transformer outputting raw hidden-states without any specific head on top.",
664
+ GEMMA_START_DOCSTRING,
665
+ )
666
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaModel with Llama->Gemma
667
+ class FlaxGemmaModel(FlaxGemmaPreTrainedModel):
668
+ module_class = FlaxGemmaModule
669
+
670
+
671
+ append_call_sample_docstring(
672
+ FlaxGemmaModel,
673
+ _CHECKPOINT_FOR_DOC,
674
+ FlaxBaseModelOutput,
675
+ _CONFIG_FOR_DOC,
676
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
677
+ )
678
+
679
+
680
+ # Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaForCausalLMModule with Llama->Gemma
681
+ class FlaxGemmaForCausalLMModule(nn.Module):
682
+ config: GemmaConfig
683
+ dtype: jnp.dtype = jnp.float32
684
+
685
+ def setup(self):
686
+ self.model = FlaxGemmaModule(self.config, dtype=self.dtype)
687
+ self.lm_head = nn.Dense(
688
+ self.config.vocab_size,
689
+ use_bias=False,
690
+ dtype=self.dtype,
691
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
692
+ )
693
+
694
+ # Ignore copy
695
+ def __call__(
696
+ self,
697
+ input_ids,
698
+ attention_mask=None,
699
+ position_ids=None,
700
+ deterministic: bool = True,
701
+ init_cache: bool = False,
702
+ output_attentions: bool = False,
703
+ output_hidden_states: bool = False,
704
+ return_dict: bool = True,
705
+ ):
706
+ outputs = self.model(
707
+ input_ids,
708
+ position_ids=position_ids,
709
+ attention_mask=attention_mask,
710
+ deterministic=deterministic,
711
+ init_cache=init_cache,
712
+ output_attentions=output_attentions,
713
+ output_hidden_states=output_hidden_states,
714
+ return_dict=return_dict,
715
+ )
716
+
717
+ hidden_states = outputs[0]
718
+ if self.config.tie_word_embeddings:
719
+ shared_kernel = self.model.variables["params"]["embed_tokens"]["embedding"].T
720
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
721
+ else:
722
+ lm_logits = self.lm_head(hidden_states)
723
+
724
+ if not return_dict:
725
+ return (lm_logits,) + outputs[1:]
726
+
727
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
728
+
729
+
730
+ @add_start_docstrings(
731
+ """
732
+ The Gemma Model transformer with a language modeling head (linear layer) on top.
733
+ """,
734
+ GEMMA_START_DOCSTRING,
735
+ )
736
+ # Copied from transformers.models.gptj.modeling_flax_gptj.FlaxGPTJForCausalLM with GPTJ->Gemma
737
+ class FlaxGemmaForCausalLM(FlaxGemmaPreTrainedModel):
738
+ module_class = FlaxGemmaForCausalLMModule
739
+
740
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
741
+ # initializing the cache
742
+ batch_size, seq_length = input_ids.shape
743
+
744
+ past_key_values = self.init_cache(batch_size, max_length)
745
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
746
+ # But since Gemma uses a causal mask, those positions are masked anyways.
747
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
748
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
749
+ if attention_mask is not None:
750
+ position_ids = attention_mask.cumsum(axis=-1) - 1
751
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
752
+ else:
753
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
754
+
755
+ return {
756
+ "past_key_values": past_key_values,
757
+ "attention_mask": extended_attention_mask,
758
+ "position_ids": position_ids,
759
+ }
760
+
761
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
762
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
763
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
764
+ return model_kwargs
765
+
766
+
767
+ append_call_sample_docstring(
768
+ FlaxGemmaForCausalLM,
769
+ _CHECKPOINT_FOR_DOC,
770
+ FlaxCausalLMOutput,
771
+ _CONFIG_FOR_DOC,
772
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
773
+ )
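To see the Flax classes above in action without downloading a checkpoint, the following sketch builds a deliberately tiny, made-up configuration and runs a single forward pass through `FlaxGemmaModel` (it assumes `jax` and `flax` are installed so the Flax classes are exported).

```python
import jax.numpy as jnp
from transformers import FlaxGemmaModel, GemmaConfig

# Tiny, made-up sizes so the example runs quickly on CPU; real checkpoints use the defaults above.
tiny_config = GemmaConfig(
    vocab_size=99,
    hidden_size=32,
    intermediate_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=4,
    head_dim=8,
    max_position_embeddings=64,
)

model = FlaxGemmaModel(tiny_config, seed=0)          # randomly initialized weights
input_ids = jnp.array([[3, 7, 11, 13]], dtype="i4")  # (batch_size=1, sequence_length=4)

outputs = model(input_ids)
print(outputs.last_hidden_state.shape)               # (1, 4, 32)
```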
venv/lib/python3.10/site-packages/transformers/models/gemma/modeling_gemma.py ADDED
@@ -0,0 +1,1372 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch Gemma model."""
17
+
18
+ import math
19
+ import warnings
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from ...activations import ACT2FN
29
+ from ...cache_utils import Cache, DynamicCache, StaticCache
30
+ from ...modeling_attn_mask_utils import (
31
+ AttentionMaskConverter,
32
+ _prepare_4d_causal_attention_mask,
33
+ )
34
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_13
37
+ from ...utils import (
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ is_flash_attn_2_available,
41
+ is_flash_attn_greater_or_equal_2_10,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from ...utils.import_utils import is_torch_fx_available
46
+ from .configuration_gemma import GemmaConfig
47
+
48
+
49
+ if is_flash_attn_2_available():
50
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
51
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
52
+
53
+
54
+ # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
55
+ # It means that the function will not be traced through and simply appear as a node in the graph.
56
+ if is_torch_fx_available():
57
+ if not is_torch_greater_or_equal_than_1_13:
58
+ import torch.fx
59
+
60
+ _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
61
+
62
+
63
+ logger = logging.get_logger(__name__)
64
+
65
+ _CONFIG_FOR_DOC = "GemmaConfig"
66
+
67
+
68
+ def _get_unpad_data(attention_mask):
69
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
70
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
71
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
72
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
73
+ return (
74
+ indices,
75
+ cu_seqlens,
76
+ max_seqlen_in_batch,
77
+ )
78
+
79
+
80
+ class GemmaRMSNorm(nn.Module):
81
+ def __init__(self, dim: int, eps: float = 1e-6):
82
+ super().__init__()
83
+ self.eps = eps
84
+ self.weight = nn.Parameter(torch.zeros(dim))
85
+
86
+ def _norm(self, x):
87
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
88
+
89
+ def forward(self, x):
90
+ output = self._norm(x.float())
91
+ # Llama does x.to(float16) * w whilst Gemma is (x * w).to(float16)
92
+ # See https://github.com/huggingface/transformers/pull/29402
93
+ output = output * (1.0 + self.weight.float())
94
+ return output.type_as(x)
95
+
96
+
97
+ ALL_LAYERNORM_LAYERS.append(GemmaRMSNorm)
98
+
99
+
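The comment in `forward` above is the important subtlety: the weight is stored as an offset from 1 and multiplied in float32 before casting back to the input dtype. A quick sanity check, assuming `GemmaRMSNorm` can be imported from the module this diff adds:

```python
import torch
from transformers.models.gemma.modeling_gemma import GemmaRMSNorm

norm = GemmaRMSNorm(dim=8)          # weight initialised to zeros, applied as (1 + weight)
x = torch.randn(2, 3, 8)

out = norm(x)
rms = out.pow(2).mean(-1).sqrt()    # per-vector RMS after normalisation
print(rms)                          # values close to 1.0 for a freshly initialised layer
```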
100
+ class GemmaRotaryEmbedding(nn.Module):
101
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
102
+ super().__init__()
103
+
104
+ self.dim = dim
105
+ self.max_position_embeddings = max_position_embeddings
106
+ self.base = base
107
+ self.register_buffer("inv_freq", None, persistent=False)
108
+
109
+ @torch.no_grad()
110
+ def forward(self, x, position_ids, seq_len=None):
111
+ # x: [bs, num_attention_heads, seq_len, head_size]
112
+ if self.inv_freq is None:
113
+ self.inv_freq = 1.0 / (
114
+ self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
115
+ )
116
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
117
+ position_ids_expanded = position_ids[:, None, :].float()
118
+ # Force float32 since bfloat16 loses precision on long contexts
119
+ # See https://github.com/huggingface/transformers/pull/29285
120
+ device_type = x.device.type
121
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
122
+ with torch.autocast(device_type=device_type, enabled=False):
123
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
124
+ emb = torch.cat((freqs, freqs), dim=-1)
125
+ cos = emb.cos()
126
+ sin = emb.sin()
127
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
128
+
129
+
130
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
131
+ def rotate_half(x):
132
+ """Rotates half the hidden dims of the input."""
133
+ x1 = x[..., : x.shape[-1] // 2]
134
+ x2 = x[..., x.shape[-1] // 2 :]
135
+ return torch.cat((-x2, x1), dim=-1)
136
+
137
+
138
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
139
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
140
+ """Applies Rotary Position Embedding to the query and key tensors.
141
+
142
+ Args:
143
+ q (`torch.Tensor`): The query tensor.
144
+ k (`torch.Tensor`): The key tensor.
145
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
146
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
147
+ position_ids (`torch.Tensor`, *optional*):
148
+ Deprecated and unused.
149
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
150
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
151
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
152
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
153
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
154
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
155
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
156
+ Returns:
157
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
158
+ """
159
+ cos = cos.unsqueeze(unsqueeze_dim)
160
+ sin = sin.unsqueeze(unsqueeze_dim)
161
+ q_embed = (q * cos) + (rotate_half(q) * sin)
162
+ k_embed = (k * cos) + (rotate_half(k) * sin)
163
+ return q_embed, k_embed
164
+
165
+
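A short illustration of `GemmaRotaryEmbedding` together with `apply_rotary_pos_emb`, with made-up shapes; the only property checked is that the rotation preserves vector norms, since RoPE acts as a 2D rotation on pairs of hidden dimensions.

```python
import torch
from transformers.models.gemma.modeling_gemma import GemmaRotaryEmbedding, apply_rotary_pos_emb

head_dim, seq_len = 8, 5
rotary = GemmaRotaryEmbedding(head_dim, max_position_embeddings=32)

q = torch.randn(1, 2, seq_len, head_dim)  # (batch, num_heads, seq_len, head_dim)
k = torch.randn(1, 2, seq_len, head_dim)
position_ids = torch.arange(seq_len)[None, :]

cos, sin = rotary(q, position_ids)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)

# RoPE is a per-pair 2D rotation, so vector norms are preserved.
assert torch.allclose(q_rot.norm(dim=-1), q.norm(dim=-1), atol=1e-5)
```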
166
+ class GemmaMLP(nn.Module):
167
+ def __init__(self, config):
168
+ super().__init__()
169
+ self.config = config
170
+ self.hidden_size = config.hidden_size
171
+ self.intermediate_size = config.intermediate_size
172
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
173
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
174
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
175
+ if config.hidden_activation is None:
176
+ logger.warning_once(
177
+ "Gemma's activation function should be approximate GeLU and not exact GeLU.\n"
178
+ "Changing the activation function to `gelu_pytorch_tanh`."
179
+ f"if you want to use the legacy `{config.hidden_act}`, "
180
+ f"edit the `model.config` to set `hidden_activation={config.hidden_act}` "
181
+ " instead of `hidden_act`. See https://github.com/huggingface/transformers/pull/29402 for more details."
182
+ )
183
+ hidden_activation = "gelu_pytorch_tanh"
184
+ else:
185
+ hidden_activation = config.hidden_activation
186
+ self.act_fn = ACT2FN[hidden_activation]
187
+
188
+ def forward(self, x):
189
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
190
+
191
+
192
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
193
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
194
+ """
195
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
196
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
197
+ """
198
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
199
+ if n_rep == 1:
200
+ return hidden_states
201
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
202
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
203
+
204
+
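`repeat_kv` is the other half of the GQA story: at attention time the compact key/value tensors are expanded so that every query head has a matching key/value head. A shape check with made-up sizes:

```python
import torch
from transformers.models.gemma.modeling_gemma import repeat_kv

batch, num_kv_heads, seq_len, head_dim = 2, 4, 6, 8
n_rep = 4  # num_attention_heads // num_key_value_heads

kv = torch.randn(batch, num_kv_heads, seq_len, head_dim)
expanded = repeat_kv(kv, n_rep)

print(expanded.shape)                               # torch.Size([2, 16, 6, 8])
assert torch.equal(expanded[:, 0], expanded[:, 1])  # the first kv head is repeated n_rep times
```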
205
+ class GemmaAttention(nn.Module):
206
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
207
+
208
+ # Ignore copy
209
+ def __init__(self, config: GemmaConfig, layer_idx: Optional[int] = None):
210
+ super().__init__()
211
+ self.config = config
212
+ self.layer_idx = layer_idx
213
+ if layer_idx is None:
214
+ logger.warning_once(
215
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
216
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
217
+ "when creating this class."
218
+ )
219
+
220
+ self.attention_dropout = config.attention_dropout
221
+ self.hidden_size = config.hidden_size
222
+ self.num_heads = config.num_attention_heads
223
+ self.head_dim = config.head_dim
224
+ self.num_key_value_heads = config.num_key_value_heads
225
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
226
+ self.max_position_embeddings = config.max_position_embeddings
227
+ self.rope_theta = config.rope_theta
228
+ self.is_causal = True
229
+
230
+ if self.hidden_size % self.num_heads != 0:
231
+ raise ValueError(
232
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
233
+ f" and `num_heads`: {self.num_heads})."
234
+ )
235
+
236
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
237
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
238
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
239
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
240
+ self.rotary_emb = GemmaRotaryEmbedding(
241
+ self.head_dim,
242
+ max_position_embeddings=self.max_position_embeddings,
243
+ base=self.rope_theta,
244
+ )
245
+
246
+ def forward(
247
+ self,
248
+ hidden_states: torch.Tensor,
249
+ attention_mask: Optional[torch.Tensor] = None,
250
+ position_ids: Optional[torch.LongTensor] = None,
251
+ past_key_value: Optional[Cache] = None,
252
+ output_attentions: bool = False,
253
+ use_cache: bool = False,
254
+ cache_position: Optional[torch.LongTensor] = None,
255
+ **kwargs,
256
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
257
+ bsz, q_len, _ = hidden_states.size()
258
+
259
+ query_states = self.q_proj(hidden_states)
260
+ key_states = self.k_proj(hidden_states)
261
+ value_states = self.v_proj(hidden_states)
262
+
263
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
264
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
265
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
266
+
267
+ past_key_value = getattr(self, "past_key_value", past_key_value)
268
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)
269
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, None)
270
+
271
+ if past_key_value is not None:
272
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
273
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
274
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
275
+
276
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
277
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
278
+
279
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
280
+
281
+ if attention_mask is not None: # no matter the length, we just slice it
282
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
283
+ attn_weights = attn_weights + causal_mask
284
+
285
+ # upcast attention to fp32
286
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
287
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
288
+ attn_output = torch.matmul(attn_weights, value_states)
289
+
290
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
291
+ raise ValueError(
292
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
293
+ f" {attn_output.size()}"
294
+ )
295
+
296
+ attn_output = attn_output.transpose(1, 2).contiguous()
297
+
298
+ attn_output = attn_output.view(bsz, q_len, -1)
299
+ attn_output = self.o_proj(attn_output)
300
+
301
+ if not output_attentions:
302
+ attn_weights = None
303
+
304
+ return attn_output, attn_weights, past_key_value
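To make the shape bookkeeping in the eager path above easier to follow, the sketch below traces it with plain torch ops and small made-up dimensions (rotary embeddings, masking and the KV cache are omitted):

```python
import math
import torch

bsz, q_len, num_heads, num_kv_heads, head_dim = 2, 5, 8, 2, 16
q = torch.randn(bsz, num_heads, q_len, head_dim)
k = torch.randn(bsz, num_kv_heads, q_len, head_dim)
v = torch.randn(bsz, num_kv_heads, q_len, head_dim)

# Expand the 2 key/value heads to 8 so they line up with the query heads (repeat_kv).
n_rep = num_heads // num_kv_heads
k = torch.repeat_interleave(k, n_rep, dim=1)
v = torch.repeat_interleave(v, n_rep, dim=1)

scores = q @ k.transpose(2, 3) / math.sqrt(head_dim)               # (bsz, heads, q_len, kv_len)
probs = torch.softmax(scores, dim=-1, dtype=torch.float32).to(q.dtype)
out = probs @ v                                                     # (bsz, heads, q_len, head_dim)
out = out.transpose(1, 2).reshape(bsz, q_len, num_heads * head_dim)
assert out.shape == (2, 5, 8 * 16)                                  # ready for o_proj
```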
305
+
306
+
307
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2 with Llama->Gemma
308
+ class GemmaFlashAttention2(GemmaAttention):
309
+ """
310
+ Gemma flash attention module. This module inherits from `GemmaAttention` as the weights of the module stay
311
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
312
+ flash attention and deal with padding tokens in case the input contains any of them.
313
+ """
314
+
315
+ def __init__(self, *args, **kwargs):
316
+ super().__init__(*args, **kwargs)
317
+
318
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
319
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
320
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
321
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
322
+
323
+ # Ignore copy
324
+ def forward(
325
+ self,
326
+ hidden_states: torch.Tensor,
327
+ attention_mask: Optional[torch.LongTensor] = None,
328
+ position_ids: Optional[torch.LongTensor] = None,
329
+ past_key_value: Optional[Cache] = None,
330
+ output_attentions: bool = False,
331
+ use_cache: bool = False,
332
+ cache_position: Optional[torch.LongTensor] = None,
333
+ **kwargs,
334
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
335
+ output_attentions = False
336
+
337
+ bsz, q_len, _ = hidden_states.size()
338
+
339
+ query_states = self.q_proj(hidden_states)
340
+ key_states = self.k_proj(hidden_states)
341
+ value_states = self.v_proj(hidden_states)
342
+
343
+ # Flash attention requires the input to have the shape
344
+ # batch_size x seq_length x num_heads x head_dim
345
+ # therefore we just need to keep the original shape
346
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
347
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
348
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
349
+
350
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)
351
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, None)
352
+
353
+ past_key_value = getattr(self, "past_key_value", past_key_value)
354
+
355
+ if past_key_value is not None:
356
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
357
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
358
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
359
+
360
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
361
+ # to be able to avoid many of these transpose/reshape/view.
362
+ query_states = query_states.transpose(1, 2)
363
+ key_states = key_states.transpose(1, 2)
364
+ value_states = value_states.transpose(1, 2)
365
+
366
+ dropout_rate = self.attention_dropout if self.training else 0.0
367
+
368
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
369
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
370
+ # cast them back to the correct dtype just to be sure everything works as expected.
371
+ # This might slow down training & inference so it is recommended to not cast the LayerNorms
372
+ # in fp32. (GemmaRMSNorm handles it correctly)
373
+
374
+ input_dtype = query_states.dtype
375
+ if input_dtype == torch.float32:
376
+ if torch.is_autocast_enabled():
377
+ target_dtype = torch.get_autocast_gpu_dtype()
378
+ # Handle the case where the model is quantized
379
+ elif hasattr(self.config, "_pre_quantization_dtype"):
380
+ target_dtype = self.config._pre_quantization_dtype
381
+ else:
382
+ target_dtype = self.q_proj.weight.dtype
383
+
384
+ logger.warning_once(
385
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
386
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
387
+ f" {target_dtype}."
388
+ )
389
+
390
+ query_states = query_states.to(target_dtype)
391
+ key_states = key_states.to(target_dtype)
392
+ value_states = value_states.to(target_dtype)
393
+
394
+ attn_output = self._flash_attention_forward(
395
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
396
+ )
397
+
398
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
399
+ attn_output = self.o_proj(attn_output)
400
+
401
+ if not output_attentions:
402
+ attn_weights = None
403
+
404
+ return attn_output, attn_weights, past_key_value
405
+
406
+ def _flash_attention_forward(
407
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
408
+ ):
409
+ """
410
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
411
+ first unpads the input, then computes the attention scores and re-pads the final attention scores.
412
+
413
+ Args:
414
+ query_states (`torch.Tensor`):
415
+ Input query states to be passed to Flash Attention API
416
+ key_states (`torch.Tensor`):
417
+ Input key states to be passed to Flash Attention API
418
+ value_states (`torch.Tensor`):
419
+ Input value states to be passed to Flash Attention API
420
+ attention_mask (`torch.Tensor`):
421
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
422
+ position of padding tokens and 1 for the position of non-padding tokens.
423
+ dropout (`float`):
424
+ Attention dropout
425
+ softmax_scale (`float`, *optional*):
426
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
427
+ """
428
+ if not self._flash_attn_uses_top_left_mask:
429
+ causal = self.is_causal
430
+ else:
431
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in GemmaFlashAttention2 __init__.
432
+ causal = self.is_causal and query_length != 1
433
+
434
+ # Contains at least one padding token in the sequence
435
+ if attention_mask is not None:
436
+ batch_size = query_states.shape[0]
437
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
438
+ query_states, key_states, value_states, attention_mask, query_length
439
+ )
440
+
441
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
442
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
443
+
444
+ attn_output_unpad = flash_attn_varlen_func(
445
+ query_states,
446
+ key_states,
447
+ value_states,
448
+ cu_seqlens_q=cu_seqlens_q,
449
+ cu_seqlens_k=cu_seqlens_k,
450
+ max_seqlen_q=max_seqlen_in_batch_q,
451
+ max_seqlen_k=max_seqlen_in_batch_k,
452
+ dropout_p=dropout,
453
+ softmax_scale=softmax_scale,
454
+ causal=causal,
455
+ )
456
+
457
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
458
+ else:
459
+ attn_output = flash_attn_func(
460
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
461
+ )
462
+
463
+ return attn_output
464
+
465
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
466
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
467
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
468
+
469
+ key_layer = index_first_axis(
470
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
471
+ )
472
+ value_layer = index_first_axis(
473
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
474
+ )
475
+ if query_length == kv_seq_len:
476
+ query_layer = index_first_axis(
477
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
478
+ )
479
+ cu_seqlens_q = cu_seqlens_k
480
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
481
+ indices_q = indices_k
482
+ elif query_length == 1:
483
+ max_seqlen_in_batch_q = 1
484
+ cu_seqlens_q = torch.arange(
485
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
486
+ ) # There is a memcpy here, that is very bad.
487
+ indices_q = cu_seqlens_q[:-1]
488
+ query_layer = query_layer.squeeze(1)
489
+ else:
490
+ # The -q_len: slice assumes left padding.
491
+ attention_mask = attention_mask[:, -query_length:]
492
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
493
+
494
+ return (
495
+ query_layer,
496
+ key_layer,
497
+ value_layer,
498
+ indices_q,
499
+ (cu_seqlens_q, cu_seqlens_k),
500
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
501
+ )
502
+
503
+
504
+ # Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Gemma
505
+ class GemmaSdpaAttention(GemmaAttention):
506
+ """
507
+ Gemma attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
508
+ `GemmaAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
509
+ SDPA API.
510
+ """
511
+
512
+ # Ignore copy
513
+ def forward(
514
+ self,
515
+ hidden_states: torch.Tensor,
516
+ attention_mask: Optional[torch.Tensor] = None,
517
+ position_ids: Optional[torch.LongTensor] = None,
518
+ past_key_value: Optional[Cache] = None,
519
+ output_attentions: bool = False,
520
+ use_cache: bool = False,
521
+ cache_position: Optional[torch.LongTensor] = None,
522
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
523
+ if output_attentions:
524
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
525
+ logger.warning_once(
526
+ "GemmaModel is using GemmaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
527
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
528
+ )
529
+ return super().forward(
530
+ hidden_states=hidden_states,
531
+ attention_mask=attention_mask,
532
+ position_ids=position_ids,
533
+ past_key_value=past_key_value,
534
+ output_attentions=output_attentions,
535
+ use_cache=use_cache,
536
+ cache_position=cache_position,
537
+ )
538
+
539
+ bsz, q_len, _ = hidden_states.size()
540
+
541
+ query_states = self.q_proj(hidden_states)
542
+ key_states = self.k_proj(hidden_states)
543
+ value_states = self.v_proj(hidden_states)
544
+
545
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
546
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
547
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
548
+
549
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)
550
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, None)
551
+
552
+ past_key_value = getattr(self, "past_key_value", past_key_value)
553
+
554
+ if past_key_value is not None:
555
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
556
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
557
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
558
+
559
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
560
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
561
+
562
+ causal_mask = attention_mask
563
+ if attention_mask is not None:
564
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
565
+
566
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
567
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
568
+ if query_states.device.type == "cuda" and causal_mask is not None:
569
+ query_states = query_states.contiguous()
570
+ key_states = key_states.contiguous()
571
+ value_states = value_states.contiguous()
572
+
573
+ # In case we are not compiling, we may set `causal_mask` to None, which is required to dispatch to SDPA's Flash Attention 2 backend, rather
574
+ # than relying on the `is_causal` argument.
575
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
576
+ query_states,
577
+ key_states,
578
+ value_states,
579
+ attn_mask=causal_mask,
580
+ dropout_p=self.attention_dropout if self.training else 0.0,
581
+ is_causal=causal_mask is None and q_len > 1,
582
+ )
583
+
584
+ attn_output = attn_output.transpose(1, 2).contiguous()
585
+ attn_output = attn_output.view(bsz, q_len, -1)
586
+
587
+ attn_output = self.o_proj(attn_output)
588
+
589
+ return attn_output, None, past_key_value
590
+
591
+
592
+ GEMMA_ATTENTION_CLASSES = {
593
+ "eager": GemmaAttention,
594
+ "flash_attention_2": GemmaFlashAttention2,
595
+ "sdpa": GemmaSdpaAttention,
596
+ }
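The dictionary above is indexed by `config._attn_implementation`, which is normally chosen at load time. A hedged usage sketch follows (the checkpoint name is the one used in the docstrings in this file; downloading it requires access to the gated repo, and `flash_attention_2` additionally needs the `flash-attn` package installed):

```python
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-7b",
    torch_dtype=torch.bfloat16,
    attn_implementation="sdpa",   # or "eager" / "flash_attention_2"
)
print(type(model.model.layers[0].self_attn).__name__)  # GemmaSdpaAttention
```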
597
+
598
+
599
+ # Copied from transformers.models.llama.modeling_llama.LlamaDecoderLayer with LLAMA->GEMMA,Llama->Gemma
600
+ class GemmaDecoderLayer(nn.Module):
601
+ def __init__(self, config: GemmaConfig, layer_idx: int):
602
+ super().__init__()
603
+ self.hidden_size = config.hidden_size
604
+
605
+ self.self_attn = GEMMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
606
+
607
+ self.mlp = GemmaMLP(config)
608
+ self.input_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
609
+ self.post_attention_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
610
+
611
+ def forward(
612
+ self,
613
+ hidden_states: torch.Tensor,
614
+ attention_mask: Optional[torch.Tensor] = None,
615
+ position_ids: Optional[torch.LongTensor] = None,
616
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
617
+ output_attentions: Optional[bool] = False,
618
+ use_cache: Optional[bool] = False,
619
+ cache_position: Optional[torch.LongTensor] = None,
620
+ **kwargs,
621
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
622
+ """
623
+ Args:
624
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
625
+ attention_mask (`torch.FloatTensor`, *optional*):
626
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
627
+ query_sequence_length, key_sequence_length)` if default attention is used.
628
+ output_attentions (`bool`, *optional*):
629
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
630
+ returned tensors for more detail.
631
+ use_cache (`bool`, *optional*):
632
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
633
+ (see `past_key_values`).
634
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
635
+ """
636
+ if "padding_mask" in kwargs:
637
+ warnings.warn(
638
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
639
+ )
640
+
641
+ residual = hidden_states
642
+
643
+ hidden_states = self.input_layernorm(hidden_states)
644
+
645
+ # Self Attention
646
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
647
+ hidden_states=hidden_states,
648
+ attention_mask=attention_mask,
649
+ position_ids=position_ids,
650
+ past_key_value=past_key_value,
651
+ output_attentions=output_attentions,
652
+ use_cache=use_cache,
653
+ cache_position=cache_position,
654
+ **kwargs,
655
+ )
656
+ hidden_states = residual + hidden_states
657
+
658
+ # Fully Connected
659
+ residual = hidden_states
660
+ hidden_states = self.post_attention_layernorm(hidden_states)
661
+ hidden_states = self.mlp(hidden_states)
662
+ hidden_states = residual + hidden_states
663
+
664
+ outputs = (hidden_states,)
665
+
666
+ if output_attentions:
667
+ outputs += (self_attn_weights,)
668
+
669
+ if use_cache:
670
+ outputs += (present_key_value,)
671
+
672
+ return outputs
673
+
674
+
675
+ GEMMA_START_DOCSTRING = r"""
676
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
677
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
678
+ etc.)
679
+
680
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
681
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
682
+ and behavior.
683
+
684
+ Parameters:
685
+ config ([`GemmaConfig`]):
686
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
687
+ load the weights associated with the model, only the configuration. Check out the
688
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
689
+ """
690
+
691
+
692
+ @add_start_docstrings(
693
+ "The bare Gemma Model outputting raw hidden-states without any specific head on top.",
694
+ GEMMA_START_DOCSTRING,
695
+ )
696
+ class GemmaPreTrainedModel(PreTrainedModel):
697
+ config_class = GemmaConfig
698
+ base_model_prefix = "model"
699
+ supports_gradient_checkpointing = True
700
+ _keep_in_fp32_modules = ["inv_freq", "rotary_emb", "cos_cached", "sin_cached"]
701
+ _no_split_modules = ["GemmaDecoderLayer"]
702
+ _skip_keys_device_placement = ["past_key_values", "causal_mask"]
703
+ _supports_flash_attn_2 = True
704
+ _supports_sdpa = True
705
+ _supports_cache_class = True
706
+
707
+ def _init_weights(self, module):
708
+ std = self.config.initializer_range
709
+ if isinstance(module, nn.Linear):
710
+ module.weight.data.normal_(mean=0.0, std=std)
711
+ if module.bias is not None:
712
+ module.bias.data.zero_()
713
+ elif isinstance(module, nn.Embedding):
714
+ module.weight.data.normal_(mean=0.0, std=std)
715
+ if module.padding_idx is not None:
716
+ module.weight.data[module.padding_idx].zero_()
717
+
718
+ def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None):
719
+ if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
720
+ raise ValueError(
721
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
722
+ "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
723
+ )
724
+
725
+ for layer in self.model.layers:
726
+ weights = layer.self_attn.o_proj.weight
727
+ layer.self_attn.past_key_value = cache_cls(
728
+ self.config, max_batch_size, max_cache_len, device=weights.device, dtype=weights.dtype
729
+ )
730
+
731
+ def _reset_cache(self):
732
+ for layer in self.model.layers:
733
+ layer.self_attn.past_key_value = None
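`_setup_cache` pre-allocates one cache object per decoder layer (attached as `self_attn.past_key_value`), which is what the static-cache / `torch.compile` path relies on, and `_reset_cache` releases it again. Below is a rough sketch of driving these private helpers directly, under the assumption that `generate` is not doing it for you; batch size and cache length are illustrative.

```python
import torch
from transformers import AutoModelForCausalLM
from transformers.cache_utils import StaticCache

model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", torch_dtype=torch.bfloat16)

# Pre-allocate key/value buffers for a fixed batch size and maximum length.
model._setup_cache(StaticCache, max_batch_size=1, max_cache_len=512)
...  # run decoding steps here (e.g. a compiled forward loop)
model._reset_cache()  # drop the per-layer buffers when done
```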
734
+
735
+
736
+ GEMMA_INPUTS_DOCSTRING = r"""
737
+ Args:
738
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
739
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
740
+ it.
741
+
742
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
743
+ [`PreTrainedTokenizer.__call__`] for details.
744
+
745
+ [What are input IDs?](../glossary#input-ids)
746
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
747
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
748
+
749
+ - 1 for tokens that are **not masked**,
750
+ - 0 for tokens that are **masked**.
751
+
752
+ [What are attention masks?](../glossary#attention-mask)
753
+
754
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
755
+ [`PreTrainedTokenizer.__call__`] for details.
756
+
757
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
758
+ `past_key_values`).
759
+
760
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
761
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
762
+ information on the default strategy.
763
+
764
+ - 1 indicates the head is **not masked**,
765
+ - 0 indicates the head is **masked**.
766
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
767
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
768
+ config.n_positions - 1]`.
769
+
770
+ [What are position IDs?](../glossary#position-ids)
771
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
772
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
773
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
774
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
775
+
776
+ Two formats are allowed:
777
+ - a [`~cache_utils.Cache`] instance;
778
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
779
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
780
+ cache format.
781
+
782
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
783
+ legacy cache format will be returned.
784
+
785
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
786
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
787
+ of shape `(batch_size, sequence_length)`.
788
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
789
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
790
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
791
+ model's internal embedding lookup matrix.
792
+ use_cache (`bool`, *optional*):
793
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
794
+ `past_key_values`).
795
+ output_attentions (`bool`, *optional*):
796
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
797
+ tensors for more detail.
798
+ output_hidden_states (`bool`, *optional*):
799
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
800
+ more detail.
801
+ return_dict (`bool`, *optional*):
802
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
803
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
804
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
805
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
806
+ the complete sequence length.
807
+ """
808
+
809
+
810
+ @add_start_docstrings(
811
+ "The bare Gemma Model outputting raw hidden-states without any specific head on top.",
812
+ GEMMA_START_DOCSTRING,
813
+ )
814
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel with LLAMA->GEMMA,Llama->Gemma
815
+ class GemmaModel(GemmaPreTrainedModel):
816
+ """
817
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`GemmaDecoderLayer`]
818
+
819
+ Args:
820
+ config: GemmaConfig
821
+ """
822
+
823
+ def __init__(self, config: GemmaConfig):
824
+ super().__init__(config)
825
+ self.padding_idx = config.pad_token_id
826
+ self.vocab_size = config.vocab_size
827
+
828
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
829
+ self.layers = nn.ModuleList(
830
+ [GemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
831
+ )
832
+ self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
833
+ self.gradient_checkpointing = False
834
+
835
+ # Initialize weights and apply final processing
836
+ self.post_init()
837
+
838
+ def get_input_embeddings(self):
839
+ return self.embed_tokens
840
+
841
+ def set_input_embeddings(self, value):
842
+ self.embed_tokens = value
843
+
844
+ @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)
845
+ # Ignore copy
846
+ def forward(
847
+ self,
848
+ input_ids: torch.LongTensor = None,
849
+ attention_mask: Optional[torch.Tensor] = None,
850
+ position_ids: Optional[torch.LongTensor] = None,
851
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
852
+ inputs_embeds: Optional[torch.FloatTensor] = None,
853
+ use_cache: Optional[bool] = None,
854
+ output_attentions: Optional[bool] = None,
855
+ output_hidden_states: Optional[bool] = None,
856
+ return_dict: Optional[bool] = None,
857
+ cache_position: Optional[torch.LongTensor] = None,
858
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
859
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
860
+ output_hidden_states = (
861
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
862
+ )
863
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
864
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
865
+
866
+ if (input_ids is None) ^ (inputs_embeds is not None):
867
+ raise ValueError(
868
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
869
+ )
870
+
871
+ if self.gradient_checkpointing and self.training and use_cache:
872
+ logger.warning_once(
873
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
874
+ )
875
+ use_cache = False
876
+
877
+ if inputs_embeds is None:
878
+ inputs_embeds = self.embed_tokens(input_ids)
879
+
880
+ past_seen_tokens = 0
881
+ if use_cache: # kept for BC (cache positions)
882
+ if not isinstance(past_key_values, StaticCache):
883
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
884
+ past_seen_tokens = past_key_values.get_seq_length()
885
+
886
+ if cache_position is None:
887
+ cache_position = torch.arange(
888
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
889
+ )
890
+
891
+ if position_ids is None:
892
+ position_ids = cache_position.unsqueeze(0)
893
+
894
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_seen_tokens)
895
+
896
+ # embed positions
897
+ hidden_states = inputs_embeds
898
+
899
+ # normalized
900
+ # Gemma downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5
901
+ # See https://github.com/huggingface/transformers/pull/29402
902
+ normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
903
+ hidden_states = hidden_states * normalizer
904
+
905
+ # decoder layers
906
+ all_hidden_states = () if output_hidden_states else None
907
+ all_self_attns = () if output_attentions else None
908
+ next_decoder_cache = None
909
+
910
+ for decoder_layer in self.layers:
911
+ if output_hidden_states:
912
+ all_hidden_states += (hidden_states,)
913
+
914
+ if self.gradient_checkpointing and self.training:
915
+ layer_outputs = self._gradient_checkpointing_func(
916
+ decoder_layer.__call__,
917
+ hidden_states,
918
+ causal_mask,
919
+ position_ids,
920
+ past_key_values,
921
+ output_attentions,
922
+ use_cache,
923
+ cache_position,
924
+ )
925
+ else:
926
+ layer_outputs = decoder_layer(
927
+ hidden_states,
928
+ attention_mask=causal_mask,
929
+ position_ids=position_ids,
930
+ past_key_value=past_key_values,
931
+ output_attentions=output_attentions,
932
+ use_cache=use_cache,
933
+ cache_position=cache_position,
934
+ )
935
+
936
+ hidden_states = layer_outputs[0]
937
+
938
+ if use_cache:
939
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
940
+
941
+ if output_attentions:
942
+ all_self_attns += (layer_outputs[1],)
943
+
944
+ hidden_states = self.norm(hidden_states)
945
+
946
+ # add hidden states from the last decoder layer
947
+ if output_hidden_states:
948
+ all_hidden_states += (hidden_states,)
949
+
950
+ next_cache = None
951
+ if use_cache:
952
+ next_cache = (
953
+ next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
954
+ )
955
+ if not return_dict:
956
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
957
+ return BaseModelOutputWithPast(
958
+ last_hidden_state=hidden_states,
959
+ past_key_values=next_cache,
960
+ hidden_states=all_hidden_states,
961
+ attentions=all_self_attns,
962
+ )
963
+
964
+ def _update_causal_mask(
965
+ self,
966
+ attention_mask: torch.Tensor,
967
+ input_tensor: torch.Tensor,
968
+ cache_position: torch.Tensor,
969
+ past_seen_tokens: int,
970
+ ):
971
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
972
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
973
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
974
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
975
+
976
+ if self.config._attn_implementation == "flash_attention_2":
977
+ if attention_mask is not None and 0.0 in attention_mask:
978
+ return attention_mask
979
+ return None
980
+
981
+ if self.config._attn_implementation == "sdpa":
982
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument,
983
+ # in order to dispatch on Flash Attention 2.
984
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
985
+ attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens
986
+ ):
987
+ return None
988
+
989
+ dtype, device = input_tensor.dtype, input_tensor.device
990
+ min_dtype = torch.finfo(dtype).min
991
+ sequence_length = input_tensor.shape[1]
992
+ if hasattr(getattr(self.layers[0], "self_attn", {}), "past_key_value"): # static cache
993
+ target_length = self.config.max_position_embeddings
994
+ else: # dynamic cache
995
+ target_length = (
996
+ attention_mask.shape[-1]
997
+ if isinstance(attention_mask, torch.Tensor)
998
+ else past_seen_tokens + sequence_length + 1
999
+ )
1000
+
1001
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
1002
+ if sequence_length != 1:
1003
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1004
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1005
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
1006
+ if attention_mask is not None:
1007
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1008
+ if attention_mask.dim() == 2:
1009
+ mask_length = attention_mask.shape[-1]
1010
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
1011
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
1012
+ elif attention_mask.dim() == 4:
1013
+ # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with
1014
+ # cache. In that case, the 4D attention mask attends to the newest tokens only.
1015
+ if attention_mask.shape[-2] < cache_position[0] + sequence_length:
1016
+ offset = cache_position[0]
1017
+ else:
1018
+ offset = 0
1019
+ mask_shape = attention_mask.shape
1020
+ mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
1021
+ causal_mask[
1022
+ : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
1023
+ ] = mask_slice
1024
+
1025
+ if (
1026
+ self.config._attn_implementation == "sdpa"
1027
+ and attention_mask is not None
1028
+ and attention_mask.device.type == "cuda"
1029
+ ):
1030
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1031
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1032
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1033
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1034
+
1035
+ return causal_mask
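The additive mask built above blocks positions with the dtype's minimum value rather than using a boolean mask. A toy reproduction of the no-cache, no-padding case, with a tiny sequence length:

```python
import torch

sequence_length = target_length = 4
min_dtype = torch.finfo(torch.float32).min
cache_position = torch.arange(sequence_length)

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
print(causal_mask)  # 0.0 where attention is allowed, dtype-min above the diagonal
```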
1036
+
1037
+
1038
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->GEMMA,Llama->Gemma,llama->gemma
1039
+ class GemmaForCausalLM(GemmaPreTrainedModel):
1040
+ _tied_weights_keys = ["lm_head.weight"]
1041
+
1042
+ def __init__(self, config):
1043
+ super().__init__(config)
1044
+ self.model = GemmaModel(config)
1045
+ self.vocab_size = config.vocab_size
1046
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1047
+
1048
+ # Initialize weights and apply final processing
1049
+ self.post_init()
1050
+
1051
+ def get_input_embeddings(self):
1052
+ return self.model.embed_tokens
1053
+
1054
+ def set_input_embeddings(self, value):
1055
+ self.model.embed_tokens = value
1056
+
1057
+ def get_output_embeddings(self):
1058
+ return self.lm_head
1059
+
1060
+ def set_output_embeddings(self, new_embeddings):
1061
+ self.lm_head = new_embeddings
1062
+
1063
+ def set_decoder(self, decoder):
1064
+ self.model = decoder
1065
+
1066
+ def get_decoder(self):
1067
+ return self.model
1068
+
1069
+ # Ignore copy
1070
+ @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)
1071
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1072
+ def forward(
1073
+ self,
1074
+ input_ids: torch.LongTensor = None,
1075
+ attention_mask: Optional[torch.Tensor] = None,
1076
+ position_ids: Optional[torch.LongTensor] = None,
1077
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1078
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1079
+ labels: Optional[torch.LongTensor] = None,
1080
+ use_cache: Optional[bool] = None,
1081
+ output_attentions: Optional[bool] = None,
1082
+ output_hidden_states: Optional[bool] = None,
1083
+ return_dict: Optional[bool] = None,
1084
+ cache_position: Optional[torch.LongTensor] = None,
1085
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1086
+ r"""
1087
+ Args:
1088
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1089
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
1090
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1091
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1092
+
1093
+ Returns:
1094
+
1095
+ Example:
1096
+
1097
+ ```python
1098
+ >>> from transformers import AutoTokenizer, GemmaForCausalLM
1099
+
1100
+ >>> model = GemmaForCausalLM.from_pretrained("google/gemma-7b")
1101
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
1102
+
1103
+ >>> prompt = "What is your favorite condiment?"
1104
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1105
+
1106
+ >>> # Generate
1107
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1108
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1109
+ "What is your favorite condiment?"
1110
+ ```"""
1111
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1112
+ output_hidden_states = (
1113
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1114
+ )
1115
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1116
+
1117
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1118
+ outputs = self.model(
1119
+ input_ids=input_ids,
1120
+ attention_mask=attention_mask,
1121
+ position_ids=position_ids,
1122
+ past_key_values=past_key_values,
1123
+ inputs_embeds=inputs_embeds,
1124
+ use_cache=use_cache,
1125
+ output_attentions=output_attentions,
1126
+ output_hidden_states=output_hidden_states,
1127
+ return_dict=return_dict,
1128
+ cache_position=cache_position,
1129
+ )
1130
+
1131
+ hidden_states = outputs[0]
1132
+ logits = self.lm_head(hidden_states)
1133
+ logits = logits.float()
1134
+ loss = None
1135
+ if labels is not None:
1136
+ # Shift so that tokens < n predict n
1137
+ shift_logits = logits[..., :-1, :].contiguous()
1138
+ shift_labels = labels[..., 1:].contiguous()
1139
+ # Flatten the tokens
1140
+ loss_fct = CrossEntropyLoss()
1141
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1142
+ shift_labels = shift_labels.view(-1)
1143
+ # Enable model parallelism
1144
+ shift_labels = shift_labels.to(shift_logits.device)
1145
+ loss = loss_fct(shift_logits, shift_labels)
1146
+
1147
+ if not return_dict:
1148
+ output = (logits,) + outputs[1:]
1149
+ return (loss,) + output if loss is not None else output
1150
+
1151
+ return CausalLMOutputWithPast(
1152
+ loss=loss,
1153
+ logits=logits,
1154
+ past_key_values=outputs.past_key_values,
1155
+ hidden_states=outputs.hidden_states,
1156
+ attentions=outputs.attentions,
1157
+ )
1158
+
1159
+ def prepare_inputs_for_generation(
1160
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
1161
+ ):
1162
+ # With static cache, the `past_key_values` is None
1163
+ # TODO joao: standardize interface for the different Cache classes and remove of this if
1164
+ has_static_cache = False
1165
+ if past_key_values is None:
1166
+ past_key_values = getattr(getattr(self.model.layers[0], "self_attn", {}), "past_key_value", None)
1167
+ has_static_cache = past_key_values is not None
1168
+
1169
+ past_length = 0
1170
+ if past_key_values is not None:
1171
+ if isinstance(past_key_values, Cache):
1172
+ past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length()
1173
+ max_cache_length = (
1174
+ torch.tensor(past_key_values.get_max_length(), device=input_ids.device)
1175
+ if past_key_values.get_max_length() is not None
1176
+ else None
1177
+ )
1178
+ cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length)
1179
+ # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects
1180
+ else:
1181
+ cache_length = past_length = past_key_values[0][0].shape[2]
1182
+ max_cache_length = None
1183
+
1184
+ # Keep only the unprocessed tokens:
1185
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1186
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1187
+ # input)
1188
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1189
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1190
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1191
+ # input_ids based on the past_length.
1192
+ elif past_length < input_ids.shape[1]:
1193
+ input_ids = input_ids[:, past_length:]
1194
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1195
+
1196
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1197
+ if (
1198
+ max_cache_length is not None
1199
+ and attention_mask is not None
1200
+ and cache_length + input_ids.shape[1] > max_cache_length
1201
+ ):
1202
+ attention_mask = attention_mask[:, -max_cache_length:]
1203
+
1204
+ position_ids = kwargs.get("position_ids", None)
1205
+ if attention_mask is not None and position_ids is None:
1206
+ # create position_ids on the fly for batch generation
1207
+ position_ids = attention_mask.long().cumsum(-1) - 1
1208
+ position_ids.masked_fill_(attention_mask == 0, 1)
1209
+ if past_key_values:
1210
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1211
+
1212
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1213
+ if inputs_embeds is not None and past_key_values is None:
1214
+ model_inputs = {"inputs_embeds": inputs_embeds}
1215
+ else:
1216
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
1217
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
1218
+ # TODO: use `next_tokens` directly instead.
1219
+ model_inputs = {"input_ids": input_ids.contiguous()}
1220
+
1221
+ input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
1222
+ if cache_position is None:
1223
+ cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
1224
+ else:
1225
+ cache_position = cache_position[-input_length:]
1226
+
1227
+ if has_static_cache:
1228
+ past_key_values = None
1229
+
1230
+ model_inputs.update(
1231
+ {
1232
+ "position_ids": position_ids,
1233
+ "cache_position": cache_position,
1234
+ "past_key_values": past_key_values,
1235
+ "use_cache": kwargs.get("use_cache"),
1236
+ "attention_mask": attention_mask,
1237
+ }
1238
+ )
1239
+ return model_inputs
1240
+
1241
+ @staticmethod
1242
+ def _reorder_cache(past_key_values, beam_idx):
1243
+ reordered_past = ()
1244
+ for layer_past in past_key_values:
1245
+ reordered_past += (
1246
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1247
+ )
1248
+ return reordered_past
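The loss computation in `GemmaForCausalLM.forward` above shifts logits and labels by one position so that token *t* predicts token *t+1*. A self-contained sketch of the same arithmetic on random tensors:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size, bsz, seq_len = 11, 2, 6
logits = torch.randn(bsz, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (bsz, seq_len))

# Same shift as in GemmaForCausalLM.forward: drop the last logit, drop the first label.
shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab_size)
shift_labels = labels[..., 1:].contiguous().view(-1)
loss = CrossEntropyLoss()(shift_logits, shift_labels)
print(loss.item())
```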
1249
+
1250
+
1251
+ @add_start_docstrings(
1252
+ """
1253
+ The Gemma Model transformer with a sequence classification head on top (linear layer).
1254
+
1255
+ [`GemmaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1256
+ (e.g. GPT-2) do.
1257
+
1258
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1259
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1260
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1261
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1262
+ each row of the batch).
1263
+ """,
1264
+ GEMMA_START_DOCSTRING,
1265
+ )
1266
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with LLAMA->GEMMA,Llama->Gemma
1267
+ class GemmaForSequenceClassification(GemmaPreTrainedModel):
1268
+ def __init__(self, config):
1269
+ super().__init__(config)
1270
+ self.num_labels = config.num_labels
1271
+ self.model = GemmaModel(config)
1272
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1273
+
1274
+ # Initialize weights and apply final processing
1275
+ self.post_init()
1276
+
1277
+ def get_input_embeddings(self):
1278
+ return self.model.embed_tokens
1279
+
1280
+ def set_input_embeddings(self, value):
1281
+ self.model.embed_tokens = value
1282
+
1283
+ @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)
1284
+ def forward(
1285
+ self,
1286
+ input_ids: torch.LongTensor = None,
1287
+ attention_mask: Optional[torch.Tensor] = None,
1288
+ position_ids: Optional[torch.LongTensor] = None,
1289
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1290
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1291
+ labels: Optional[torch.LongTensor] = None,
1292
+ use_cache: Optional[bool] = None,
1293
+ output_attentions: Optional[bool] = None,
1294
+ output_hidden_states: Optional[bool] = None,
1295
+ return_dict: Optional[bool] = None,
1296
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1297
+ r"""
1298
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1299
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1300
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1301
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1302
+ """
1303
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1304
+
1305
+ transformer_outputs = self.model(
1306
+ input_ids,
1307
+ attention_mask=attention_mask,
1308
+ position_ids=position_ids,
1309
+ past_key_values=past_key_values,
1310
+ inputs_embeds=inputs_embeds,
1311
+ use_cache=use_cache,
1312
+ output_attentions=output_attentions,
1313
+ output_hidden_states=output_hidden_states,
1314
+ return_dict=return_dict,
1315
+ )
1316
+ hidden_states = transformer_outputs[0]
1317
+ logits = self.score(hidden_states)
1318
+
1319
+ if input_ids is not None:
1320
+ batch_size = input_ids.shape[0]
1321
+ else:
1322
+ batch_size = inputs_embeds.shape[0]
1323
+
1324
+ if self.config.pad_token_id is None and batch_size != 1:
1325
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1326
+ if self.config.pad_token_id is None:
1327
+ sequence_lengths = -1
1328
+ else:
1329
+ if input_ids is not None:
1330
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1331
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1332
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1333
+ sequence_lengths = sequence_lengths.to(logits.device)
1334
+ else:
1335
+ sequence_lengths = -1
1336
+
1337
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1338
+
1339
+ loss = None
1340
+ if labels is not None:
1341
+ labels = labels.to(logits.device)
1342
+ if self.config.problem_type is None:
1343
+ if self.num_labels == 1:
1344
+ self.config.problem_type = "regression"
1345
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1346
+ self.config.problem_type = "single_label_classification"
1347
+ else:
1348
+ self.config.problem_type = "multi_label_classification"
1349
+
1350
+ if self.config.problem_type == "regression":
1351
+ loss_fct = MSELoss()
1352
+ if self.num_labels == 1:
1353
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1354
+ else:
1355
+ loss = loss_fct(pooled_logits, labels)
1356
+ elif self.config.problem_type == "single_label_classification":
1357
+ loss_fct = CrossEntropyLoss()
1358
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1359
+ elif self.config.problem_type == "multi_label_classification":
1360
+ loss_fct = BCEWithLogitsLoss()
1361
+ loss = loss_fct(pooled_logits, labels)
1362
+ if not return_dict:
1363
+ output = (pooled_logits,) + transformer_outputs[1:]
1364
+ return ((loss,) + output) if loss is not None else output
1365
+
1366
+ return SequenceClassifierOutputWithPast(
1367
+ loss=loss,
1368
+ logits=pooled_logits,
1369
+ past_key_values=transformer_outputs.past_key_values,
1370
+ hidden_states=transformer_outputs.hidden_states,
1371
+ attentions=transformer_outputs.attentions,
1372
+ )
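The classification head above pools the hidden state of the last non-padding token in each row. A standalone sketch of that indexing, with a made-up pad id and hidden size:

```python
import torch

pad_token_id = 0  # hypothetical; the real id comes from config.pad_token_id
input_ids = torch.tensor([[5, 7, 9, pad_token_id, pad_token_id],
                          [3, 4, 6, 2, 8]])
batch_size, seq_len = input_ids.shape

# Index of the first pad token minus one; the modulo keeps fully unpadded rows valid.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % seq_len
print(sequence_lengths)  # tensor([2, 4]) -> last real token per row

hidden_states = torch.randn(batch_size, seq_len, 8)
pooled = hidden_states[torch.arange(batch_size), sequence_lengths]
assert pooled.shape == (batch_size, 8)
```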
venv/lib/python3.10/site-packages/transformers/models/gemma/tokenization_gemma.py ADDED
@@ -0,0 +1,326 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Tokenization classes for Gemma."""
17
+ import os
18
+ from shutil import copyfile
19
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
20
+
21
+ import sentencepiece as spm
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ if TYPE_CHECKING:
28
+ pass
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
33
+
34
+ SPIECE_UNDERLINE = "▁"
35
+
36
+
37
+ class GemmaTokenizer(PreTrainedTokenizer):
38
+ """
39
+ Construct a Gemma tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
40
+ no padding token in the original model.
41
+
42
+ Args:
43
+ vocab_file (`str`):
44
+ Path to the vocabulary file.
45
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
46
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
47
+ token instead.
48
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<bos>"`):
49
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
50
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<eos>"`):
51
+ The end of sequence token.
52
+ pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<pad>"`):
53
+ A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
54
+ attention mechanisms or loss computation.
55
+ sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):
56
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
57
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
58
+ to set:
59
+
60
+ - `enable_sampling`: Enable subword regularization.
61
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
62
+
63
+ - `nbest_size = {0,1}`: No sampling is performed.
64
+ - `nbest_size > 1`: samples from the nbest_size results.
65
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
66
+ using forward-filtering-and-backward-sampling algorithm.
67
+
68
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
69
+ BPE-dropout.
70
+
71
+ add_bos_token (`bool`, *optional*, defaults to `True`):
72
+ Whether or not to add a `bos_token` at the start of sequences.
73
+ add_eos_token (`bool`, *optional*, defaults to `False`):
74
+ Whether or not to add an `eos_token` at the end of sequences.
75
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
76
+ Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
77
+ extra spaces.
78
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
79
+ Whether or not the default system prompt for Gemma should be used.
80
+ spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
81
+ Whether or not to add spaces between special tokens.
82
+ """
83
+
84
+ vocab_files_names = VOCAB_FILES_NAMES
85
+ model_input_names = ["input_ids", "attention_mask"]
86
+
87
+ def __init__(
88
+ self,
89
+ vocab_file,
90
+ unk_token="<unk>",
91
+ bos_token="<bos>",
92
+ eos_token="<eos>",
93
+ pad_token="<pad>",
94
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
95
+ add_bos_token=True,
96
+ add_eos_token=False,
97
+ clean_up_tokenization_spaces=False,
98
+ use_default_system_prompt=False,
99
+ spaces_between_special_tokens=False,
100
+ **kwargs,
101
+ ):
102
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
103
+ bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
104
+ eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
105
+ unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
106
+ pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
107
+
108
+ self.vocab_file = vocab_file
109
+ self.add_bos_token = add_bos_token
110
+ self.add_eos_token = add_eos_token
111
+ self.use_default_system_prompt = use_default_system_prompt
112
+
113
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
114
+ self.sp_model.Load(vocab_file)
115
+
116
+ super().__init__(
117
+ bos_token=bos_token,
118
+ eos_token=eos_token,
119
+ unk_token=unk_token,
120
+ pad_token=pad_token,
121
+ add_bos_token=add_bos_token,
122
+ add_eos_token=add_eos_token,
123
+ sp_model_kwargs=self.sp_model_kwargs,
124
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
125
+ use_default_system_prompt=use_default_system_prompt,
126
+ spaces_between_special_tokens=spaces_between_special_tokens,
127
+ **kwargs,
128
+ )
129
+
130
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.__getstate__
131
+ def __getstate__(self):
132
+ state = self.__dict__.copy()
133
+ state["sp_model"] = None
134
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
135
+ return state
136
+
137
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.__setstate__
138
+ def __setstate__(self, d):
139
+ self.__dict__ = d
140
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
141
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
142
+
143
+ @property
144
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.vocab_size
145
+ def vocab_size(self):
146
+ """Returns vocab size"""
147
+ return self.sp_model.get_piece_size()
148
+
149
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_vocab
150
+ def get_vocab(self):
151
+ """Returns vocab as a dict"""
152
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
153
+ vocab.update(self.added_tokens_encoder)
154
+ return vocab
155
+
156
+ def _tokenize(self, text, **kwargs):
157
+ """
158
+ Returns a tokenized string. The Gemma tokenizer never adds a prefix space.
159
+ """
160
+ return self.sp_model.encode(text, out_type=str)
161
+
162
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer._convert_token_to_id
163
+ def _convert_token_to_id(self, token):
164
+ """Converts a token (str) in an id using the vocab."""
165
+ return self.sp_model.piece_to_id(token)
166
+
167
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer._convert_id_to_token
168
+ def _convert_id_to_token(self, index):
169
+ """Converts an index (integer) in a token (str) using the vocab."""
170
+ token = self.sp_model.IdToPiece(index)
171
+ return token
172
+
173
+ def _decode(
174
+ self,
175
+ token_ids: List[int],
176
+ skip_special_tokens: bool = False,
177
+ spaces_between_special_tokens: bool = False,
178
+ **kwargs,
179
+ ) -> str:
180
+ sub_texts = []
181
+ current_sub_text = []
182
+ for ids in token_ids:
183
+ if skip_special_tokens and ids in self.all_special_ids:
184
+ continue
185
+ if ids in self._added_tokens_decoder:
186
+ if current_sub_text:
187
+ sub_texts.append(self.sp_model.decode(current_sub_text))
188
+ sub_texts.append(self._added_tokens_decoder[ids].content)
189
+ current_sub_text = []
190
+ else:
191
+ current_sub_text.append(ids)
192
+ if current_sub_text:
193
+ sub_texts.append(self.sp_model.decode(current_sub_text))
194
+
195
+ if spaces_between_special_tokens:
196
+ sub_texts = " ".join(sub_texts)
197
+ else:
198
+ sub_texts = "".join(sub_texts)
199
+
200
+ return sub_texts
201
+
202
+ def convert_tokens_to_string(self, tokens):
203
+ """Converts a sequence of tokens (string) in a single string."""
204
+ current_sub_tokens = []
205
+ out_string = ""
206
+ for token in tokens:
207
+ # make sure that special tokens are not decoded using sentencepiece model
208
+ if token in self._added_tokens_encoder:
209
+ out_string += self.sp_model.decode(current_sub_tokens) + token
210
+ current_sub_tokens = []
211
+ else:
212
+ current_sub_tokens.append(token)
213
+ out_string += self.sp_model.decode(current_sub_tokens)
214
+ return out_string
215
+
216
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.save_vocabulary
217
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
218
+ """
219
+ Save the vocabulary and special tokens file to a directory.
220
+
221
+ Args:
222
+ save_directory (`str`):
223
+ The directory in which to save the vocabulary.
224
+
225
+ Returns:
226
+ `Tuple(str)`: Paths to the files saved.
227
+ """
228
+ if not os.path.isdir(save_directory):
229
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
230
+ return
231
+ out_vocab_file = os.path.join(
232
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
233
+ )
234
+
235
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
236
+ copyfile(self.vocab_file, out_vocab_file)
237
+ elif not os.path.isfile(self.vocab_file):
238
+ with open(out_vocab_file, "wb") as fi:
239
+ content_spiece_model = self.sp_model.serialized_model_proto()
240
+ fi.write(content_spiece_model)
241
+
242
+ return (out_vocab_file,)
243
+
244
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
245
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
246
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
247
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
248
+
249
+ output = bos_token_id + token_ids_0 + eos_token_id
250
+
251
+ if token_ids_1 is not None:
252
+ output = output + bos_token_id + token_ids_1 + eos_token_id
253
+
254
+ return output
255
+
256
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_special_tokens_mask
257
+ def get_special_tokens_mask(
258
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
259
+ ) -> List[int]:
260
+ """
261
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
262
+ special tokens using the tokenizer `prepare_for_model` method.
263
+
264
+ Args:
265
+ token_ids_0 (`List[int]`):
266
+ List of IDs.
267
+ token_ids_1 (`List[int]`, *optional*):
268
+ Optional second list of IDs for sequence pairs.
269
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
270
+ Whether or not the token list is already formatted with special tokens for the model.
271
+
272
+ Returns:
273
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
274
+ """
275
+ if already_has_special_tokens:
276
+ return super().get_special_tokens_mask(
277
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
278
+ )
279
+
280
+ bos_token_id = [1] if self.add_bos_token else []
281
+ eos_token_id = [1] if self.add_eos_token else []
282
+
283
+ if token_ids_1 is None:
284
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
285
+ return (
286
+ bos_token_id
287
+ + ([0] * len(token_ids_0))
288
+ + eos_token_id
289
+ + bos_token_id
290
+ + ([0] * len(token_ids_1))
291
+ + eos_token_id
292
+ )
293
+
294
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.create_token_type_ids_from_sequences
295
+ def create_token_type_ids_from_sequences(
296
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
297
+ ) -> List[int]:
298
+ """
299
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
300
+ sequence pair mask has the following format:
301
+
302
+ ```
303
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
304
+ | first sequence | second sequence |
305
+ ```
306
+
307
+ if token_ids_1 is None, only returns the first portion of the mask (0s).
308
+
309
+ Args:
310
+ token_ids_0 (`List[int]`):
311
+ List of ids.
312
+ token_ids_1 (`List[int]`, *optional*):
313
+ Optional second list of IDs for sequence pairs.
314
+
315
+ Returns:
316
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
317
+ """
318
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
319
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
320
+
321
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
322
+
323
+ if token_ids_1 is not None:
324
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
325
+
326
+ return output
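The special-token plumbing at the end of this file is easiest to see on concrete ids. Below is a minimal sketch of the same add-BOS/add-EOS logic outside the class; `BOS_ID` and `EOS_ID` are placeholder values, not the real Gemma vocabulary ids.

```python
# Minimal sketch of the special-token logic above, using placeholder ids
# (BOS_ID/EOS_ID are illustrative, not the real Gemma vocabulary ids).
BOS_ID, EOS_ID = 2, 1

def build_inputs(ids_0, ids_1=None, add_bos=True, add_eos=False):
    bos = [BOS_ID] if add_bos else []
    eos = [EOS_ID] if add_eos else []
    out = bos + ids_0 + eos
    if ids_1 is not None:
        out += bos + ids_1 + eos
    return out

def token_type_ids(ids_0, ids_1=None, add_bos=True, add_eos=False):
    bos = [BOS_ID] if add_bos else []
    eos = [EOS_ID] if add_eos else []
    out = [0] * len(bos + ids_0 + eos)
    if ids_1 is not None:
        out += [1] * len(bos + ids_1 + eos)
    return out

print(build_inputs([10, 11], [20, 21]))    # [2, 10, 11, 2, 20, 21]
print(token_type_ids([10, 11], [20, 21]))  # [0, 0, 0, 1, 1, 1]
```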
venv/lib/python3.10/site-packages/transformers/models/gemma/tokenization_gemma_fast.py ADDED
@@ -0,0 +1,199 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import os
16
+ from shutil import copyfile
17
+ from typing import Optional, Tuple
18
+
19
+ from tokenizers import processors
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from ...utils import is_sentencepiece_available, logging
23
+ from ...utils.versions import require_version
24
+
25
+
26
+ require_version("tokenizers>=0.13.3")
27
+
28
+ if is_sentencepiece_available():
29
+ from .tokenization_gemma import GemmaTokenizer
30
+ else:
31
+ GemmaTokenizer = None
32
+
33
+ logger = logging.get_logger(__name__)
34
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"}
35
+
36
+
37
+ class GemmaTokenizerFast(PreTrainedTokenizerFast):
38
+ """
39
+ Construct a Gemma tokenizer fast. Based on byte-level Byte-Pair-Encoding.
40
+
41
+ Notably, this uses ByteFallback and no prefix space. Normalization is applied to replace `" "` with `"▁"`.
42
+
43
+ ```python
44
+ >>> from transformers import GemmaTokenizerFast
45
+
46
+ >>> tokenizer = GemmaTokenizerFast.from_pretrained("hf-internal-testing/dummy-gemma")
47
+ >>> tokenizer.encode("Hello this is a test")
48
+ [2, 4521, 736, 603, 476, 2121]
49
+ ```
50
+
51
+ If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
52
+ call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
53
+ values of the first token and final token of an encoded sequence will not be correct). For more details, check out
54
+ the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
55
+
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
58
+ refer to this superclass for more information regarding those methods.
59
+
60
+ Args:
61
+ vocab_file (`str`, *optional*):
62
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
63
+ contains the vocabulary necessary to instantiate a tokenizer.
64
+ tokenizer_file (`str`, *optional*):
65
+ [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
66
+ contains everything needed to load the tokenizer.
67
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
68
+ Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
69
+ extra spaces.
70
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
71
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
72
+ token instead.
73
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<bos>"`):
74
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
75
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<eos>"`):
76
+ The end of sequence token.
77
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
78
+ The padding token.
79
+ add_bos_token (`bool`, *optional*, defaults to `True`):
80
+ Whether or not to add a `bos_token` at the start of sequences.
81
+ add_eos_token (`bool`, *optional*, defaults to `False`):
82
+ Whether or not to add an `eos_token` at the end of sequences.
83
+ """
84
+
85
+ vocab_files_names = VOCAB_FILES_NAMES
86
+ slow_tokenizer_class = GemmaTokenizer
87
+ padding_side = "left"
88
+ model_input_names = ["input_ids", "attention_mask"]
89
+
90
+ def __init__(
91
+ self,
92
+ vocab_file=None,
93
+ tokenizer_file=None,
94
+ clean_up_tokenization_spaces=False,
95
+ unk_token="<unk>",
96
+ bos_token="<bos>",
97
+ eos_token="<eos>",
98
+ pad_token="<pad>",
99
+ add_bos_token=True,
100
+ add_eos_token=False,
101
+ **kwargs,
102
+ ):
103
+ super().__init__(
104
+ vocab_file=vocab_file,
105
+ tokenizer_file=tokenizer_file,
106
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
107
+ unk_token=unk_token,
108
+ bos_token=bos_token,
109
+ eos_token=eos_token,
110
+ pad_token=pad_token,
111
+ add_bos_token=add_bos_token,
112
+ add_eos_token=add_eos_token,
113
+ **kwargs,
114
+ )
115
+ self._add_bos_token = add_bos_token
116
+ self._add_eos_token = add_eos_token
117
+ self.update_post_processor()
118
+ self.vocab_file = vocab_file
119
+
120
+ @property
121
+ def can_save_slow_tokenizer(self) -> bool:
122
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
123
+
124
+ # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor
125
+ def update_post_processor(self):
126
+ """
127
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
128
+ """
129
+ bos = self.bos_token
130
+ bos_token_id = self.bos_token_id
131
+ if bos is None and self.add_bos_token:
132
+ raise ValueError("add_bos_token = True but bos_token = None")
133
+
134
+ eos = self.eos_token
135
+ eos_token_id = self.eos_token_id
136
+ if eos is None and self.add_eos_token:
137
+ raise ValueError("add_eos_token = True but eos_token = None")
138
+
139
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
140
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
141
+
142
+ special_tokens = []
143
+ if self.add_bos_token:
144
+ special_tokens.append((bos, bos_token_id))
145
+ if self.add_eos_token:
146
+ special_tokens.append((eos, eos_token_id))
147
+ self._tokenizer.post_processor = processors.TemplateProcessing(
148
+ single=single, pair=pair, special_tokens=special_tokens
149
+ )
150
+
151
+ @property
152
+ def add_eos_token(self):
153
+ return self._add_eos_token
154
+
155
+ @property
156
+ def add_bos_token(self):
157
+ return self._add_bos_token
158
+
159
+ @add_eos_token.setter
160
+ def add_eos_token(self, value):
161
+ self._add_eos_token = value
162
+ self.update_post_processor()
163
+
164
+ @add_bos_token.setter
165
+ def add_bos_token(self, value):
166
+ self._add_bos_token = value
167
+ self.update_post_processor()
168
+
169
+ # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.save_vocabulary
170
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
171
+ if not self.can_save_slow_tokenizer:
172
+ raise ValueError(
173
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
174
+ "tokenizer."
175
+ )
176
+
177
+ if not os.path.isdir(save_directory):
178
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
179
+ return
180
+ out_vocab_file = os.path.join(
181
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
182
+ )
183
+
184
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
185
+ copyfile(self.vocab_file, out_vocab_file)
186
+
187
+ return (out_vocab_file,)
188
+
189
+ # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.build_inputs_with_special_tokens
190
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
191
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
192
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
193
+
194
+ output = bos_token_id + token_ids_0 + eos_token_id
195
+
196
+ if token_ids_1 is not None:
197
+ output = output + bos_token_id + token_ids_1 + eos_token_id
198
+
199
+ return output
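As a reference for `update_post_processor` above, the template strings it hands to `processors.TemplateProcessing` can be previewed without instantiating a tokenizer; a small sketch evaluated with the defaults (`add_bos_token=True`, `add_eos_token=False`):

```python
# Sketch of the single/pair templates built by update_post_processor above,
# evaluated with the defaults add_bos_token=True, add_eos_token=False.
bos, eos = "<bos>", "<eos>"
add_bos_token, add_eos_token = True, False

single = f"{(bos + ':0 ') if add_bos_token else ''}$A:0{(' ' + eos + ':0') if add_eos_token else ''}"
pair = f"{single}{(' ' + bos + ':1') if add_bos_token else ''} $B:1{(' ' + eos + ':1') if add_eos_token else ''}"

print(single)  # <bos>:0 $A:0
print(pair)    # <bos>:0 $A:0 <bos>:1 $B:1
```

With `add_eos_token=True` the same strings gain a trailing `<eos>:0` / `<eos>:1`, which is exactly what the `add_eos_token` setter re-triggers via `update_post_processor`.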
venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/__init__.py ADDED
@@ -0,0 +1,43 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
18
+
19
+
20
+ _import_structure = {}
21
+
22
+ try:
23
+ if not is_sentencepiece_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
29
+
30
+
31
+ if TYPE_CHECKING:
32
+ try:
33
+ if not is_sentencepiece_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ from .tokenization_gpt_sw3 import GPTSw3Tokenizer
39
+
40
+ else:
41
+ import sys
42
+
43
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
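The lazy-module setup above means the package imports cleanly even without sentencepiece; the tokenizer symbol only resolves if the optional dependency is present. A small sketch of how a caller might guard for that (assumes transformers itself is installed):

```python
# Sketch: guard for the optional sentencepiece dependency before using the tokenizer.
from transformers.utils import is_sentencepiece_available

if is_sentencepiece_available():
    from transformers.models.gpt_sw3 import GPTSw3Tokenizer  # resolved lazily by _LazyModule
    print("GPTSw3Tokenizer available:", GPTSw3Tokenizer.__name__)
else:
    print("sentencepiece is not installed; GPTSw3Tokenizer cannot be imported")
```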
venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (695 Bytes). View file
 
venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/tokenization_gpt_sw3.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py ADDED
@@ -0,0 +1,197 @@
1
+ # Copyright 2022 The HuggingFace Inc. team and the AI-Sweden team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """ Convert GPT-SW3 megatron checkpoints to pytorch"""
15
+
16
+ import argparse
17
+ import os
18
+ from os.path import isfile
19
+
20
+ import torch
21
+
22
+ from transformers import GPT2Config
23
+
24
+
25
+ def recursive_print(name, val, spaces=0):
26
+ # Format the message.
27
+ if name is None:
28
+ msg = None
29
+ else:
30
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
31
+ msg = fmt.format(name)
32
+
33
+ # Print and recurse (if needed).
34
+ if isinstance(val, dict):
35
+ if msg is not None:
36
+ print(msg)
37
+ for k in val.keys():
38
+ recursive_print(k, val[k], spaces + 2)
39
+ elif isinstance(val, torch.Tensor):
40
+ print(msg, ":", val.size())
41
+ else:
42
+ print(msg, ":", val)
43
+
44
+
45
+ def fix_query_key_value_ordering(param, num_splits, num_heads, hidden_size):
46
+ # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
47
+ # for compatibility with later versions of NVIDIA Megatron-LM.
48
+ # The inverse operation is performed inside Megatron-LM to read checkpoints:
49
+ # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
50
+ # If param is the weight tensor of the self-attention block, the returned tensor
51
+ # will have to be transposed one more time to be read by HuggingFace GPT2.
52
+ input_shape = param.size()
53
+ # other versions store [num_heads * num_splits * hidden_size, :]
54
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
55
+ param = param.view(*saved_shape)
56
+ param = param.transpose(0, 1).contiguous()
57
+ param = param.view(*input_shape)
58
+ return param
59
+
60
+
61
+ def convert_megatron_checkpoint(sd_megatron, config):
62
+ """
63
+ Converts a Megatron checkpoint to a HuggingFace GPT-SW3 checkpoint.
64
+ """
65
+ n_positions = config.n_positions
66
+ layers = config.n_layer
67
+ vocab_size = config.vocab_size
68
+ heads = config.n_head
69
+ hidden_size_per_head = config.n_embd // config.n_head
70
+
71
+ word_embeddings = sd_megatron["model.language_model.embedding.word_embeddings.weight"][:vocab_size, :]
72
+ sd_hf = {
73
+ "transformer.wte.weight": word_embeddings,
74
+ "transformer.wpe.weight": sd_megatron["model.language_model.embedding.position_embeddings.weight"],
75
+ "transformer.ln_f.weight": sd_megatron["model.language_model.encoder.final_layernorm.weight"],
76
+ "transformer.ln_f.bias": sd_megatron["model.language_model.encoder.final_layernorm.bias"],
77
+ }
78
+
79
+ pf = "model.language_model.encoder.layers."
80
+ for i in range(layers):
81
+ causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.bool))
82
+ causal_mask = causal_mask.view(1, 1, n_positions, n_positions)
83
+ sd_hf[f"transformer.h.{i}.attn.bias"] = causal_mask
84
+ sd_hf[f"transformer.h.{i}.attn.masked_bias"] = torch.tensor(-1e4, dtype=torch.bfloat16)
85
+
86
+ sd_hf[f"transformer.h.{i}.ln_1.weight"] = sd_megatron[f"{pf}{i}.input_layernorm.weight"]
87
+ sd_hf[f"transformer.h.{i}.ln_1.bias"] = sd_megatron[f"{pf}{i}.input_layernorm.bias"]
88
+
89
+ val1 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.weight"]
90
+ val1 = fix_query_key_value_ordering(val1, 3, heads, hidden_size_per_head)
91
+ sd_hf[f"transformer.h.{i}.attn.c_attn.weight"] = val1.transpose(0, 1).contiguous()
92
+
93
+ val2 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.bias"]
94
+ val2 = fix_query_key_value_ordering(val2, 3, heads, hidden_size_per_head)
95
+ sd_hf[f"transformer.h.{i}.attn.c_attn.bias"] = val2
96
+
97
+ sd_hf[f"transformer.h.{i}.attn.c_proj.weight"] = sd_megatron[f"{pf}{i}.self_attention.dense.weight"].transpose(
98
+ 0, 1
99
+ )
100
+ sd_hf[f"transformer.h.{i}.attn.c_proj.bias"] = sd_megatron[f"{pf}{i}.self_attention.dense.bias"]
101
+ sd_hf[f"transformer.h.{i}.ln_2.weight"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.weight"]
102
+ sd_hf[f"transformer.h.{i}.ln_2.bias"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.bias"]
103
+ sd_hf[f"transformer.h.{i}.mlp.c_fc.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.weight"].transpose(0, 1)
104
+ sd_hf[f"transformer.h.{i}.mlp.c_fc.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.bias"]
105
+ sd_hf[f"transformer.h.{i}.mlp.c_proj.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.weight"].transpose(
106
+ 0, 1
107
+ )
108
+ sd_hf[f"transformer.h.{i}.mlp.c_proj.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.bias"]
109
+
110
+ # For the LM head, transformers wants the weight matrix tied to the word embeddings.
111
+ sd_hf["lm_head.weight"] = word_embeddings
112
+
113
+ return sd_hf
114
+
115
+
116
+ def copy_config(config_hf, config_megatron):
117
+ """Copy the config from Megatron to hf."""
118
+ config_hf.vocab_size = 64000
119
+ config_hf.n_positions = config_megatron["encoder_seq_length"]
120
+ config_hf.n_embd = config_megatron["hidden_size"]
121
+ config_hf.n_layer = config_megatron["num_layers"]
122
+ config_hf.n_head = config_megatron["num_attention_heads"]
123
+ config_hf.n_inner = config_megatron["ffn_hidden_size"]
124
+ config_hf.activation_function = "gelu"
125
+ config_hf.resid_pdrop = 0.1
126
+ config_hf.embd_pdrop = 0.1
127
+ config_hf.attn_pdrop = 0.1
128
+ config_hf.layer_norm_epsilon = config_megatron["layernorm_epsilon"] # 1e-5
129
+ config_hf.initializer_range = config_megatron["init_method_std"] # 0.02
130
+ config_hf.apply_query_key_layer_scaling = config_megatron["apply_query_key_layer_scaling"] # True
131
+ config_hf.normalize_attention_scores = True
132
+ config_hf.use_cache = True
133
+
134
+ # This identifies the 6.7B (7B) model which uses a different tokenizer
135
+ if config_megatron["hidden_size"] == 4096:
136
+ config_hf.bos_token_id = 1 # <|endoftext|>
137
+ config_hf.eos_token_id = 1 # <|endoftext|>
138
+ config_hf.pad_token_id = 0 # <unk>
139
+ else:
140
+ config_hf.bos_token_id = 2 # <s>
141
+ config_hf.eos_token_id = 3 # <|endoftext|>
142
+ config_hf.pad_token_id = 0 # <pad>
143
+
144
+ return config_hf
145
+
146
+
147
+ def main(args):
148
+ print(args)
149
+
150
+ checkpoint_path = args.checkpoint_path
151
+ save_path = args.save_path
152
+ if not isfile(checkpoint_path):
153
+ raise FileNotFoundError(f"ERROR! could not find file {checkpoint_path}")
154
+
155
+ # Load the model.
156
+ checkpoint = torch.load(checkpoint_path, map_location="cpu")
157
+
158
+ # Load the config.
159
+ config_megatron = checkpoint["hyper_parameters"]["cfg"]
160
+ config_hf = GPT2Config()
161
+ config_hf = copy_config(config_hf=config_hf, config_megatron=config_megatron)
162
+ config_hf.architectures = ["GPT2LMHeadModel"]
163
+
164
+ sd_megatron = checkpoint["state_dict"]
165
+
166
+ # Convert.
167
+ print("Converting")
168
+ sd_hf = convert_megatron_checkpoint(sd_megatron, config_hf)
169
+
170
+ # Print the structure of converted state dict.
171
+ if args.print_checkpoint_structure:
172
+ recursive_print(None, sd_hf)
173
+
174
+ config_hf.tokenizer_class = "GPTSw3Tokenizer"
175
+
176
+ # Store the config to file.
177
+ print("Saving config")
178
+ config_hf.save_pretrained(save_path)
179
+
180
+ # Store the state_dict to file.
181
+ output_checkpoint_file = os.path.join(save_path, "pytorch_model.bin")
182
+ print(f'Saving checkpoint to "{output_checkpoint_file}"')
183
+ torch.save(sd_hf, output_checkpoint_file)
184
+
185
+
186
+ if __name__ == "__main__":
187
+ parser = argparse.ArgumentParser()
188
+ parser.add_argument(
189
+ "--checkpoint_path",
190
+ type=str,
191
+ required=True,
192
+ help="e.g. megatron_gpt--val_loss=2.42-step=38000-consumed_samples=54720000",
193
+ )
194
+ parser.add_argument("--save_path", type=str, required=True, help="e.g. /home/user/gpt-sw3/hf")
195
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
196
+ _args = parser.parse_args()
197
+ main(_args)
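The least obvious step in the conversion above is the fused query/key/value re-ordering. A toy demonstration of the same permutation on a tiny tensor (sizes are illustrative, not real GPT-SW3 dimensions):

```python
# Toy demonstration of the re-ordering performed by fix_query_key_value_ordering:
# Megatron saves the fused QKV projection with layout [num_heads, num_splits, head_dim, ...]
# and the converter permutes it to [num_splits, num_heads, head_dim, ...] before flattening back.
import torch

num_heads, num_splits, head_dim = 2, 3, 1  # toy sizes, not real GPT-SW3 dimensions
param = torch.arange(num_heads * num_splits * head_dim, dtype=torch.float32).unsqueeze(1)

saved_shape = (num_heads, num_splits, head_dim) + param.size()[1:]
reordered = param.view(*saved_shape).transpose(0, 1).contiguous().view(*param.size())

print(param.squeeze(1).tolist())      # [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
print(reordered.squeeze(1).tolist())  # [0.0, 3.0, 1.0, 4.0, 2.0, 5.0]
```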
venv/lib/python3.10/site-packages/transformers/models/gpt_sw3/tokenization_gpt_sw3.py ADDED
@@ -0,0 +1,318 @@
1
+ """The tokenizer used by the GPT-SW3 models."""
2
+
3
+ import os
4
+ import re
5
+ import unicodedata
6
+ from shutil import copyfile
7
+ from typing import Any, Dict, List, Optional, Tuple, Union
8
+
9
+ import sentencepiece as spm
10
+
11
+ from ...tokenization_utils import PreTrainedTokenizer
12
+ from ...utils import is_torch_available, logging
13
+
14
+
15
+ if is_torch_available():
16
+ import torch
17
+
18
+
19
+ logger = logging.get_logger(__name__)
20
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
21
+
22
+
23
+ class GPTSw3Tokenizer(PreTrainedTokenizer):
24
+ """
25
+ Construct a GPTSw3 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
26
+
27
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
28
+ this superclass for more information regarding those methods.
29
+
30
+ Example usage:
31
+ ```python
32
+ >>> from transformers import GPTSw3Tokenizer
33
+
34
+ >>> tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")
35
+ >>> tokenizer("Svenska är kul!")["input_ids"]
36
+ [1814, 377, 3617, 63504]
37
+ ```
38
+
39
+ Args:
40
+ vocab_file (`str`):
41
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
42
+ contains the vocabulary necessary to instantiate a tokenizer.
43
+ do_lower_case (`bool`, *optional*, defaults to `False`):
44
+ Whether or not to lowercase the input when tokenizing.
45
+ remove_space (`bool`, *optional*, defaults to `False`):
46
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
47
+ keep_accents (`bool`, *optional*, defaults to `False`):
48
+ Whether or not to keep accents when tokenizing.
49
+ pad_token (`str`, *optional*):
50
+ The token used for padding, for example when batching sequences of different lengths. If not provided, will
51
+ default to '<pad>' or '<unk>' depending on model size.
52
+ unk_token (`str`, *optional*):
53
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
54
+ token instead. If not provided, will default to '<unk>'.
55
+ eos_token (`str`, *optional*):
56
+ The end of sequence token seen during pretraining. If not provided, will default to '<|endoftext|>'
57
+ bos_token (`str`, *optional*):
58
+ The beginning of sequence token that can be used for downstream tasks; it was not seen during pretraining. If
59
+ not provided, will default to '<s>' or '<|endoftext|>', depending on model size.
60
+ sp_model_kwargs (`dict`, *optional*):
61
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
62
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
63
+ to set:
64
+
65
+ - `enable_sampling`: Enable subword regularization.
66
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
67
+
68
+ - `nbest_size = {0,1}`: No sampling is performed.
69
+ - `nbest_size > 1`: samples from the nbest_size results.
70
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
71
+ using the forward-filtering-and-backward-sampling algorithm.
72
+
73
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
74
+ BPE-dropout.
75
+
76
+ Attributes:
77
+ sp_model (`SentencePieceProcessor`):
78
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
79
+ whitespaces (`set`):
80
+ The whitespaces that are replaced in the whitespace normalization in preprocessing.
81
+ non_printing_characters_re (`Pattern`):
82
+ The compiled regular expression to remove non-printing characters in preprocessing.
83
+ """
84
+
85
+ vocab_files_names = VOCAB_FILES_NAMES
86
+ model_input_names = ["input_ids", "attention_mask"]
87
+
88
+ def __init__(
89
+ self,
90
+ vocab_file,
91
+ do_lower_case=False,
92
+ remove_space=False,
93
+ keep_accents=False,
94
+ pad_token=None,
95
+ unk_token=None,
96
+ eos_token=None,
97
+ bos_token=None,
98
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
99
+ **kwargs,
100
+ ) -> None:
101
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
102
+
103
+ name_or_path = kwargs.get("name_or_path")
104
+ if name_or_path is None:
105
+ logger.warning(
106
+ "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
107
+ " you are testing the model, this can safely be ignored"
108
+ )
109
+ name_or_path = "None"
110
+
111
+ # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
112
+ eos_token = "<|endoftext|>" if eos_token is None else eos_token
113
+ unk_token = "<unk>" if unk_token is None else unk_token
114
+ if "gpt-sw3-7b" in name_or_path:
115
+ pad_token = unk_token if pad_token is None else pad_token
116
+ bos_token = eos_token if bos_token is None else bos_token
117
+ else:
118
+ pad_token = "<pad>" if pad_token is None else pad_token
119
+ bos_token = "<s>" if bos_token is None else bos_token
120
+
121
+ self.do_lower_case = do_lower_case
122
+ self.remove_space = remove_space
123
+ self.keep_accents = keep_accents
124
+ self.vocab_file = vocab_file
125
+
126
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
127
+ self.sp_model.Load(vocab_file)
128
+
129
+ # Used for whitespace normalization in input texts
130
+ # fmt: off
131
+ self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"}
132
+ # fmt: on
133
+
134
+ # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
135
+ self.non_printing_characters_re = re.compile(
136
+ f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
137
+ )
138
+
139
+ super().__init__(
140
+ do_lower_case=do_lower_case,
141
+ remove_space=remove_space,
142
+ keep_accents=keep_accents,
143
+ bos_token=bos_token,
144
+ eos_token=eos_token,
145
+ unk_token=unk_token,
146
+ pad_token=pad_token,
147
+ sp_model_kwargs=self.sp_model_kwargs,
148
+ **kwargs,
149
+ )
150
+
151
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.__getstate__
152
+ def __getstate__(self):
153
+ state = self.__dict__.copy()
154
+ state["sp_model"] = None
155
+ return state
156
+
157
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.__setstate__
158
+ def __setstate__(self, d):
159
+ self.__dict__ = d
160
+
161
+ # for backward compatibility
162
+ if not hasattr(self, "sp_model_kwargs"):
163
+ self.sp_model_kwargs = {}
164
+
165
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
166
+ self.sp_model.Load(self.vocab_file)
167
+
168
+ @property
169
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
170
+ def vocab_size(self) -> int:
171
+ return len(self.sp_model)
172
+
173
+ def preprocess_text(self, text: str) -> str:
174
+ """
175
+ Returns the preprocessed text. This procedure is identical to what was used when training the tokenizer.
176
+ """
177
+
178
+ # Remove non-printing characters
179
+ text = self.non_printing_characters_re.sub("", text)
180
+
181
+ # Normalize whitespaces
182
+ text = "".join([char if char not in self.whitespaces else " " for char in text])
183
+
184
+ # NFC Unicode normalization
185
+ text = unicodedata.normalize("NFC", text)
186
+ return text
187
+
188
+ def _tokenize(self, text: str, **kwargs) -> List[str]:
189
+ text = self.preprocess_text(text)
190
+ return self.sp_model.encode(text, out_type=str)
191
+
192
+ def _convert_token_to_id(self, token: str) -> int:
193
+ """Converts a token (str) to an id (int) using the vocab."""
194
+ return self.sp_model.PieceToId(token)
195
+
196
+ def _convert_id_to_token(self, index: int) -> str:
197
+ """Converts an index (int) to a token (str) using the vocab."""
198
+ return self.sp_model.IdToPiece(index)
199
+
200
+ @staticmethod
201
+ def clean_up_tokenization(out_string: str) -> str:
202
+ """Returns the input string, this function is overridden to remove the default clean up."""
203
+ return out_string
204
+
205
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
206
+ """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
207
+ current_sub_tokens = []
208
+ out_string = ""
209
+ prev_is_special = False
210
+ for token in tokens:
211
+ # make sure that special tokens are not decoded using sentencepiece model
212
+ if token in self.all_special_tokens:
213
+ # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
214
+ if not prev_is_special:
215
+ out_string += " "
216
+
217
+ out_string += self.sp_model.decode(current_sub_tokens) + token
218
+ prev_is_special = True
219
+ current_sub_tokens = []
220
+ else:
221
+ current_sub_tokens.append(token)
222
+ prev_is_special = False
223
+ out_string += self.sp_model.decode(current_sub_tokens)
224
+
225
+ return out_string
226
+
227
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.get_vocab
228
+ def get_vocab(self) -> Dict[str, int]:
229
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
230
+ vocab.update(self.added_tokens_encoder)
231
+ return vocab
232
+
233
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.save_vocabulary
234
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
235
+ if not os.path.isdir(save_directory):
236
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
237
+ return
238
+ out_vocab_file = os.path.join(
239
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
240
+ )
241
+
242
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
243
+ copyfile(self.vocab_file, out_vocab_file)
244
+ elif not os.path.isfile(self.vocab_file):
245
+ with open(out_vocab_file, "wb") as fi:
246
+ content_spiece_model = self.sp_model.serialized_model_proto()
247
+ fi.write(content_spiece_model)
248
+
249
+ return (out_vocab_file,)
250
+
251
+ def encode_fast(
252
+ self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
253
+ ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
254
+ """
255
+ Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
256
+ functionality but is often much faster.
257
+
258
+ Does NOT handle special tokens correctly, these can manually be added as ids afterwards.
259
+
260
+ Does NOT support padding, these can manually be added as ids afterwards.
261
+
262
+ Use default HuggingFace tokenization methods for full functionality.
263
+
264
+ Args:
265
+ text (`str` or `List[str]`): One or several text(s) to convert to token ids.
266
+ return_tensors (`str` or `bool`): Returns PyTorch tensors if set to True or "pt"
267
+
268
+ Returns:
269
+ `List[int]`, `List[List[int]]`, or `torch.Tensor`: The encoded text(s) as token ids.
270
+ """
271
+
272
+ if isinstance(text, str):
273
+ text = self.preprocess_text(text)
274
+ token_ids = self.sp_model.encode(text)
275
+ else:
276
+ text = [self.preprocess_text(t) for t in text]
277
+ token_ids = self.sp_model.encode(text)
278
+
279
+ if return_tensors is True or return_tensors == "pt":
280
+ token_ids = torch.tensor(token_ids)
281
+
282
+ return token_ids
283
+
284
+ def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
285
+ """
286
+ Decodes a token id or a sequence of token ids back to text using the raw SP tokenizer. This has reduced
287
+ functionality but is often much faster.
288
+
289
+ Args:
290
+ token_ids (`int` or `List[int]`): Encoded token or text as token id(s).
291
+
292
+ Returns:
293
+ `str`: Decoded text
294
+ """
295
+
296
+ return self.sp_model.decode(token_ids)
297
+
298
+ @property
299
+ def default_chat_template(self):
300
+ """
301
+ This chat template formats messages like an instant messenger chat log, with "User:" and "Bot:" strings
302
+ preceding messages. BOS tokens are added between all messages.
303
+ """
304
+ logger.warning_once(
305
+ "\nNo chat template is defined for this tokenizer - using the default template "
306
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
307
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
308
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
309
+ )
310
+ return (
311
+ "{{ eos_token }}{{ bos_token }}"
312
+ "{% for message in messages %}"
313
+ "{% if message['role'] == 'user' %}{{ 'User: ' + message['content']}}"
314
+ "{% else %}{{ 'Bot: ' + message['content']}}{% endif %}"
315
+ "{{ message['text'] }}{{ bos_token }}"
316
+ "{% endfor %}"
317
+ "Bot:"
318
+ )
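A short usage sketch of the fast paths defined above; it assumes the sentencepiece extra is installed and that the `AI-Sweden-Models/gpt-sw3-126m` checkpoint referenced in the class docstring is accessible:

```python
# Usage sketch for encode_fast/decode_fast; requires sentencepiece and access
# to the AI-Sweden-Models/gpt-sw3-126m checkpoint used in the docstring example.
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")

# encode_fast skips special-token handling and padding: just preprocess + SentencePiece.
ids = tokenizer.encode_fast("Svenska är kul!")
print(ids)

# decode_fast is the raw SentencePiece decode of those ids.
print(tokenizer.decode_fast(ids))

# preprocess_text (run before every tokenization above) strips non-printing
# characters, maps a set of unusual whitespace characters to a plain space,
# and applies NFC normalization.
print(tokenizer.preprocess_text("Svenska\u2009är kul!"))
```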