applied-ai-018 commited on
Commit
00f7b68
·
verified ·
1 Parent(s): 7dcc96a

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py +71 -0
  2. env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py +124 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py +1008 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py +185 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/__init__.py +0 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__init__.py +0 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/__init__.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/convert_bort_original_gluonnlp_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py +319 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__init__.py +45 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/configuration_mmbt.py +42 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/modeling_mmbt.py +408 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__init__.py +95 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py +168 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/modeling_open_llama.py +968 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py +73 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py +112 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py +220 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py +537 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py +205 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__init__.py +29 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/tokenization_tapex.py +1487 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__init__.py +63 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py +159 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py +70 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py +608 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py +97 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
18
+
19
+
20
+ _import_structure = {"configuration_bert_generation": ["BertGenerationConfig"]}
21
+
22
+ try:
23
+ if not is_sentencepiece_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["tokenization_bert_generation"] = ["BertGenerationTokenizer"]
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_bert_generation"] = [
37
+ "BertGenerationDecoder",
38
+ "BertGenerationEncoder",
39
+ "BertGenerationPreTrainedModel",
40
+ "load_tf_weights_in_bert_generation",
41
+ ]
42
+
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_bert_generation import BertGenerationConfig
46
+
47
+ try:
48
+ if not is_sentencepiece_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ from .tokenization_bert_generation import BertGenerationTokenizer
54
+
55
+ try:
56
+ if not is_torch_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ from .modeling_bert_generation import (
62
+ BertGenerationDecoder,
63
+ BertGenerationEncoder,
64
+ BertGenerationPreTrainedModel,
65
+ load_tf_weights_in_bert_generation,
66
+ )
67
+
68
+ else:
69
+ import sys
70
+
71
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc ADDED
Binary file (5.66 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc ADDED
Binary file (31.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc ADDED
Binary file (7.19 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BertGeneration model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+
19
+
20
+ class BertGenerationConfig(PretrainedConfig):
21
+ r"""
22
+ This is the configuration class to store the configuration of a [`BertGenerationPreTrainedModel`]. It is used to
23
+ instantiate a BertGeneration model according to the specified arguments, defining the model architecture.
24
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the BertGeneration
25
+ [google/bert_for_seq_generation_L-24_bbc_encoder](https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder)
26
+ architecture.
27
+
28
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
29
+ documentation from [`PretrainedConfig`] for more information.
30
+
31
+ Args:
32
+ vocab_size (`int`, *optional*, defaults to 50358):
33
+ Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
34
+ `inputs_ids` passed when calling [`BertGeneration`].
35
+ hidden_size (`int`, *optional*, defaults to 1024):
36
+ Dimensionality of the encoder layers and the pooler layer.
37
+ num_hidden_layers (`int`, *optional*, defaults to 24):
38
+ Number of hidden layers in the Transformer encoder.
39
+ num_attention_heads (`int`, *optional*, defaults to 16):
40
+ Number of attention heads for each attention layer in the Transformer encoder.
41
+ intermediate_size (`int`, *optional*, defaults to 4096):
42
+ Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder.
43
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
44
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
45
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
46
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
47
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
48
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
49
+ The dropout ratio for the attention probabilities.
50
+ max_position_embeddings (`int`, *optional*, defaults to 512):
51
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
52
+ just in case (e.g., 512 or 1024 or 2048).
53
+ initializer_range (`float`, *optional*, defaults to 0.02):
54
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
55
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
56
+ The epsilon used by the layer normalization layers.
57
+ pad_token_id (`int`, *optional*, defaults to 0):
58
+ Padding token id.
59
+ bos_token_id (`int`, *optional*, defaults to 2):
60
+ Beginning of stream token id.
61
+ eos_token_id (`int`, *optional*, defaults to 1):
62
+ End of stream token id.
63
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
64
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
65
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
66
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
67
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
68
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
69
+ use_cache (`bool`, *optional*, defaults to `True`):
70
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
71
+ relevant if `config.is_decoder=True`.
72
+
73
+ Examples:
74
+
75
+ ```python
76
+ >>> from transformers import BertGenerationConfig, BertGenerationEncoder
77
+
78
+ >>> # Initializing a BertGeneration config
79
+ >>> configuration = BertGenerationConfig()
80
+
81
+ >>> # Initializing a model (with random weights) from the config
82
+ >>> model = BertGenerationEncoder(configuration)
83
+
84
+ >>> # Accessing the model configuration
85
+ >>> configuration = model.config
86
+ ```"""
87
+
88
+ model_type = "bert-generation"
89
+
90
+ def __init__(
91
+ self,
92
+ vocab_size=50358,
93
+ hidden_size=1024,
94
+ num_hidden_layers=24,
95
+ num_attention_heads=16,
96
+ intermediate_size=4096,
97
+ hidden_act="gelu",
98
+ hidden_dropout_prob=0.1,
99
+ attention_probs_dropout_prob=0.1,
100
+ max_position_embeddings=512,
101
+ initializer_range=0.02,
102
+ layer_norm_eps=1e-12,
103
+ pad_token_id=0,
104
+ bos_token_id=2,
105
+ eos_token_id=1,
106
+ position_embedding_type="absolute",
107
+ use_cache=True,
108
+ **kwargs,
109
+ ):
110
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
111
+
112
+ self.vocab_size = vocab_size
113
+ self.hidden_size = hidden_size
114
+ self.num_hidden_layers = num_hidden_layers
115
+ self.num_attention_heads = num_attention_heads
116
+ self.hidden_act = hidden_act
117
+ self.intermediate_size = intermediate_size
118
+ self.hidden_dropout_prob = hidden_dropout_prob
119
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
120
+ self.max_position_embeddings = max_position_embeddings
121
+ self.initializer_range = initializer_range
122
+ self.layer_norm_eps = layer_norm_eps
123
+ self.position_embedding_type = position_embedding_type
124
+ self.use_cache = use_cache
env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py ADDED
@@ -0,0 +1,1008 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch BERT model specific for generation."""
16
+
17
+ import math
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import CrossEntropyLoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
29
+ from ...utils import (
30
+ add_code_sample_docstrings,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_bert_generation import BertGenerationConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ _CHECKPOINT_FOR_DOC = "google/bert_for_seq_generation_L-24_bbc_encoder"
42
+ _CONFIG_FOR_DOC = "BertGenerationConfig"
43
+
44
+
45
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BertGeneration
46
+ class BertGenerationSelfOutput(nn.Module):
47
+ def __init__(self, config):
48
+ super().__init__()
49
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
50
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
51
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
52
+
53
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
54
+ hidden_states = self.dense(hidden_states)
55
+ hidden_states = self.dropout(hidden_states)
56
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
57
+ return hidden_states
58
+
59
+
60
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration
61
+ class BertGenerationSelfAttention(nn.Module):
62
+ def __init__(self, config, position_embedding_type=None):
63
+ super().__init__()
64
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
65
+ raise ValueError(
66
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
67
+ f"heads ({config.num_attention_heads})"
68
+ )
69
+
70
+ self.num_attention_heads = config.num_attention_heads
71
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
72
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
73
+
74
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
75
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
76
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
77
+
78
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
79
+ self.position_embedding_type = position_embedding_type or getattr(
80
+ config, "position_embedding_type", "absolute"
81
+ )
82
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
83
+ self.max_position_embeddings = config.max_position_embeddings
84
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
85
+
86
+ self.is_decoder = config.is_decoder
87
+
88
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
89
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
90
+ x = x.view(new_x_shape)
91
+ return x.permute(0, 2, 1, 3)
92
+
93
+ def forward(
94
+ self,
95
+ hidden_states: torch.Tensor,
96
+ attention_mask: Optional[torch.FloatTensor] = None,
97
+ head_mask: Optional[torch.FloatTensor] = None,
98
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
99
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
100
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
101
+ output_attentions: Optional[bool] = False,
102
+ ) -> Tuple[torch.Tensor]:
103
+ mixed_query_layer = self.query(hidden_states)
104
+
105
+ # If this is instantiated as a cross-attention module, the keys
106
+ # and values come from an encoder; the attention mask needs to be
107
+ # such that the encoder's padding tokens are not attended to.
108
+ is_cross_attention = encoder_hidden_states is not None
109
+
110
+ if is_cross_attention and past_key_value is not None:
111
+ # reuse k,v, cross_attentions
112
+ key_layer = past_key_value[0]
113
+ value_layer = past_key_value[1]
114
+ attention_mask = encoder_attention_mask
115
+ elif is_cross_attention:
116
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
117
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
118
+ attention_mask = encoder_attention_mask
119
+ elif past_key_value is not None:
120
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
121
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
122
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
123
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
124
+ else:
125
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
126
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
127
+
128
+ query_layer = self.transpose_for_scores(mixed_query_layer)
129
+
130
+ use_cache = past_key_value is not None
131
+ if self.is_decoder:
132
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
133
+ # Further calls to cross_attention layer can then reuse all cross-attention
134
+ # key/value_states (first "if" case)
135
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
136
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
137
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
138
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
139
+ past_key_value = (key_layer, value_layer)
140
+
141
+ # Take the dot product between "query" and "key" to get the raw attention scores.
142
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
143
+
144
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
145
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
146
+ if use_cache:
147
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
148
+ -1, 1
149
+ )
150
+ else:
151
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
152
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
153
+ distance = position_ids_l - position_ids_r
154
+
155
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
156
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
157
+
158
+ if self.position_embedding_type == "relative_key":
159
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
160
+ attention_scores = attention_scores + relative_position_scores
161
+ elif self.position_embedding_type == "relative_key_query":
162
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
163
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
164
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
165
+
166
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
167
+ if attention_mask is not None:
168
+ # Apply the attention mask is (precomputed for all layers in BertGenerationModel forward() function)
169
+ attention_scores = attention_scores + attention_mask
170
+
171
+ # Normalize the attention scores to probabilities.
172
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
173
+
174
+ # This is actually dropping out entire tokens to attend to, which might
175
+ # seem a bit unusual, but is taken from the original Transformer paper.
176
+ attention_probs = self.dropout(attention_probs)
177
+
178
+ # Mask heads if we want to
179
+ if head_mask is not None:
180
+ attention_probs = attention_probs * head_mask
181
+
182
+ context_layer = torch.matmul(attention_probs, value_layer)
183
+
184
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
185
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
186
+ context_layer = context_layer.view(new_context_layer_shape)
187
+
188
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
189
+
190
+ if self.is_decoder:
191
+ outputs = outputs + (past_key_value,)
192
+ return outputs
193
+
194
+
195
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BertGeneration
196
+ class BertGenerationAttention(nn.Module):
197
+ def __init__(self, config, position_embedding_type=None):
198
+ super().__init__()
199
+ self.self = BertGenerationSelfAttention(config, position_embedding_type=position_embedding_type)
200
+ self.output = BertGenerationSelfOutput(config)
201
+ self.pruned_heads = set()
202
+
203
+ def prune_heads(self, heads):
204
+ if len(heads) == 0:
205
+ return
206
+ heads, index = find_pruneable_heads_and_indices(
207
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
208
+ )
209
+
210
+ # Prune linear layers
211
+ self.self.query = prune_linear_layer(self.self.query, index)
212
+ self.self.key = prune_linear_layer(self.self.key, index)
213
+ self.self.value = prune_linear_layer(self.self.value, index)
214
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
215
+
216
+ # Update hyper params and store pruned heads
217
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
218
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
219
+ self.pruned_heads = self.pruned_heads.union(heads)
220
+
221
+ def forward(
222
+ self,
223
+ hidden_states: torch.Tensor,
224
+ attention_mask: Optional[torch.FloatTensor] = None,
225
+ head_mask: Optional[torch.FloatTensor] = None,
226
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
227
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
228
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
229
+ output_attentions: Optional[bool] = False,
230
+ ) -> Tuple[torch.Tensor]:
231
+ self_outputs = self.self(
232
+ hidden_states,
233
+ attention_mask,
234
+ head_mask,
235
+ encoder_hidden_states,
236
+ encoder_attention_mask,
237
+ past_key_value,
238
+ output_attentions,
239
+ )
240
+ attention_output = self.output(self_outputs[0], hidden_states)
241
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
242
+ return outputs
243
+
244
+
245
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BertGeneration
246
+ class BertGenerationIntermediate(nn.Module):
247
+ def __init__(self, config):
248
+ super().__init__()
249
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
250
+ if isinstance(config.hidden_act, str):
251
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
252
+ else:
253
+ self.intermediate_act_fn = config.hidden_act
254
+
255
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
256
+ hidden_states = self.dense(hidden_states)
257
+ hidden_states = self.intermediate_act_fn(hidden_states)
258
+ return hidden_states
259
+
260
+
261
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BertGeneration
262
+ class BertGenerationOutput(nn.Module):
263
+ def __init__(self, config):
264
+ super().__init__()
265
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
266
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
267
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
268
+
269
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
270
+ hidden_states = self.dense(hidden_states)
271
+ hidden_states = self.dropout(hidden_states)
272
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
273
+ return hidden_states
274
+
275
+
276
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration
277
+ class BertGenerationLayer(nn.Module):
278
+ def __init__(self, config):
279
+ super().__init__()
280
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
281
+ self.seq_len_dim = 1
282
+ self.attention = BertGenerationAttention(config)
283
+ self.is_decoder = config.is_decoder
284
+ self.add_cross_attention = config.add_cross_attention
285
+ if self.add_cross_attention:
286
+ if not self.is_decoder:
287
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
288
+ self.crossattention = BertGenerationAttention(config, position_embedding_type="absolute")
289
+ self.intermediate = BertGenerationIntermediate(config)
290
+ self.output = BertGenerationOutput(config)
291
+
292
+ def forward(
293
+ self,
294
+ hidden_states: torch.Tensor,
295
+ attention_mask: Optional[torch.FloatTensor] = None,
296
+ head_mask: Optional[torch.FloatTensor] = None,
297
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
298
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
299
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
300
+ output_attentions: Optional[bool] = False,
301
+ ) -> Tuple[torch.Tensor]:
302
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
303
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
304
+ self_attention_outputs = self.attention(
305
+ hidden_states,
306
+ attention_mask,
307
+ head_mask,
308
+ output_attentions=output_attentions,
309
+ past_key_value=self_attn_past_key_value,
310
+ )
311
+ attention_output = self_attention_outputs[0]
312
+
313
+ # if decoder, the last output is tuple of self-attn cache
314
+ if self.is_decoder:
315
+ outputs = self_attention_outputs[1:-1]
316
+ present_key_value = self_attention_outputs[-1]
317
+ else:
318
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
319
+
320
+ cross_attn_present_key_value = None
321
+ if self.is_decoder and encoder_hidden_states is not None:
322
+ if not hasattr(self, "crossattention"):
323
+ raise ValueError(
324
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
325
+ " by setting `config.add_cross_attention=True`"
326
+ )
327
+
328
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
329
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
330
+ cross_attention_outputs = self.crossattention(
331
+ attention_output,
332
+ attention_mask,
333
+ head_mask,
334
+ encoder_hidden_states,
335
+ encoder_attention_mask,
336
+ cross_attn_past_key_value,
337
+ output_attentions,
338
+ )
339
+ attention_output = cross_attention_outputs[0]
340
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
341
+
342
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
343
+ cross_attn_present_key_value = cross_attention_outputs[-1]
344
+ present_key_value = present_key_value + cross_attn_present_key_value
345
+
346
+ layer_output = apply_chunking_to_forward(
347
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
348
+ )
349
+ outputs = (layer_output,) + outputs
350
+
351
+ # if decoder, return the attn key/values as the last output
352
+ if self.is_decoder:
353
+ outputs = outputs + (present_key_value,)
354
+
355
+ return outputs
356
+
357
+ def feed_forward_chunk(self, attention_output):
358
+ intermediate_output = self.intermediate(attention_output)
359
+ layer_output = self.output(intermediate_output, attention_output)
360
+ return layer_output
361
+
362
+
363
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->BertGeneration
364
+ class BertEncoder(nn.Module):
365
+ def __init__(self, config):
366
+ super().__init__()
367
+ self.config = config
368
+ self.layer = nn.ModuleList([BertGenerationLayer(config) for _ in range(config.num_hidden_layers)])
369
+ self.gradient_checkpointing = False
370
+
371
+ def forward(
372
+ self,
373
+ hidden_states: torch.Tensor,
374
+ attention_mask: Optional[torch.FloatTensor] = None,
375
+ head_mask: Optional[torch.FloatTensor] = None,
376
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
377
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
378
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
379
+ use_cache: Optional[bool] = None,
380
+ output_attentions: Optional[bool] = False,
381
+ output_hidden_states: Optional[bool] = False,
382
+ return_dict: Optional[bool] = True,
383
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
384
+ all_hidden_states = () if output_hidden_states else None
385
+ all_self_attentions = () if output_attentions else None
386
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
387
+
388
+ if self.gradient_checkpointing and self.training:
389
+ if use_cache:
390
+ logger.warning_once(
391
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
392
+ )
393
+ use_cache = False
394
+
395
+ next_decoder_cache = () if use_cache else None
396
+ for i, layer_module in enumerate(self.layer):
397
+ if output_hidden_states:
398
+ all_hidden_states = all_hidden_states + (hidden_states,)
399
+
400
+ layer_head_mask = head_mask[i] if head_mask is not None else None
401
+ past_key_value = past_key_values[i] if past_key_values is not None else None
402
+
403
+ if self.gradient_checkpointing and self.training:
404
+ layer_outputs = self._gradient_checkpointing_func(
405
+ layer_module.__call__,
406
+ hidden_states,
407
+ attention_mask,
408
+ layer_head_mask,
409
+ encoder_hidden_states,
410
+ encoder_attention_mask,
411
+ past_key_value,
412
+ output_attentions,
413
+ )
414
+ else:
415
+ layer_outputs = layer_module(
416
+ hidden_states,
417
+ attention_mask,
418
+ layer_head_mask,
419
+ encoder_hidden_states,
420
+ encoder_attention_mask,
421
+ past_key_value,
422
+ output_attentions,
423
+ )
424
+
425
+ hidden_states = layer_outputs[0]
426
+ if use_cache:
427
+ next_decoder_cache += (layer_outputs[-1],)
428
+ if output_attentions:
429
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
430
+ if self.config.add_cross_attention:
431
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
432
+
433
+ if output_hidden_states:
434
+ all_hidden_states = all_hidden_states + (hidden_states,)
435
+
436
+ if not return_dict:
437
+ return tuple(
438
+ v
439
+ for v in [
440
+ hidden_states,
441
+ next_decoder_cache,
442
+ all_hidden_states,
443
+ all_self_attentions,
444
+ all_cross_attentions,
445
+ ]
446
+ if v is not None
447
+ )
448
+ return BaseModelOutputWithPastAndCrossAttentions(
449
+ last_hidden_state=hidden_states,
450
+ past_key_values=next_decoder_cache,
451
+ hidden_states=all_hidden_states,
452
+ attentions=all_self_attentions,
453
+ cross_attentions=all_cross_attentions,
454
+ )
455
+
456
+
457
+ def load_tf_weights_in_bert_generation(
458
+ model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False
459
+ ):
460
+ try:
461
+ import numpy as np
462
+ import tensorflow.compat.v1 as tf
463
+ import tensorflow_hub as hub
464
+ import tensorflow_text # noqa: F401
465
+
466
+ tf.disable_eager_execution()
467
+ except ImportError:
468
+ logger.error(
469
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
470
+ "https://www.tensorflow.org/install/ for installation instructions."
471
+ )
472
+ raise
473
+ tf_model = hub.Module(tf_hub_path)
474
+ init = tf.global_variables_initializer()
475
+ with tf.Session() as sess:
476
+ init.run()
477
+ all_variables = tf_model.variable_map
478
+ keep_track_variables = all_variables.copy()
479
+ for key in list(all_variables.keys()):
480
+ if "global" in key:
481
+ logger.info(f"Skipping {key}...")
482
+ continue
483
+ if not is_encoder:
484
+ model_pointer = getattr(model, model_class)
485
+ else:
486
+ model_pointer = model
487
+ is_embedding = False
488
+ logger.info(f"Trying to match {key}...")
489
+ # remove start_string = "module/bert/"
490
+ sub_layers = key.split("/")[2:]
491
+ if is_encoder_named_decoder and sub_layers[0] == "encoder":
492
+ logger.info(f"Skipping encoder layer {key} for decoder")
493
+ continue
494
+ if is_encoder and sub_layers[0] == "decoder":
495
+ logger.info(f"Skipping decoder layer {key} for encoder")
496
+ continue
497
+ for i, sub_layer in enumerate(sub_layers):
498
+ if sub_layer == "embeddings":
499
+ is_embedding = True
500
+ elif sub_layer == "LayerNorm":
501
+ is_embedding = False
502
+ if "layer" in sub_layer:
503
+ model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])]
504
+ elif sub_layer in ["kernel", "gamma"]:
505
+ model_pointer = model_pointer.weight
506
+ elif sub_layer == "beta":
507
+ model_pointer = model_pointer.bias
508
+ elif sub_layer == "encdec":
509
+ model_pointer = model_pointer.crossattention.self
510
+ elif sub_layer == "encdec_output":
511
+ model_pointer = model_pointer.crossattention.output
512
+ elif is_encoder_named_decoder and sub_layer == "decoder":
513
+ model_pointer = model_pointer.encoder
514
+ else:
515
+ if sub_layer == "attention" and "encdec" in sub_layers[i + 1]:
516
+ continue
517
+ try:
518
+ model_pointer = getattr(model_pointer, sub_layer)
519
+ except AttributeError:
520
+ logger.info(f"Skipping to initialize {key} at {sub_layer}...")
521
+ raise AttributeError
522
+
523
+ array = np.asarray(sess.run(all_variables[key]))
524
+ if not is_embedding:
525
+ logger.info(f"Transposing numpy weight of shape {array.shape} for {key}")
526
+ array = np.transpose(array)
527
+ else:
528
+ model_pointer = model_pointer.weight
529
+
530
+ if model_pointer.shape != array.shape:
531
+ raise ValueError(f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched")
532
+ logger.info(f"Initialize PyTorch weight {key}")
533
+
534
+ model_pointer.data = torch.from_numpy(array.astype(np.float32))
535
+ keep_track_variables.pop(key, None)
536
+
537
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(keep_track_variables.keys())}")
538
+ return model
539
+
540
+
541
+ class BertGenerationEmbeddings(nn.Module):
542
+ """Construct the embeddings from word and position embeddings."""
543
+
544
+ def __init__(self, config):
545
+ super().__init__()
546
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
547
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
548
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
549
+ # any TensorFlow checkpoint file
550
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
551
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
552
+
553
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
554
+ self.register_buffer(
555
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
556
+ )
557
+
558
+ def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
559
+ if input_ids is not None:
560
+ input_shape = input_ids.size()
561
+ else:
562
+ input_shape = inputs_embeds.size()[:-1]
563
+
564
+ seq_length = input_shape[1]
565
+
566
+ if position_ids is None:
567
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
568
+
569
+ if inputs_embeds is None:
570
+ inputs_embeds = self.word_embeddings(input_ids)
571
+ position_embeddings = self.position_embeddings(position_ids)
572
+
573
+ embeddings = inputs_embeds + position_embeddings
574
+ embeddings = self.LayerNorm(embeddings)
575
+ embeddings = self.dropout(embeddings)
576
+ return embeddings
577
+
578
+
579
+ class BertGenerationPreTrainedModel(PreTrainedModel):
580
+ """
581
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
582
+ models.
583
+ """
584
+
585
+ config_class = BertGenerationConfig
586
+ base_model_prefix = "bert"
587
+ supports_gradient_checkpointing = True
588
+
589
+ def _init_weights(self, module):
590
+ """Initialize the weights"""
591
+ if isinstance(module, nn.Linear):
592
+ # Slightly different from the TF version which uses truncated_normal for initialization
593
+ # cf https://github.com/pytorch/pytorch/pull/5617
594
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
595
+ if module.bias is not None:
596
+ module.bias.data.zero_()
597
+ elif isinstance(module, nn.Embedding):
598
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
599
+ if module.padding_idx is not None:
600
+ module.weight.data[module.padding_idx].zero_()
601
+ elif isinstance(module, nn.LayerNorm):
602
+ module.bias.data.zero_()
603
+ module.weight.data.fill_(1.0)
604
+
605
+
606
+ BERT_GENERATION_START_DOCSTRING = r"""
607
+
608
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
609
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
610
+ etc.)
611
+
612
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
613
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
614
+ and behavior.
615
+
616
+ Parameters:
617
+ config ([`BertGenerationConfig`]): Model configuration class with all the parameters of the model.
618
+ Initializing with a config file does not load the weights associated with the model, only the
619
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
620
+ """
621
+
622
+ BERT_GENERATION_INPUTS_DOCSTRING = r"""
623
+ Args:
624
+ input_ids (`torch.LongTensor` of shape `({0})`):
625
+ Indices of input sequence tokens in the vocabulary.
626
+
627
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
628
+ [`PreTrainedTokenizer.encode`] for details.
629
+
630
+ [What are input IDs?](../glossary#input-ids)
631
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
632
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
633
+
634
+ - 1 for tokens that are **not masked**,
635
+ - 0 for tokens that are **masked**.
636
+
637
+ [What are attention masks?](../glossary#attention-mask)
638
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
639
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
640
+ config.max_position_embeddings - 1]`.
641
+
642
+ [What are position IDs?](../glossary#position-ids)
643
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
644
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
645
+
646
+ - 1 indicates the head is **not masked**,
647
+ - 0 indicates the head is **masked**.
648
+
649
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
650
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
651
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
652
+ model's internal embedding lookup matrix.
653
+ output_attentions (`bool`, *optional*):
654
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
655
+ tensors for more detail.
656
+ output_hidden_states (`bool`, *optional*):
657
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
658
+ more detail.
659
+ return_dict (`bool`, *optional*):
660
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
661
+ """
662
+
663
+
664
+ @add_start_docstrings(
665
+ "The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.",
666
+ BERT_GENERATION_START_DOCSTRING,
667
+ )
668
+ class BertGenerationEncoder(BertGenerationPreTrainedModel):
669
+ """
670
+
671
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
672
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
673
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
674
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
675
+
676
+ This model should be used when leveraging Bert or Roberta checkpoints for the [`EncoderDecoderModel`] class as
677
+ described in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461)
678
+ by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.
679
+
680
+ To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
681
+ to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
682
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
683
+ """
684
+
685
+ def __init__(self, config):
686
+ super().__init__(config)
687
+ self.config = config
688
+
689
+ self.embeddings = BertGenerationEmbeddings(config)
690
+ self.encoder = BertEncoder(config)
691
+
692
+ # Initialize weights and apply final processing
693
+ self.post_init()
694
+
695
+ def get_input_embeddings(self):
696
+ return self.embeddings.word_embeddings
697
+
698
+ def set_input_embeddings(self, value):
699
+ self.embeddings.word_embeddings = value
700
+
701
+ def _prune_heads(self, heads_to_prune):
702
+ """
703
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
704
+ class PreTrainedModel
705
+ """
706
+ for layer, heads in heads_to_prune.items():
707
+ self.encoder.layer[layer].attention.prune_heads(heads)
708
+
709
+ @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
710
+ @add_code_sample_docstrings(
711
+ checkpoint=_CHECKPOINT_FOR_DOC,
712
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
713
+ config_class=_CONFIG_FOR_DOC,
714
+ )
715
+ def forward(
716
+ self,
717
+ input_ids: Optional[torch.Tensor] = None,
718
+ attention_mask: Optional[torch.Tensor] = None,
719
+ position_ids: Optional[torch.Tensor] = None,
720
+ head_mask: Optional[torch.Tensor] = None,
721
+ inputs_embeds: Optional[torch.Tensor] = None,
722
+ encoder_hidden_states: Optional[torch.Tensor] = None,
723
+ encoder_attention_mask: Optional[torch.Tensor] = None,
724
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
725
+ use_cache: Optional[bool] = None,
726
+ output_attentions: Optional[bool] = None,
727
+ output_hidden_states: Optional[bool] = None,
728
+ return_dict: Optional[bool] = None,
729
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
730
+ r"""
731
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
732
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
733
+ the model is configured as a decoder.
734
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
735
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
736
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: `1` for
737
+ tokens that are NOT MASKED, `0` for MASKED tokens.
738
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
739
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
740
+
741
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
742
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
743
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
744
+ use_cache (`bool`, *optional*):
745
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
746
+ `past_key_values`).
747
+ """
748
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
749
+ output_hidden_states = (
750
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
751
+ )
752
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
753
+
754
+ if self.config.is_decoder:
755
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
756
+ else:
757
+ use_cache = False
758
+
759
+ if input_ids is not None and inputs_embeds is not None:
760
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
761
+ elif input_ids is not None:
762
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
763
+ input_shape = input_ids.size()
764
+ elif inputs_embeds is not None:
765
+ input_shape = inputs_embeds.size()[:-1]
766
+ else:
767
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
768
+
769
+ batch_size, seq_length = input_shape
770
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
771
+
772
+ # past_key_values_length
773
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
774
+
775
+ if attention_mask is None:
776
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
777
+
778
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
779
+ # ourselves in which case we just need to make it broadcastable to all heads.
780
+ extended_attention_mask = None
781
+ if not use_cache:
782
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
783
+
784
+ # If a 2D or 3D attention mask is provided for the cross-attention
785
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
786
+ if self.config.is_decoder and encoder_hidden_states is not None:
787
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
788
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
789
+ if encoder_attention_mask is None:
790
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
791
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
792
+ else:
793
+ encoder_extended_attention_mask = None
794
+
795
+ # Prepare head mask if needed
796
+ # 1.0 in head_mask indicate we keep the head
797
+ # attention_probs has shape bsz x n_heads x N x N
798
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
799
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
800
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
801
+
802
+ embedding_output = self.embeddings(
803
+ input_ids=input_ids,
804
+ position_ids=position_ids,
805
+ inputs_embeds=inputs_embeds,
806
+ past_key_values_length=past_key_values_length,
807
+ )
808
+
809
+ encoder_outputs = self.encoder(
810
+ embedding_output,
811
+ attention_mask=extended_attention_mask,
812
+ head_mask=head_mask,
813
+ encoder_hidden_states=encoder_hidden_states,
814
+ encoder_attention_mask=encoder_extended_attention_mask,
815
+ past_key_values=past_key_values,
816
+ use_cache=use_cache,
817
+ output_attentions=output_attentions,
818
+ output_hidden_states=output_hidden_states,
819
+ return_dict=return_dict,
820
+ )
821
+ sequence_output = encoder_outputs[0]
822
+
823
+ if not return_dict:
824
+ return (sequence_output,) + encoder_outputs[1:]
825
+
826
+ return BaseModelOutputWithPastAndCrossAttentions(
827
+ last_hidden_state=sequence_output,
828
+ past_key_values=encoder_outputs.past_key_values,
829
+ hidden_states=encoder_outputs.hidden_states,
830
+ attentions=encoder_outputs.attentions,
831
+ cross_attentions=encoder_outputs.cross_attentions,
832
+ )
833
+
834
+
835
+ class BertGenerationOnlyLMHead(nn.Module):
836
+ def __init__(self, config):
837
+ super().__init__()
838
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
839
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
840
+ self.decoder.bias = self.bias
841
+
842
+ def forward(self, hidden_states):
843
+ logits = self.decoder(hidden_states)
844
+ return logits
845
+
846
+ def _tie_weights(self):
847
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
848
+ self.bias = self.decoder.bias
849
+
850
+
851
+ @add_start_docstrings(
852
+ """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.""",
853
+ BERT_GENERATION_START_DOCSTRING,
854
+ )
855
+ class BertGenerationDecoder(BertGenerationPreTrainedModel):
856
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
857
+
858
+ def __init__(self, config):
859
+ super().__init__(config)
860
+
861
+ if not config.is_decoder:
862
+ logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True`.")
863
+
864
+ self.bert = BertGenerationEncoder(config)
865
+ self.lm_head = BertGenerationOnlyLMHead(config)
866
+
867
+ # Initialize weights and apply final processing
868
+ self.post_init()
869
+
870
+ def get_output_embeddings(self):
871
+ return self.lm_head.decoder
872
+
873
+ def set_output_embeddings(self, new_embeddings):
874
+ self.lm_head.decoder = new_embeddings
875
+
876
+ @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
877
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
878
+ def forward(
879
+ self,
880
+ input_ids: Optional[torch.Tensor] = None,
881
+ attention_mask: Optional[torch.Tensor] = None,
882
+ position_ids: Optional[torch.Tensor] = None,
883
+ head_mask: Optional[torch.Tensor] = None,
884
+ inputs_embeds: Optional[torch.Tensor] = None,
885
+ encoder_hidden_states: Optional[torch.Tensor] = None,
886
+ encoder_attention_mask: Optional[torch.Tensor] = None,
887
+ labels: Optional[torch.Tensor] = None,
888
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
889
+ use_cache: Optional[bool] = None,
890
+ output_attentions: Optional[bool] = None,
891
+ output_hidden_states: Optional[bool] = None,
892
+ return_dict: Optional[bool] = None,
893
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
894
+ r"""
895
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
896
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
897
+ the model is configured as a decoder.
898
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
899
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
900
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
901
+
902
+ - 1 for tokens that are **not masked**,
903
+ - 0 for tokens that are **masked**.
904
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
905
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
906
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
907
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
908
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
909
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
910
+
911
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
912
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
913
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
914
+ use_cache (`bool`, *optional*):
915
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
916
+ `past_key_values`).
917
+
918
+ Returns:
919
+
920
+ Example:
921
+
922
+ ```python
923
+ >>> from transformers import AutoTokenizer, BertGenerationDecoder, BertGenerationConfig
924
+ >>> import torch
925
+
926
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
927
+ >>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
928
+ >>> config.is_decoder = True
929
+ >>> model = BertGenerationDecoder.from_pretrained(
930
+ ... "google/bert_for_seq_generation_L-24_bbc_encoder", config=config
931
+ ... )
932
+
933
+ >>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt")
934
+ >>> outputs = model(**inputs)
935
+
936
+ >>> prediction_logits = outputs.logits
937
+ ```"""
938
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
939
+ if labels is not None:
940
+ use_cache = False
941
+
942
+ outputs = self.bert(
943
+ input_ids,
944
+ attention_mask=attention_mask,
945
+ position_ids=position_ids,
946
+ head_mask=head_mask,
947
+ inputs_embeds=inputs_embeds,
948
+ encoder_hidden_states=encoder_hidden_states,
949
+ encoder_attention_mask=encoder_attention_mask,
950
+ past_key_values=past_key_values,
951
+ use_cache=use_cache,
952
+ output_attentions=output_attentions,
953
+ output_hidden_states=output_hidden_states,
954
+ return_dict=return_dict,
955
+ )
956
+
957
+ sequence_output = outputs[0]
958
+ prediction_scores = self.lm_head(sequence_output)
959
+
960
+ lm_loss = None
961
+ if labels is not None:
962
+ # we are doing next-token prediction; shift prediction scores and input ids by one
963
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
964
+ labels = labels[:, 1:].contiguous()
965
+ loss_fct = CrossEntropyLoss()
966
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
967
+
968
+ if not return_dict:
969
+ output = (prediction_scores,) + outputs[1:]
970
+ return ((lm_loss,) + output) if lm_loss is not None else output
971
+
972
+ return CausalLMOutputWithCrossAttentions(
973
+ loss=lm_loss,
974
+ logits=prediction_scores,
975
+ past_key_values=outputs.past_key_values,
976
+ hidden_states=outputs.hidden_states,
977
+ attentions=outputs.attentions,
978
+ cross_attentions=outputs.cross_attentions,
979
+ )
980
+
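To make the loss computation above concrete, a small self-contained sketch (dummy tensors, hypothetical vocabulary size) of the shift-by-one used for next-token prediction: the logits at position t are scored against the token at position t+1, so the last logit and the first label are dropped.

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
prediction_scores = torch.randn(1, 4, vocab_size)  # logits for 4 positions
labels = torch.tensor([[3, 1, 7, 2]])               # the 4 input token ids

shifted_scores = prediction_scores[:, :-1, :].contiguous()  # predictions for positions 0..2
shifted_labels = labels[:, 1:].contiguous()                  # targets are tokens 1..3

loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
print(lm_loss.item())
```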
981
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
982
+ input_shape = input_ids.shape
983
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
984
+ if attention_mask is None:
985
+ attention_mask = input_ids.new_ones(input_shape)
986
+
987
+ # cut decoder_input_ids if past_key_values is used
988
+ if past_key_values is not None:
989
+ past_length = past_key_values[0][0].shape[2]
990
+
991
+ # Some generation methods already pass only the last input ID
992
+ if input_ids.shape[1] > past_length:
993
+ remove_prefix_length = past_length
994
+ else:
995
+ # Default to old behavior: keep only final ID
996
+ remove_prefix_length = input_ids.shape[1] - 1
997
+
998
+ input_ids = input_ids[:, remove_prefix_length:]
999
+
1000
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1001
+
1002
+ def _reorder_cache(self, past_key_values, beam_idx):
1003
+ reordered_past = ()
1004
+ for layer_past in past_key_values:
1005
+ reordered_past += (
1006
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1007
+ )
1008
+ return reordered_past
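A minimal sketch of the prefix-trimming performed in `prepare_inputs_for_generation` above: once the cache already covers `past_length` positions, only the uncached suffix of `input_ids` is passed to the next forward call. The tensors below are dummy values chosen for illustration only.

```python
import torch

input_ids = torch.tensor([[101, 2023, 2003, 1037]])  # 4 tokens seen so far
past_length = 3                                      # 3 of them are already cached

if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length
else:
    # default to the old behaviour: keep only the final id
    remove_prefix_length = input_ids.shape[1] - 1

print(input_ids[:, remove_prefix_length:])  # tensor([[1037]]) -- only the new token is fed to the model
```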
env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py ADDED
@@ -0,0 +1,185 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model BertGeneration."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
31
+
32
+ PRETRAINED_VOCAB_FILES_MAP = {
33
+ "vocab_file": {
34
+ "bert_for_seq_generation": (
35
+ "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
36
+ ),
37
+ }
38
+ }
39
+
40
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
41
+
42
+
43
+ class BertGenerationTokenizer(PreTrainedTokenizer):
44
+ """
45
+ Construct a BertGeneration tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
46
+
47
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
48
+ this superclass for more information regarding those methods.
49
+
50
+ Args:
51
+ vocab_file (`str`):
52
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
53
+ contains the vocabulary necessary to instantiate a tokenizer.
54
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
55
+ The beginning-of-sequence token.
56
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
57
+ The end of sequence token.
58
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
59
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
60
+ token instead.
61
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
62
+ The token used for padding, for example when batching sequences of different lengths.
63
+ sep_token (`str`, *optional*, defaults to `"<::::>"`):
64
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
65
+ sequence classification or for a text and a question for question answering. It is also used as the last
66
+ token of a sequence built with special tokens.
67
+ sp_model_kwargs (`dict`, *optional*):
68
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
69
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
70
+ to set:
71
+
72
+ - `enable_sampling`: Enable subword regularization.
73
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
74
+
75
+ - `nbest_size = {0,1}`: No sampling is performed.
76
+ - `nbest_size > 1`: samples from the nbest_size results.
77
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
78
+ using forward-filtering-and-backward-sampling algorithm.
79
+
80
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
81
+ BPE-dropout.
82
+ """
83
+
84
+ vocab_files_names = VOCAB_FILES_NAMES
85
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
86
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
87
+ prefix_tokens: List[int] = []
88
+ model_input_names = ["input_ids", "attention_mask"]
89
+
90
+ def __init__(
91
+ self,
92
+ vocab_file,
93
+ bos_token="<s>",
94
+ eos_token="</s>",
95
+ unk_token="<unk>",
96
+ pad_token="<pad>",
97
+ sep_token="<::::>",
98
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
99
+ **kwargs,
100
+ ) -> None:
101
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
102
+
103
+ self.vocab_file = vocab_file
104
+
105
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
106
+ self.sp_model.Load(vocab_file)
107
+
108
+ # Add extra_ids to the special token list
109
+ super().__init__(
110
+ bos_token=bos_token,
111
+ eos_token=eos_token,
112
+ unk_token=unk_token,
113
+ pad_token=pad_token,
114
+ sep_token=sep_token,
115
+ sp_model_kwargs=self.sp_model_kwargs,
116
+ **kwargs,
117
+ )
118
+
119
+ @property
120
+ def vocab_size(self):
121
+ return self.sp_model.get_piece_size()
122
+
123
+ def get_vocab(self):
124
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
125
+ vocab.update(self.added_tokens_encoder)
126
+ return vocab
127
+
128
+ def __getstate__(self):
129
+ state = self.__dict__.copy()
130
+ state["sp_model"] = None
131
+ return state
132
+
133
+ def __setstate__(self, d):
134
+ self.__dict__ = d
135
+
136
+ # for backward compatibility
137
+ if not hasattr(self, "sp_model_kwargs"):
138
+ self.sp_model_kwargs = {}
139
+
140
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
141
+ self.sp_model.Load(self.vocab_file)
142
+
143
+ def _tokenize(self, text: str) -> List[str]:
144
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
145
+ return self.sp_model.encode(text, out_type=str)
146
+
147
+ def _convert_token_to_id(self, token):
148
+ """Converts a token (str) in an id using the vocab."""
149
+ return self.sp_model.piece_to_id(token)
150
+
151
+ def _convert_id_to_token(self, index):
152
+ """Converts an index (integer) in a token (str) using the vocab."""
153
+ token = self.sp_model.IdToPiece(index)
154
+ return token
155
+
156
+ def convert_tokens_to_string(self, tokens):
157
+ """Converts a sequence of tokens (string) in a single string."""
158
+ current_sub_tokens = []
159
+ out_string = ""
160
+ for token in tokens:
161
+ # make sure that special tokens are not decoded using sentencepiece model
162
+ if token in self.all_special_tokens:
163
+ out_string += self.sp_model.decode(current_sub_tokens) + token
164
+ current_sub_tokens = []
165
+ else:
166
+ current_sub_tokens.append(token)
167
+ out_string += self.sp_model.decode(current_sub_tokens)
168
+ return out_string.strip()
169
+
170
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
171
+ if not os.path.isdir(save_directory):
172
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
173
+ return
174
+ out_vocab_file = os.path.join(
175
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
176
+ )
177
+
178
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
179
+ copyfile(self.vocab_file, out_vocab_file)
180
+ elif not os.path.isfile(self.vocab_file):
181
+ with open(out_vocab_file, "wb") as fi:
182
+ content_spiece_model = self.sp_model.serialized_model_proto()
183
+ fi.write(content_spiece_model)
184
+
185
+ return (out_vocab_file,)
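A hedged usage sketch for the tokenizer defined above; `"path/to/spiece.model"` is a placeholder for any local SentencePiece model file, and the printed pieces depend entirely on that model.

```python
from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer(vocab_file="path/to/spiece.model")  # placeholder path

tokens = tokenizer.tokenize("Hello world")         # SentencePiece sub-word pieces
ids = tokenizer.convert_tokens_to_ids(tokens)      # vocabulary indices
text = tokenizer.convert_tokens_to_string(tokens)  # back to a plain string
print(tokens, ids, text)
```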
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (200 Bytes).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/convert_bort_original_gluonnlp_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.54 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,319 @@
1
+ # coding=utf-8
2
+ # Copyright 2020, The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Bort checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import os
20
+
21
+ import gluonnlp as nlp
22
+ import mxnet as mx
23
+ import numpy as np
24
+ import torch
25
+ from gluonnlp.base import get_home_dir
26
+ from gluonnlp.model.bert import BERTEncoder
27
+ from gluonnlp.model.utils import _load_vocab
28
+ from gluonnlp.vocab import Vocab
29
+ from packaging import version
30
+ from torch import nn
31
+
32
+ from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
33
+ from transformers.models.bert.modeling_bert import (
34
+ BertIntermediate,
35
+ BertLayer,
36
+ BertOutput,
37
+ BertSelfAttention,
38
+ BertSelfOutput,
39
+ )
40
+ from transformers.utils import logging
41
+
42
+
43
+ if version.parse(nlp.__version__) != version.parse("0.8.3"):
44
+ raise Exception("requires gluonnlp == 0.8.3")
45
+
46
+ if version.parse(mx.__version__) != version.parse("1.5.0"):
47
+ raise Exception("requires mxnet == 1.5.0")
48
+
49
+ logging.set_verbosity_info()
50
+ logger = logging.get_logger(__name__)
51
+
52
+ SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
53
+
54
+
55
+ def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
56
+ """
57
+ Convert the original Bort checkpoint (based on MXNet and GluonNLP) to our BERT structure.
58
+ """
59
+
60
+ # Original Bort configuration
61
+ bort_4_8_768_1024_hparams = {
62
+ "attention_cell": "multi_head",
63
+ "num_layers": 4,
64
+ "units": 1024,
65
+ "hidden_size": 768,
66
+ "max_length": 512,
67
+ "num_heads": 8,
68
+ "scaled": True,
69
+ "dropout": 0.1,
70
+ "use_residual": True,
71
+ "embed_size": 1024,
72
+ "embed_dropout": 0.1,
73
+ "word_embed": None,
74
+ "layer_norm_eps": 1e-5,
75
+ "token_type_vocab_size": 2,
76
+ }
77
+
78
+ predefined_args = bort_4_8_768_1024_hparams
79
+
80
+ # Let's construct the original Bort model here
81
+ # Taken from official BERT implementation, see:
82
+ # https://github.com/alexa/bort/blob/master/bort/bort.py
83
+ encoder = BERTEncoder(
84
+ attention_cell=predefined_args["attention_cell"],
85
+ num_layers=predefined_args["num_layers"],
86
+ units=predefined_args["units"],
87
+ hidden_size=predefined_args["hidden_size"],
88
+ max_length=predefined_args["max_length"],
89
+ num_heads=predefined_args["num_heads"],
90
+ scaled=predefined_args["scaled"],
91
+ dropout=predefined_args["dropout"],
92
+ output_attention=False,
93
+ output_all_encodings=False,
94
+ use_residual=predefined_args["use_residual"],
95
+ activation=predefined_args.get("activation", "gelu"),
96
+ layer_norm_eps=predefined_args.get("layer_norm_eps", None),
97
+ )
98
+
99
+ # Vocab information needs to be fetched first
100
+ # It's the same as RoBERTa, so RobertaTokenizer can be used later
101
+ vocab_name = "openwebtext_ccnews_stories_books_cased"
102
+
103
+ # Specify download folder to Gluonnlp's vocab
104
+ gluon_cache_dir = os.path.join(get_home_dir(), "models")
105
+ bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
106
+
107
+ original_bort = nlp.model.BERTModel(
108
+ encoder,
109
+ len(bort_vocab),
110
+ units=predefined_args["units"],
111
+ embed_size=predefined_args["embed_size"],
112
+ embed_dropout=predefined_args["embed_dropout"],
113
+ word_embed=predefined_args["word_embed"],
114
+ use_pooler=False,
115
+ use_token_type_embed=False,
116
+ token_type_vocab_size=predefined_args["token_type_vocab_size"],
117
+ use_classifier=False,
118
+ use_decoder=False,
119
+ )
120
+
121
+ original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
122
+ params = original_bort._collect_params_with_prefix()
123
+
124
+ # Build our config 🤗
125
+ hf_bort_config_json = {
126
+ "architectures": ["BertForMaskedLM"],
127
+ "attention_probs_dropout_prob": predefined_args["dropout"],
128
+ "hidden_act": "gelu",
129
+ "hidden_dropout_prob": predefined_args["dropout"],
130
+ "hidden_size": predefined_args["embed_size"],
131
+ "initializer_range": 0.02,
132
+ "intermediate_size": predefined_args["hidden_size"],
133
+ "layer_norm_eps": predefined_args["layer_norm_eps"],
134
+ "max_position_embeddings": predefined_args["max_length"],
135
+ "model_type": "bort",
136
+ "num_attention_heads": predefined_args["num_heads"],
137
+ "num_hidden_layers": predefined_args["num_layers"],
138
+ "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
139
+ "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
140
+ "vocab_size": len(bort_vocab),
141
+ }
142
+
143
+ hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
144
+ hf_bort_model = BertForMaskedLM(hf_bort_config)
145
+ hf_bort_model.eval()
146
+
147
+ # Parameter mapping table (Gluonnlp to Transformers)
148
+ # * denotes layer index
149
+ #
150
+ # | Gluon Parameter | Transformers Parameter
151
+ # | -------------------------------------------------------------- | ----------------------
152
+ # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
153
+ # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
154
+ # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
155
+ # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
156
+ # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
157
+ # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
158
+ # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
159
+ # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
160
+ # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
161
+ # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
162
+ # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
163
+ # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
164
+ # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
165
+ # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
166
+ # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
167
+ # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
168
+ # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
169
+ # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
170
+ # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
171
+ # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
172
+
173
+ # Helper function to convert MXNET Arrays to PyTorch
174
+ def to_torch(mx_array) -> nn.Parameter:
175
+ return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
176
+
177
+ # Check param shapes and map new HF param back
178
+ def check_and_map_params(hf_param, gluon_param):
179
+ shape_hf = hf_param.shape
180
+
181
+ gluon_param = to_torch(params[gluon_param])
182
+ shape_gluon = gluon_param.shape
183
+
184
+ assert (
185
+ shape_hf == shape_gluon
186
+ ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
187
+
188
+ return gluon_param
189
+
190
+ hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
191
+ hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
192
+ )
193
+ hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
194
+ hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
195
+ )
196
+ hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
197
+ hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
198
+ )
199
+ hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
200
+ hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
201
+ )
202
+
203
+ # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
204
+ hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
205
+ hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
206
+ )
207
+
208
+ for i in range(hf_bort_config.num_hidden_layers):
209
+ layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
210
+
211
+ # self attention
212
+ self_attn: BertSelfAttention = layer.attention.self
213
+
214
+ self_attn.key.bias.data = check_and_map_params(
215
+ self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
216
+ )
217
+
218
+ self_attn.key.weight.data = check_and_map_params(
219
+ self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
220
+ )
221
+ self_attn.query.bias.data = check_and_map_params(
222
+ self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
223
+ )
224
+ self_attn.query.weight.data = check_and_map_params(
225
+ self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
226
+ )
227
+ self_attn.value.bias.data = check_and_map_params(
228
+ self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
229
+ )
230
+ self_attn.value.weight.data = check_and_map_params(
231
+ self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
232
+ )
233
+
234
+ # self attention output
235
+ self_output: BertSelfOutput = layer.attention.output
236
+
237
+ self_output.dense.bias = check_and_map_params(
238
+ self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
239
+ )
240
+ self_output.dense.weight = check_and_map_params(
241
+ self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
242
+ )
243
+ self_output.LayerNorm.bias = check_and_map_params(
244
+ self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
245
+ )
246
+ self_output.LayerNorm.weight = check_and_map_params(
247
+ self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
248
+ )
249
+
250
+ # intermediate
251
+ intermediate: BertIntermediate = layer.intermediate
252
+
253
+ intermediate.dense.bias = check_and_map_params(
254
+ intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
255
+ )
256
+ intermediate.dense.weight = check_and_map_params(
257
+ intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
258
+ )
259
+
260
+ # output
261
+ bert_output: BertOutput = layer.output
262
+
263
+ bert_output.dense.bias = check_and_map_params(
264
+ bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
265
+ )
266
+ bert_output.dense.weight = check_and_map_params(
267
+ bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
268
+ )
269
+ bert_output.LayerNorm.bias = check_and_map_params(
270
+ bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
271
+ )
272
+ bert_output.LayerNorm.weight = check_and_map_params(
273
+ bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
274
+ )
275
+
276
+ # Save space and energy 🎄
277
+ hf_bort_model.half()
278
+
279
+ # Compare output of both models
280
+ tokenizer = RobertaTokenizer.from_pretrained("FacebookAI/roberta-base")
281
+
282
+ input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]
283
+
284
+ # Get gluon output
285
+ gluon_input_ids = mx.nd.array([input_ids])
286
+ output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
287
+
288
+ # Get Transformer output (save and reload model again)
289
+ hf_bort_model.save_pretrained(pytorch_dump_folder_path)
290
+ hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
291
+ hf_bort_model.eval()
292
+
293
+ input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
294
+ output_hf = hf_bort_model(**input_ids)[0]
295
+
296
+ gluon_layer = output_gluon[0].asnumpy()
297
+ hf_layer = output_hf[0].detach().numpy()
298
+
299
+ max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
300
+ success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
301
+
302
+ if success:
303
+ print("✔️ Both model do output the same tensors")
304
+ else:
305
+ print("❌ Both model do **NOT** output the same tensors")
306
+ print("Absolute difference is:", max_absolute_diff)
307
+
308
+
309
+ if __name__ == "__main__":
310
+ parser = argparse.ArgumentParser()
311
+ # Required parameters
312
+ parser.add_argument(
313
+ "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
314
+ )
315
+ parser.add_argument(
316
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
317
+ )
318
+ args = parser.parse_args()
319
+ convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
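For completeness, a hedged sketch of driving the conversion from Python rather than the command line; the paths are placeholders, and the pinned `gluonnlp==0.8.3` / `mxnet==1.5.0` requirements enforced at the top of the script still apply.

```python
from transformers.models.deprecated.bort.convert_bort_original_gluonnlp_checkpoint_to_pytorch import (
    convert_bort_checkpoint_to_pytorch,
)

convert_bort_checkpoint_to_pytorch(
    bort_checkpoint_path="path/to/bort.params",        # placeholder: official Bort params file
    pytorch_dump_folder_path="path/to/output_folder",  # placeholder: where the PyTorch model is written
)
```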
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__init__.py ADDED
@@ -0,0 +1,45 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
18
+
19
+
20
+ _import_structure = {"configuration_mmbt": ["MMBTConfig"]}
21
+
22
+ try:
23
+ if not is_torch_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
29
+
30
+
31
+ if TYPE_CHECKING:
32
+ from .configuration_mmbt import MMBTConfig
33
+
34
+ try:
35
+ if not is_torch_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
41
+
42
+ else:
43
+ import sys
44
+
45
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
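A short sketch of what the `_LazyModule` registration above buys: importing the package itself stays cheap, and the heavy submodules are only imported when one of their attributes is first accessed. The snippet assumes a torch-enabled install of this transformers version.

```python
import importlib

mmbt = importlib.import_module("transformers.models.deprecated.mmbt")

# Accessing an attribute triggers the real import of the corresponding submodule.
config_cls = mmbt.MMBTConfig
print(config_cls.__name__)  # "MMBTConfig"
```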
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (803 Bytes).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc ADDED
Binary file (1.32 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc ADDED
Binary file (14.7 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/configuration_mmbt.py ADDED
@@ -0,0 +1,42 @@
1
+ # coding=utf-8
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+ # Copyright (c) HuggingFace Inc. team.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ MMBT configuration"""
17
+
18
+ from ....utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ class MMBTConfig(object):
25
+ """
26
+ This is the configuration class to store the configuration of a [`MMBTModel`]. It is used to instantiate a MMBT
27
+ model according to the specified arguments, defining the model architecture.
28
+
29
+ Args:
30
+ config ([`PreTrainedConfig`]):
31
+ Config of the underlying Transformer models. Its values are copied over to use a single config.
32
+ num_labels (`int`, *optional*):
33
+ Size of final Linear layer for classification.
34
+ modal_hidden_size (`int`, *optional*, defaults to 2048):
35
+ Embedding dimension of the non-text modality encoder.
36
+ """
37
+
38
+ def __init__(self, config, num_labels=None, modal_hidden_size=2048):
39
+ self.__dict__ = config.__dict__
40
+ self.modal_hidden_size = modal_hidden_size
41
+ if num_labels:
42
+ self.num_labels = num_labels
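A minimal sketch of how `MMBTConfig` is meant to be used: it copies the attribute dictionary of an existing text-model config and adds the modality-specific fields on top. The values below are illustrative defaults, not requirements.

```python
from transformers import BertConfig
from transformers.models.deprecated.mmbt import MMBTConfig

text_config = BertConfig()  # config of the underlying text transformer
config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=2048)

print(config.hidden_size)        # inherited from BertConfig via the copied __dict__
print(config.modal_hidden_size)  # 2048, dimension of the non-text encoder features
print(config.num_labels)         # 2
```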
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/modeling_mmbt.py ADDED
@@ -0,0 +1,408 @@
1
+ # coding=utf-8
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+ # Copyright (c) HuggingFace Inc. team.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch MMBT model."""
17
+
18
+
19
+ import torch
20
+ from torch import nn
21
+ from torch.nn import CrossEntropyLoss, MSELoss
22
+
23
+ from ....modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput
24
+ from ....modeling_utils import ModuleUtilsMixin
25
+ from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ _CONFIG_FOR_DOC = "MMBTConfig"
31
+
32
+
33
+ class ModalEmbeddings(nn.Module):
34
+ """Generic Modal Embeddings which takes in an encoder, and a transformer embedding."""
35
+
36
+ def __init__(self, config, encoder, embeddings):
37
+ super().__init__()
38
+ self.config = config
39
+ self.encoder = encoder
40
+ self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
41
+ self.position_embeddings = embeddings.position_embeddings
42
+ self.token_type_embeddings = embeddings.token_type_embeddings
43
+ self.word_embeddings = embeddings.word_embeddings
44
+ self.LayerNorm = embeddings.LayerNorm
45
+ self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
46
+
47
+ def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):
48
+ token_embeddings = self.proj_embeddings(self.encoder(input_modal))
49
+ seq_length = token_embeddings.size(1)
50
+
51
+ if start_token is not None:
52
+ start_token_embeds = self.word_embeddings(start_token)
53
+ seq_length += 1
54
+ token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)
55
+
56
+ if end_token is not None:
57
+ end_token_embeds = self.word_embeddings(end_token)
58
+ seq_length += 1
59
+ token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)
60
+
61
+ if position_ids is None:
62
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device)
63
+ position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length)
64
+
65
+ if token_type_ids is None:
66
+ token_type_ids = torch.zeros(
67
+ (input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device
68
+ )
69
+
70
+ position_embeddings = self.position_embeddings(position_ids)
71
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
72
+ embeddings = token_embeddings + position_embeddings + token_type_embeddings
73
+ embeddings = self.LayerNorm(embeddings)
74
+ embeddings = self.dropout(embeddings)
75
+ return embeddings
76
+
77
+
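As a shape sketch for the start/end token handling in `ModalEmbeddings.forward` above (dummy tensors, hypothetical sizes): the already-embedded start and end tokens are unsqueezed to length-1 sequences and concatenated around the projected modal tokens.

```python
import torch

token_embeddings = torch.randn(2, 3, 768)  # (batch, modal tokens, hidden_size)
start_token_embeds = torch.randn(2, 768)   # embedded start token, e.g. [CLS]
end_token_embeds = torch.randn(2, 768)     # embedded end token, e.g. [SEP]

token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)
token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)
print(token_embeddings.shape)  # torch.Size([2, 5, 768]) -- 1 start + 3 modal + 1 end
```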
78
+ MMBT_START_DOCSTRING = r"""
79
+ MMBT model was proposed in [Supervised Multimodal Bitransformers for Classifying Images and
80
+ Text](https://github.com/facebookresearch/mmbt) by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.
81
+ It's a supervised multimodal bitransformer model that fuses information from text and other image encoders, and
82
+ obtain state-of-the-art performance on various multimodal classification benchmark tasks.
83
+
84
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
85
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
86
+ etc.)
87
+
88
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
89
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
90
+ and behavior.
91
+
92
+ Parameters:
93
+ config ([`MMBTConfig`]): Model configuration class with all the parameters of the model.
94
+ Initializing with a config file does not load the weights associated with the model, only the
95
+ configuration.
96
+ transformer (`nn.Module`): A text transformer that is used by MMBT.
97
+ It should have embeddings, encoder, and pooler attributes.
98
+ encoder (`nn.Module`): Encoder for the second modality.
99
+ It should take in a batch of modal inputs and return k, n dimension embeddings.
100
+ """
101
+
102
+ MMBT_INPUTS_DOCSTRING = r"""
103
+ Args:
104
+ input_modal (`torch.FloatTensor` of shape `(batch_size, ***)`):
105
+ The other modality data. It will be the shape that the encoder for that type expects. e.g. With an Image
106
+ Encoder, the shape would be (batch_size, channels, height, width)
107
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
108
+ Indices of input sequence tokens in the vocabulary. It does not expect [CLS] token to be added as it's
109
+ appended to the end of other modality embeddings. Indices can be obtained using [`AutoTokenizer`]. See
110
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
111
+
112
+ [What are input IDs?](../glossary#input-ids)
113
+ modal_start_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
114
+ Optional start token to be added to Other Modality Embedding. [CLS] Most commonly used for classification
115
+ tasks.
116
+ modal_end_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
117
+ Optional end token to be added to Other Modality Embedding. [SEP] Most commonly used.
118
+ attention_mask (*optional*) `torch.FloatTensor` of shape `(batch_size, sequence_length)`:
119
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
120
+
121
+ - 1 for tokens that are **not masked**,
122
+ - 0 for tokens that are **masked**.
123
+
124
+ [What are attention masks?](../glossary#attention-mask)
125
+ token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, sequence_length)`:
126
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
127
+ 1]`:
128
+
129
+ - 0 corresponds to a *sentence A* token,
130
+ - 1 corresponds to a *sentence B* token.
131
+
132
+ [What are token type IDs?](../glossary#token-type-ids)
133
+ modal_token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, modal_sequence_length)`:
134
+ Segment token indices to indicate different portions of the non-text modality. The embeddings from these
135
+ tokens will be summed with the respective token embeddings for the non-text modality.
136
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
137
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
138
+ config.max_position_embeddings - 1]`.
139
+
140
+ [What are position IDs?](../glossary#position-ids)
141
+ modal_position_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*):
142
+ Indices of positions of each input sequence tokens in the position embeddings for the non-text modality.
143
+ Selected in the range `[0, config.max_position_embeddings - 1]`.
144
+
145
+ [What are position IDs?](../glossary#position-ids)
146
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
147
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
148
+
149
+ - 1 indicates the head is **not masked**,
150
+ - 0 indicates the head is **masked**.
151
+
152
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, embedding_dim)`, *optional*):
153
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
154
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
155
+ model's internal embedding lookup matrix.
156
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
157
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
158
+ the model is configured as a decoder.
159
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
160
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
161
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
162
+
163
+ - 1 for tokens that are **not masked**,
164
+ - 0 for tokens that are **masked**.
165
+
166
+ output_attentions (`bool`, *optional*):
167
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
168
+ tensors for more detail.
169
+ output_hidden_states (`bool`, *optional*):
170
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
171
+ more detail.
172
+ return_dict (`bool`, *optional*):
173
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
174
+ """
175
+
176
+
177
+ @add_start_docstrings(
178
+ "The bare MMBT Model outputting raw hidden-states without any specific head on top.",
179
+ MMBT_START_DOCSTRING,
180
+ )
181
+ class MMBTModel(nn.Module, ModuleUtilsMixin):
182
+ def __init__(self, config, transformer, encoder):
183
+ super().__init__()
184
+ self.config = config
185
+ self.transformer = transformer
186
+ self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)
187
+
188
+ @add_start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING)
189
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
190
+ def forward(
191
+ self,
192
+ input_modal,
193
+ input_ids=None,
194
+ modal_start_tokens=None,
195
+ modal_end_tokens=None,
196
+ attention_mask=None,
197
+ token_type_ids=None,
198
+ modal_token_type_ids=None,
199
+ position_ids=None,
200
+ modal_position_ids=None,
201
+ head_mask=None,
202
+ inputs_embeds=None,
203
+ encoder_hidden_states=None,
204
+ encoder_attention_mask=None,
205
+ output_attentions=None,
206
+ output_hidden_states=None,
207
+ return_dict=None,
208
+ ):
209
+ r"""
210
+ Returns:
211
+
212
+ Examples:
213
+
214
+ ```python
215
+ # For example purposes. Not runnable.
216
+ transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
217
+ encoder = ImageEncoder(args)
218
+ mmbt = MMBTModel(config, transformer, encoder)
219
+ ```"""
220
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
221
+ output_hidden_states = (
222
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
223
+ )
224
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
225
+
226
+ if input_ids is not None and inputs_embeds is not None:
227
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
228
+ elif input_ids is not None:
229
+ input_txt_shape = input_ids.size()
230
+ elif inputs_embeds is not None:
231
+ input_txt_shape = inputs_embeds.size()[:-1]
232
+ else:
233
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
234
+
235
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
236
+
237
+ modal_embeddings = self.modal_encoder(
238
+ input_modal,
239
+ start_token=modal_start_tokens,
240
+ end_token=modal_end_tokens,
241
+ position_ids=modal_position_ids,
242
+ token_type_ids=modal_token_type_ids,
243
+ )
244
+
245
+ input_modal_shape = modal_embeddings.size()[:-1]
246
+
247
+ if token_type_ids is None:
248
+ token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)
249
+
250
+ txt_embeddings = self.transformer.embeddings(
251
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
252
+ )
253
+
254
+ embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)
255
+
256
+ input_shape = embedding_output.size()[:-1]
257
+
258
+ if attention_mask is None:
259
+ attention_mask = torch.ones(input_shape, device=device)
260
+ else:
261
+ attention_mask = torch.cat(
262
+ [torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1
263
+ )
264
+ if encoder_attention_mask is None:
265
+ encoder_attention_mask = torch.ones(input_shape, device=device)
266
+ else:
267
+ encoder_attention_mask = torch.cat(
268
+ [torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1
269
+ )
270
+
271
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
272
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
273
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
274
+
275
+ encoder_outputs = self.transformer.encoder(
276
+ embedding_output,
277
+ attention_mask=extended_attention_mask,
278
+ head_mask=head_mask,
279
+ encoder_hidden_states=encoder_hidden_states,
280
+ encoder_attention_mask=encoder_extended_attention_mask,
281
+ output_attentions=output_attentions,
282
+ output_hidden_states=output_hidden_states,
283
+ return_dict=return_dict,
284
+ )
285
+
286
+ sequence_output = encoder_outputs[0]
287
+ pooled_output = self.transformer.pooler(sequence_output)
288
+
289
+ if not return_dict:
290
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
291
+
292
+ return BaseModelOutputWithPooling(
293
+ last_hidden_state=sequence_output,
294
+ pooler_output=pooled_output,
295
+ hidden_states=encoder_outputs.hidden_states,
296
+ attentions=encoder_outputs.attentions,
297
+ )
298
+
299
+ def get_input_embeddings(self):
300
+ return self.embeddings.word_embeddings
301
+
302
+ def set_input_embeddings(self, value):
303
+ self.embeddings.word_embeddings = value
304
+
305
+
306
+ @add_start_docstrings(
307
+ """
308
+ MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
309
+ """,
310
+ MMBT_START_DOCSTRING,
311
+ MMBT_INPUTS_DOCSTRING,
312
+ )
313
+ class MMBTForClassification(nn.Module):
314
+ r"""
315
+ **labels**: (*optional*) `torch.LongTensor` of shape `(batch_size,)`:
316
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
317
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
318
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
319
+
320
+ Returns: *Tuple* comprising various elements depending on the configuration (config) and inputs: **loss**:
321
+ (*optional*, returned when `labels` is provided) `torch.FloatTensor` of shape `(1,)`: Classification (or
322
+ regression if config.num_labels==1) loss. **logits**:
323
+ `torch.FloatTensor` of shape `(batch_size, config.num_labels)` Classification (or regression if
324
+ config.num_labels==1) scores (before SoftMax).
325
+ **hidden_states**: (*optional*, returned when `output_hidden_states=True`) list of `torch.FloatTensor` (one for
326
+ the output of each layer + the output of the embeddings) of shape `(batch_size, sequence_length, hidden_size)`:
327
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**:
328
+ (*optional*, returned when `output_attentions=True`) list of `torch.FloatTensor` (one for each layer) of shape
329
+ `(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used
330
+ to compute the weighted average in the self-attention heads.
331
+
332
+ Examples:
333
+
334
+ ```python
335
+ # For example purposes. Not runnable.
336
+ transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
337
+ encoder = ImageEncoder(args)
338
+ model = MMBTForClassification(config, transformer, encoder)
339
+ outputs = model(input_modal, input_ids, labels=labels)
340
+ loss, logits = outputs[:2]
341
+ ```"""
342
+
343
+ def __init__(self, config, transformer, encoder):
344
+ super().__init__()
345
+ self.num_labels = config.num_labels
346
+
347
+ self.mmbt = MMBTModel(config, transformer, encoder)
348
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
349
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
350
+
351
+ def forward(
352
+ self,
353
+ input_modal,
354
+ input_ids=None,
355
+ modal_start_tokens=None,
356
+ modal_end_tokens=None,
357
+ attention_mask=None,
358
+ token_type_ids=None,
359
+ modal_token_type_ids=None,
360
+ position_ids=None,
361
+ modal_position_ids=None,
362
+ head_mask=None,
363
+ inputs_embeds=None,
364
+ labels=None,
365
+ return_dict=None,
366
+ ):
367
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
368
+
369
+ outputs = self.mmbt(
370
+ input_modal=input_modal,
371
+ input_ids=input_ids,
372
+ modal_start_tokens=modal_start_tokens,
373
+ modal_end_tokens=modal_end_tokens,
374
+ attention_mask=attention_mask,
375
+ token_type_ids=token_type_ids,
376
+ modal_token_type_ids=modal_token_type_ids,
377
+ position_ids=position_ids,
378
+ modal_position_ids=modal_position_ids,
379
+ head_mask=head_mask,
380
+ inputs_embeds=inputs_embeds,
381
+ return_dict=return_dict,
382
+ )
383
+
384
+ pooled_output = outputs[1]
385
+
386
+ pooled_output = self.dropout(pooled_output)
387
+ logits = self.classifier(pooled_output)
388
+
389
+ loss = None
390
+ if labels is not None:
391
+ if self.num_labels == 1:
392
+ # We are doing regression
393
+ loss_fct = MSELoss()
394
+ loss = loss_fct(logits.view(-1), labels.view(-1))
395
+ else:
396
+ loss_fct = CrossEntropyLoss()
397
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
398
+
399
+ if not return_dict:
400
+ output = (logits,) + outputs[2:]
401
+ return ((loss,) + output) if loss is not None else output
402
+
403
+ return SequenceClassifierOutput(
404
+ loss=loss,
405
+ logits=logits,
406
+ hidden_states=outputs.hidden_states,
407
+ attentions=outputs.attentions,
408
+ )
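Extending the "not runnable" docstring examples above, a hedged end-to-end sketch assuming the transformers version that ships this `deprecated.mmbt` module: `DummyImageEncoder` is a hypothetical stand-in for a real vision backbone, and all shapes and ids below are made up for illustration.

```python
import torch
from torch import nn
from transformers import BertConfig, BertModel
from transformers.models.deprecated.mmbt import MMBTConfig, MMBTForClassification


class DummyImageEncoder(nn.Module):
    """Hypothetical encoder mapping images to (batch, num_modal_tokens, modal_hidden_size) features."""

    def __init__(self, modal_hidden_size=2048, num_modal_tokens=3):
        super().__init__()
        # Adaptive pooling is only a cheap stand-in for a real CNN backbone.
        self.pool = nn.AdaptiveAvgPool2d((num_modal_tokens, modal_hidden_size))

    def forward(self, images):  # images: (batch, channels, height, width)
        return self.pool(images.mean(dim=1, keepdim=True)).squeeze(1)


transformer = BertModel(BertConfig())                 # randomly initialised text model
config = MMBTConfig(transformer.config, num_labels=2)
model = MMBTForClassification(config, transformer, DummyImageEncoder())

input_modal = torch.randn(1, 3, 224, 224)             # dummy "image"
input_ids = torch.tensor([[101, 7592, 2088, 102]])    # dummy token ids
outputs = model(input_modal, input_ids=input_ids, return_dict=True)
print(outputs.logits.shape)  # torch.Size([1, 2])
```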
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__init__.py ADDED
@@ -0,0 +1,95 @@
1
+ # Copyright 2023 EleutherAI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ....utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_sentencepiece_available,
20
+ is_tokenizers_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_open_llama": ["OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenLlamaConfig"],
27
+ }
28
+
29
+ try:
30
+ if not is_sentencepiece_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_open_llama"] = ["LlamaTokenizer"]
36
+
37
+ try:
38
+ if not is_tokenizers_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["tokenization_open_llama_fast"] = ["LlamaTokenizerFast"]
44
+
45
+ try:
46
+ if not is_torch_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ _import_structure["modeling_open_llama"] = [
52
+ "OpenLlamaForCausalLM",
53
+ "OpenLlamaModel",
54
+ "OpenLlamaPreTrainedModel",
55
+ "OpenLlamaForSequenceClassification",
56
+ ]
57
+
58
+
59
+ if TYPE_CHECKING:
60
+ from .configuration_open_llama import OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenLlamaConfig
61
+
62
+ try:
63
+ if not is_sentencepiece_available():
64
+ raise OptionalDependencyNotAvailable()
65
+ except OptionalDependencyNotAvailable:
66
+ pass
67
+ else:
68
+ from transformers import LlamaTokenizer
69
+
70
+ try:
71
+ if not is_tokenizers_available():
72
+ raise OptionalDependencyNotAvailable()
73
+ except OptionalDependencyNotAvailable:
74
+ pass
75
+ else:
76
+ from transformers import LlamaTokenizerFast
77
+
78
+ try:
79
+ if not is_torch_available():
80
+ raise OptionalDependencyNotAvailable()
81
+ except OptionalDependencyNotAvailable:
82
+ pass
83
+ else:
84
+ from .modeling_open_llama import (
85
+ OpenLlamaForCausalLM,
86
+ OpenLlamaForSequenceClassification,
87
+ OpenLlamaModel,
88
+ OpenLlamaPreTrainedModel,
89
+ )
90
+
91
+
92
+ else:
93
+ import sys
94
+
95
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
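This `__init__.py` follows the library's lazy-import pattern: `_LazyModule` replaces the package module, so submodules listed in `_import_structure` are only imported when one of their names is first accessed. A hedged usage sketch, assuming a transformers build that still ships this deprecated package and has torch/sentencepiece installed:

```python
# Accessing a name on the package triggers the corresponding submodule import.
from transformers.models.deprecated.open_llama import OpenLlamaConfig

config = OpenLlamaConfig()   # loads configuration_open_llama lazily
print(config.model_type)     # "open-llama"
```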
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.42 kB)
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc ADDED
Binary file (6.18 kB)
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc ADDED
Binary file (31.4 kB)
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py ADDED
@@ -0,0 +1,168 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ Open-Llama model configuration"""
21
+
22
+ from ....configuration_utils import PretrainedConfig
23
+ from ....utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
29
+ "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
30
+ }
31
+
32
+
33
+ class OpenLlamaConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`OpenLlamaModel`]. It is used to instantiate an
36
+ Open-Llama model according to the specified arguments, defining the model architecture. Instantiating a
37
+ configuration with the defaults will yield a similar configuration to that of the
38
+ [s-JoL/Open-Llama-V1](https://huggingface.co/s-JoL/Open-Llama-V1).
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+
44
+ Args:
45
+ vocab_size (`int`, *optional*, defaults to 100000):
46
+ Vocabulary size of the Open-Llama model. Defines the number of different tokens that can be represented by
47
+ the `inputs_ids` passed when calling [`OpenLlamaModel`]
48
+ hidden_size (`int`, *optional*, defaults to 4096):
49
+ Dimension of the hidden representations.
50
+ intermediate_size (`int`, *optional*, defaults to 11008):
51
+ Dimension of the MLP representations.
52
+ num_hidden_layers (`int`, *optional*, defaults to 32):
53
+ Number of hidden layers in the Transformer encoder.
54
+ num_attention_heads (`int`, *optional*, defaults to 32):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
57
+ The non-linear activation function (function or string) in the decoder.
58
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
59
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
60
+ just in case (e.g., 512 or 1024 or 2048).
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ rms_norm_eps (`float`, *optional*, defaults to 1e-6):
64
+ The epsilon used by the rms normalization layers.
65
+ use_cache (`bool`, *optional*, defaults to `True`):
66
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
67
+ relevant if `config.is_decoder=True`.
68
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
69
+ Whether to tie the input and output word embeddings.
70
+ rope_scaling (`Dict`, *optional*):
71
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
72
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
73
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
74
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
75
+ these scaling strategies behave:
76
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
77
+ experimental feature, subject to breaking API changes in future versions.
78
+
79
+ Example:
80
+
81
+ ```python
82
+ >>> from transformers import OpenLlamaModel, OpenLlamaConfig
83
+
84
+ >>> # Initializing an Open-Llama open_llama-7b style configuration
85
+ >>> configuration = OpenLlamaConfig()
86
+
87
+ >>> # Initializing a model from the open_llama-7b style configuration
88
+ >>> model = OpenLlamaModel(configuration)
89
+
90
+ >>> # Accessing the model configuration
91
+ >>> configuration = model.config
92
+ ```"""
93
+
94
+ model_type = "open-llama"
95
+
96
+ def __init__(
97
+ self,
98
+ vocab_size=100000,
99
+ hidden_size=4096,
100
+ intermediate_size=11008,
101
+ num_hidden_layers=32,
102
+ num_attention_heads=32,
103
+ hidden_act="silu",
104
+ max_position_embeddings=2048,
105
+ initializer_range=0.02,
106
+ rms_norm_eps=1e-6,
107
+ use_cache=True,
108
+ pad_token_id=0,
109
+ bos_token_id=1,
110
+ eos_token_id=2,
111
+ tie_word_embeddings=False,
112
+ use_memory_efficient_attention=True,
113
+ hidden_dropout_prob=0.1,
114
+ attention_dropout_prob=0.1,
115
+ use_stable_embedding=True,
116
+ shared_input_output_embedding=True,
117
+ rope_scaling=None,
118
+ **kwargs,
119
+ ):
120
+ self.vocab_size = vocab_size
121
+ self.max_position_embeddings = max_position_embeddings
122
+ self.hidden_size = hidden_size
123
+ self.intermediate_size = intermediate_size
124
+ self.num_hidden_layers = num_hidden_layers
125
+ self.num_attention_heads = num_attention_heads
126
+ self.hidden_act = hidden_act
127
+ self.initializer_range = initializer_range
128
+ self.rms_norm_eps = rms_norm_eps
129
+ self.use_cache = use_cache
130
+ self.use_memory_efficient_attention = kwargs.pop(
131
+ "use_memorry_efficient_attention", use_memory_efficient_attention
132
+ )
133
+ self.hidden_dropout_prob = hidden_dropout_prob
134
+ self.attention_dropout_prob = attention_dropout_prob
135
+ self.use_stable_embedding = use_stable_embedding
136
+ self.shared_input_output_embedding = shared_input_output_embedding
137
+ self.rope_scaling = rope_scaling
138
+ self._rope_scaling_validation()
139
+
140
+ super().__init__(
141
+ pad_token_id=pad_token_id,
142
+ bos_token_id=bos_token_id,
143
+ eos_token_id=eos_token_id,
144
+ tie_word_embeddings=tie_word_embeddings,
145
+ **kwargs,
146
+ )
147
+
148
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
149
+ def _rope_scaling_validation(self):
150
+ """
151
+ Validate the `rope_scaling` configuration.
152
+ """
153
+ if self.rope_scaling is None:
154
+ return
155
+
156
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
157
+ raise ValueError(
158
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
159
+ f"got {self.rope_scaling}"
160
+ )
161
+ rope_scaling_type = self.rope_scaling.get("type", None)
162
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
163
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
164
+ raise ValueError(
165
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
166
+ )
167
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
168
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
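The `_rope_scaling_validation` helper above only accepts a two-key dict whose `type` is `"linear"` or `"dynamic"` and whose `factor` is a float greater than 1. A hedged example of both the accepted and the rejected case, assuming the deprecated module is importable in the installed transformers build:

```python
from transformers.models.deprecated.open_llama.configuration_open_llama import OpenLlamaConfig

# Valid: linear RoPE scaling with a factor of 2.0.
cfg = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(cfg.rope_scaling)  # {'type': 'linear', 'factor': 2.0}

# Invalid: a factor <= 1.0 is rejected by _rope_scaling_validation.
try:
    OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 1.0})
except ValueError as err:
    print(err)
```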
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/modeling_open_llama.py ADDED
@@ -0,0 +1,968 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Open-Llama model."""
21
+ import math
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ....activations import ACT2FN
30
+ from ....modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
31
+ from ....modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
32
+ from ....modeling_utils import PreTrainedModel
33
+ from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
34
+ from .configuration_open_llama import OpenLlamaConfig
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ try:
40
+ from xformers import ops as xops
41
+ except ImportError:
42
+ xops = None
43
+
44
+
45
+ _CONFIG_FOR_DOC = "OpenLlamaConfig"
46
+
47
+
48
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->OpenLlama
49
+ class OpenLlamaRMSNorm(nn.Module):
50
+ def __init__(self, hidden_size, eps=1e-6):
51
+ """
52
+ OpenLlamaRMSNorm is equivalent to T5LayerNorm
53
+ """
54
+ super().__init__()
55
+ self.weight = nn.Parameter(torch.ones(hidden_size))
56
+ self.variance_epsilon = eps
57
+
58
+ def forward(self, hidden_states):
59
+ input_dtype = hidden_states.dtype
60
+ hidden_states = hidden_states.to(torch.float32)
61
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
62
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
63
+ return self.weight * hidden_states.to(input_dtype)
64
+
65
+
66
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->OpenLlama
67
+ class OpenLlamaRotaryEmbedding(nn.Module):
68
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
69
+ super().__init__()
70
+
71
+ self.dim = dim
72
+ self.max_position_embeddings = max_position_embeddings
73
+ self.base = base
74
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
75
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
76
+
77
+ # Build here to make `torch.jit.trace` work.
78
+ self._set_cos_sin_cache(
79
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
80
+ )
81
+
82
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
83
+ self.max_seq_len_cached = seq_len
84
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
85
+
86
+ freqs = torch.outer(t, self.inv_freq)
87
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
88
+ emb = torch.cat((freqs, freqs), dim=-1)
89
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
90
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
91
+
92
+ def forward(self, x, seq_len=None):
93
+ # x: [bs, num_attention_heads, seq_len, head_size]
94
+ if seq_len > self.max_seq_len_cached:
95
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
96
+
97
+ return (
98
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
99
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
100
+ )
101
+
102
+
103
+ # Copied from transformers.models.falcon.modeling_falcon.FalconLinearScalingRotaryEmbedding with Falcon->OpenLlama
104
+ class OpenLlamaLinearScalingRotaryEmbedding(OpenLlamaRotaryEmbedding):
105
+ """OpenLlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
106
+
107
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
108
+ self.scaling_factor = scaling_factor
109
+ super().__init__(dim, max_position_embeddings, base, device)
110
+
111
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
112
+ self.max_seq_len_cached = seq_len
113
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
114
+ t = t / self.scaling_factor
115
+
116
+ freqs = torch.outer(t, self.inv_freq)
117
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
118
+ emb = torch.cat((freqs, freqs), dim=-1)
119
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
120
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
121
+
122
+
123
+ # Copied from transformers.models.falcon.modeling_falcon.FalconDynamicNTKScalingRotaryEmbedding with Falcon->OpenLlama
124
+ class OpenLlamaDynamicNTKScalingRotaryEmbedding(OpenLlamaRotaryEmbedding):
125
+ """OpenLlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
126
+
127
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
128
+ self.scaling_factor = scaling_factor
129
+ super().__init__(dim, max_position_embeddings, base, device)
130
+
131
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
132
+ self.max_seq_len_cached = seq_len
133
+
134
+ if seq_len > self.max_position_embeddings:
135
+ base = self.base * (
136
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
137
+ ) ** (self.dim / (self.dim - 2))
138
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
139
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
140
+
141
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
142
+
143
+ freqs = torch.outer(t, self.inv_freq)
144
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
145
+ emb = torch.cat((freqs, freqs), dim=-1)
146
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
147
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
148
+
149
+
150
+ def rotate_half(x):
151
+ """Rotates half the hidden dims of the input."""
152
+ x1 = x[..., : x.shape[-1] // 2]
153
+ x2 = x[..., x.shape[-1] // 2 :]
154
+ return torch.cat((-x2, x1), dim=-1)
155
+
156
+
157
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
158
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
159
+ """Applies Rotary Position Embedding to the query and key tensors.
160
+
161
+ Args:
162
+ q (`torch.Tensor`): The query tensor.
163
+ k (`torch.Tensor`): The key tensor.
164
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
165
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
166
+ position_ids (`torch.Tensor`):
167
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
168
+ used to pass offset position ids when working with a KV-cache.
169
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
170
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
171
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
172
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
173
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
174
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
175
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
176
+ Returns:
177
+ `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
178
+ """
179
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
180
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
181
+ q_embed = (q * cos) + (rotate_half(q) * sin)
182
+ k_embed = (k * cos) + (rotate_half(k) * sin)
183
+ return q_embed, k_embed
184
+
185
+
186
+ class OpenLlamaMLP(nn.Module):
187
+ def __init__(
188
+ self,
189
+ hidden_size: int,
190
+ intermediate_size: int,
191
+ hidden_act: str,
192
+ dropout_prob: float,
193
+ ):
194
+ super().__init__()
195
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
196
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
197
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
198
+ self.act_fn = ACT2FN[hidden_act]
199
+ self.dropout = nn.Dropout(dropout_prob)
200
+
201
+ def forward(self, x):
202
+ out = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
203
+ return self.dropout(out)
204
+
205
+
206
+ class OpenLlamaAttention(nn.Module):
207
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
208
+
209
+ def __init__(self, config: OpenLlamaConfig):
210
+ super().__init__()
211
+ self.config = config
212
+ self.hidden_size = config.hidden_size
213
+ self.num_heads = config.num_attention_heads
214
+ self.head_dim = self.hidden_size // self.num_heads
215
+ self.max_position_embeddings = config.max_position_embeddings
216
+ self.dropout_prob = config.attention_dropout_prob
217
+
218
+ if (self.head_dim * self.num_heads) != self.hidden_size:
219
+ raise ValueError(
220
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
221
+ f" and `num_heads`: {self.num_heads})."
222
+ )
223
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
224
+ self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
225
+ self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
226
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
227
+ self._init_rope()
228
+
229
+ # Copied from transformers.models.llama.modeling_llama.LlamaAttention._init_rope with Llama->OpenLlama
230
+ def _init_rope(self):
231
+ if self.config.rope_scaling is None:
232
+ self.rotary_emb = OpenLlamaRotaryEmbedding(
233
+ self.head_dim,
234
+ max_position_embeddings=self.max_position_embeddings,
235
+ base=self.rope_theta,
236
+ )
237
+ else:
238
+ scaling_type = self.config.rope_scaling["type"]
239
+ scaling_factor = self.config.rope_scaling["factor"]
240
+ if scaling_type == "linear":
241
+ self.rotary_emb = OpenLlamaLinearScalingRotaryEmbedding(
242
+ self.head_dim,
243
+ max_position_embeddings=self.max_position_embeddings,
244
+ scaling_factor=scaling_factor,
245
+ base=self.rope_theta,
246
+ )
247
+ elif scaling_type == "dynamic":
248
+ self.rotary_emb = OpenLlamaDynamicNTKScalingRotaryEmbedding(
249
+ self.head_dim,
250
+ max_position_embeddings=self.max_position_embeddings,
251
+ scaling_factor=scaling_factor,
252
+ base=self.rope_theta,
253
+ )
254
+ else:
255
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
256
+
257
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
258
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
259
+
260
+ def forward(
261
+ self,
262
+ hidden_states: torch.Tensor,
263
+ attention_mask: Optional[torch.Tensor] = None,
264
+ position_ids: Optional[torch.LongTensor] = None,
265
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
266
+ output_attentions: bool = False,
267
+ use_cache: bool = False,
268
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
269
+ bsz, q_len, _ = hidden_states.size()
270
+
271
+ query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
272
+ key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
273
+ value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
274
+
275
+ kv_seq_len = key_states.shape[-2]
276
+ if past_key_value is not None:
277
+ kv_seq_len += past_key_value[0].shape[-2]
278
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
279
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
280
+ # [bsz, nh, t, hd]
281
+
282
+ if past_key_value is not None:
283
+ # reuse k, v, self_attention
284
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
285
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
286
+
287
+ past_key_value = (key_states, value_states) if use_cache else None
288
+
289
+ if self.config.use_memory_efficient_attention and xops is not None and self.training:
290
+ attn_weights = None
291
+ query_states = query_states.transpose(1, 2)
292
+ key_states = key_states.transpose(1, 2)
293
+ value_states = value_states.transpose(1, 2)
294
+ attn_output = xops.memory_efficient_attention(
295
+ query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask(), p=self.dropout_prob
296
+ )
297
+ else:
298
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
299
+
300
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
301
+ raise ValueError(
302
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
303
+ f" {attn_weights.size()}"
304
+ )
305
+
306
+ if attention_mask is not None:
307
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
308
+ raise ValueError(
309
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
310
+ )
311
+ attn_weights = attn_weights + attention_mask
312
+ attn_weights = torch.max(
313
+ attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device)
314
+ )
315
+
316
+ # upcast attention to fp32
317
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
318
+ attn_output = torch.matmul(attn_weights, value_states)
319
+
320
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
321
+ raise ValueError(
322
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
323
+ f" {attn_output.size()}"
324
+ )
325
+
326
+ attn_output = attn_output.transpose(1, 2)
327
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
328
+
329
+ attn_output = self.o_proj(attn_output)
330
+
331
+ if not output_attentions:
332
+ attn_weights = None
333
+
334
+ return attn_output, attn_weights, past_key_value
335
+
336
+
337
+ class OpenLlamaDecoderLayer(nn.Module):
338
+ def __init__(self, config: OpenLlamaConfig):
339
+ super().__init__()
340
+ self.hidden_size = config.hidden_size
341
+ self.self_attn = OpenLlamaAttention(config=config)
342
+ self.mlp = OpenLlamaMLP(
343
+ hidden_size=self.hidden_size,
344
+ intermediate_size=config.intermediate_size,
345
+ hidden_act=config.hidden_act,
346
+ dropout_prob=config.hidden_dropout_prob,
347
+ )
348
+ self.input_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
349
+ self.post_attention_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
350
+
351
+ def forward(
352
+ self,
353
+ hidden_states: torch.Tensor,
354
+ attention_mask: Optional[torch.Tensor] = None,
355
+ position_ids: Optional[torch.LongTensor] = None,
356
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
357
+ output_attentions: Optional[bool] = False,
358
+ use_cache: Optional[bool] = False,
359
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
360
+ """
361
+ Args:
362
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
363
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
364
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
365
+ output_attentions (`bool`, *optional*):
366
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
367
+ returned tensors for more detail.
368
+ use_cache (`bool`, *optional*):
369
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
370
+ (see `past_key_values`).
371
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
372
+ """
373
+
374
+ residual = hidden_states
375
+
376
+ hidden_states = self.input_layernorm(hidden_states)
377
+
378
+ # Self Attention
379
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
380
+ hidden_states=hidden_states,
381
+ attention_mask=attention_mask,
382
+ position_ids=position_ids,
383
+ past_key_value=past_key_value,
384
+ output_attentions=output_attentions,
385
+ use_cache=use_cache,
386
+ )
387
+ hidden_states = residual + hidden_states
388
+
389
+ # Fully Connected
390
+ residual = hidden_states
391
+ hidden_states = self.post_attention_layernorm(hidden_states)
392
+ hidden_states = self.mlp(hidden_states)
393
+ hidden_states = residual + hidden_states
394
+
395
+ outputs = (hidden_states,)
396
+
397
+ if output_attentions:
398
+ outputs += (self_attn_weights,)
399
+
400
+ if use_cache:
401
+ outputs += (present_key_value,)
402
+
403
+ return outputs
404
+
405
+
406
+ OPEN_LLAMA_START_DOCSTRING = r"""
407
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
408
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
409
+ etc.)
410
+
411
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
412
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
413
+ and behavior.
414
+
415
+ Parameters:
416
+ config ([`OpenLlamaConfig`]):
417
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
418
+ load the weights associated with the model, only the configuration. Check out the
419
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
420
+ """
421
+
422
+
423
+ @add_start_docstrings(
424
+ "The bare Open-Llama Model outputting raw hidden-states without any specific head on top.",
425
+ OPEN_LLAMA_START_DOCSTRING,
426
+ )
427
+ class OpenLlamaPreTrainedModel(PreTrainedModel):
428
+ config_class = OpenLlamaConfig
429
+ base_model_prefix = "model"
430
+ supports_gradient_checkpointing = True
431
+ _no_split_modules = ["OpenLlamaDecoderLayer"]
432
+
433
+ def _init_weights(self, module):
434
+ std = self.config.initializer_range
435
+ if isinstance(module, nn.Linear):
436
+ module.weight.data.normal_(mean=0.0, std=std)
437
+ if module.bias is not None:
438
+ module.bias.data.zero_()
439
+ elif isinstance(module, nn.Embedding):
440
+ if self.config.use_stable_embedding:
441
+ torch.nn.init.xavier_normal_(module.weight.data)
442
+ else:
443
+ module.weight.data.normal_(mean=0.0, std=std)
444
+ if module.padding_idx is not None:
445
+ module.weight.data[module.padding_idx].zero_()
446
+
447
+
448
+ OPEN_LLAMA_INPUTS_DOCSTRING = r"""
449
+ Args:
450
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
451
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
452
+ it.
453
+
454
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
455
+ [`PreTrainedTokenizer.__call__`] for details.
456
+
457
+ [What are input IDs?](../glossary#input-ids)
458
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
459
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
460
+
461
+ - 1 for tokens that are **not masked**,
462
+ - 0 for tokens that are **masked**.
463
+
464
+ [What are attention masks?](../glossary#attention-mask)
465
+
466
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
467
+ [`PreTrainedTokenizer.__call__`] for details.
468
+
469
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
470
+ `past_key_values`).
471
+
472
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
473
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
474
+ information on the default strategy.
475
+
476
+ - 1 indicates the head is **not masked**,
477
+ - 0 indicates the head is **masked**.
478
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
479
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
480
+ config.n_positions - 1]`.
481
+
482
+ [What are position IDs?](../glossary#position-ids)
483
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
484
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
485
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
486
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
487
+
488
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
489
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
490
+
491
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
492
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
493
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
494
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
495
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
496
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
497
+ model's internal embedding lookup matrix.
498
+ use_cache (`bool`, *optional*):
499
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
500
+ `past_key_values`).
501
+ output_attentions (`bool`, *optional*):
502
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
503
+ tensors for more detail.
504
+ output_hidden_states (`bool`, *optional*):
505
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
506
+ more detail.
507
+ return_dict (`bool`, *optional*):
508
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
509
+ """
510
+
511
+
512
+ @add_start_docstrings(
513
+ "The bare Open-Llama Model outputting raw hidden-states without any specific head on top.",
514
+ OPEN_LLAMA_START_DOCSTRING,
515
+ )
516
+ class OpenLlamaModel(OpenLlamaPreTrainedModel):
517
+ """
518
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OpenLlamaDecoderLayer`]
519
+
520
+ Args:
521
+ config: OpenLlamaConfig
522
+ """
523
+
524
+ def __init__(self, config: OpenLlamaConfig):
525
+ super().__init__(config)
526
+ self.padding_idx = config.pad_token_id
527
+ self.vocab_size = config.vocab_size
528
+
529
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
530
+ if config.use_stable_embedding:
531
+ self.embed_layer_norm = nn.LayerNorm(config.hidden_size)
532
+ else:
533
+ self.embed_layer_norm = None
534
+ self.layers = nn.ModuleList([OpenLlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
535
+ self.norm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
536
+
537
+ self.gradient_checkpointing = False
538
+ # Initialize weights and apply final processing
539
+ self.post_init()
540
+
541
+ def get_input_embeddings(self):
542
+ return self.embed_tokens
543
+
544
+ def set_input_embeddings(self, value):
545
+ self.embed_tokens = value
546
+
547
+ @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
548
+ def forward(
549
+ self,
550
+ input_ids: torch.LongTensor = None,
551
+ attention_mask: Optional[torch.Tensor] = None,
552
+ position_ids: Optional[torch.LongTensor] = None,
553
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
554
+ inputs_embeds: Optional[torch.FloatTensor] = None,
555
+ use_cache: Optional[bool] = None,
556
+ output_attentions: Optional[bool] = None,
557
+ output_hidden_states: Optional[bool] = None,
558
+ return_dict: Optional[bool] = None,
559
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
560
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
561
+ output_hidden_states = (
562
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
563
+ )
564
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
565
+
566
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
567
+
568
+ # retrieve input_ids and inputs_embeds
569
+ if input_ids is not None and inputs_embeds is not None:
570
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
571
+ elif input_ids is not None:
572
+ batch_size, seq_length = input_ids.shape
573
+ elif inputs_embeds is not None:
574
+ batch_size, seq_length, _ = inputs_embeds.shape
575
+ else:
576
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
577
+
578
+ seq_length_with_past = seq_length
579
+ past_key_values_length = 0
580
+
581
+ if self.gradient_checkpointing and self.training:
582
+ if use_cache:
583
+ logger.warning_once(
584
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
585
+ )
586
+ use_cache = False
587
+
588
+ if past_key_values is not None:
589
+ past_key_values_length = past_key_values[0][0].shape[2]
590
+ seq_length_with_past = seq_length_with_past + past_key_values_length
591
+
592
+ if position_ids is None:
593
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
594
+ position_ids = torch.arange(
595
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
596
+ )
597
+ position_ids = position_ids.unsqueeze(0)
598
+
599
+ if inputs_embeds is None:
600
+ inputs_embeds = self.embed_tokens(input_ids)
601
+ if self.embed_layer_norm:
602
+ inputs_embeds = self.embed_layer_norm(inputs_embeds)
603
+ # embed positions
604
+ if self.config.use_memory_efficient_attention and self.training:
605
+ attention_mask = None
606
+ elif attention_mask is None:
607
+ attention_mask = torch.ones(
608
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
609
+ )
610
+
611
+ input_shape = (batch_size, seq_length)
612
+ attention_mask = _prepare_4d_causal_attention_mask(
613
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
614
+ )
615
+
616
+ hidden_states = inputs_embeds
617
+
618
+ # decoder layers
619
+ all_hidden_states = () if output_hidden_states else None
620
+ all_self_attns = () if output_attentions else None
621
+ next_decoder_cache = () if use_cache else None
622
+
623
+ for idx, decoder_layer in enumerate(self.layers):
624
+ if output_hidden_states:
625
+ all_hidden_states += (hidden_states,)
626
+
627
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
628
+
629
+ if self.gradient_checkpointing and self.training:
630
+ layer_outputs = self._gradient_checkpointing_func(
631
+ decoder_layer.__call__,
632
+ hidden_states,
633
+ attention_mask,
634
+ position_ids,
635
+ None,
636
+ output_attentions,
637
+ None,
638
+ )
639
+ else:
640
+ layer_outputs = decoder_layer(
641
+ hidden_states,
642
+ attention_mask=attention_mask,
643
+ position_ids=position_ids,
644
+ past_key_value=past_key_value,
645
+ output_attentions=output_attentions,
646
+ use_cache=use_cache,
647
+ )
648
+
649
+ hidden_states = layer_outputs[0]
650
+
651
+ if use_cache:
652
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
653
+
654
+ if output_attentions:
655
+ all_self_attns += (layer_outputs[1],)
656
+
657
+ hidden_states = self.norm(hidden_states)
658
+
659
+ # add hidden states from the last decoder layer
660
+ if output_hidden_states:
661
+ all_hidden_states += (hidden_states,)
662
+
663
+ next_cache = next_decoder_cache if use_cache else None
664
+ if not return_dict:
665
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
666
+ return BaseModelOutputWithPast(
667
+ last_hidden_state=hidden_states,
668
+ past_key_values=next_cache,
669
+ hidden_states=all_hidden_states,
670
+ attentions=all_self_attns,
671
+ )
672
+
673
+
674
+ class OpenLlamaForCausalLM(OpenLlamaPreTrainedModel):
675
+ def __init__(self, config):
676
+ super().__init__(config)
677
+ self.model = OpenLlamaModel(config)
678
+ if config.shared_input_output_embedding:
679
+ self.lm_head = None
680
+ else:
681
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
682
+
683
+ # Initialize weights and apply final processing
684
+ self.post_init()
685
+
686
+ def get_input_embeddings(self):
687
+ return self.model.embed_tokens
688
+
689
+ def set_input_embeddings(self, value):
690
+ self.model.embed_tokens = value
691
+
692
+ def get_output_embeddings(self):
693
+ return self.lm_head
694
+
695
+ def set_output_embeddings(self, new_embeddings):
696
+ self.lm_head = new_embeddings
697
+
698
+ def set_decoder(self, decoder):
699
+ self.model = decoder
700
+
701
+ def get_decoder(self):
702
+ return self.model
703
+
704
+ @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
705
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
706
+ def forward(
707
+ self,
708
+ input_ids: torch.LongTensor = None,
709
+ attention_mask: Optional[torch.Tensor] = None,
710
+ position_ids: Optional[torch.LongTensor] = None,
711
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
712
+ inputs_embeds: Optional[torch.FloatTensor] = None,
713
+ labels: Optional[torch.LongTensor] = None,
714
+ use_cache: Optional[bool] = None,
715
+ output_attentions: Optional[bool] = None,
716
+ output_hidden_states: Optional[bool] = None,
717
+ return_dict: Optional[bool] = None,
718
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
719
+ r"""
720
+ Args:
721
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
722
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
723
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
724
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
725
+
726
+ Returns:
727
+
728
+ Example:
729
+
730
+ ```python
731
+ >>> from transformers import AutoTokenizer, OpenLlamaForCausalLM
732
+
733
+ >>> model = OpenLlamaForCausalLM.from_pretrained("openlm-research/open_llama_7b")
734
+ >>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
735
+
736
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
737
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
738
+
739
+ >>> # Generate
740
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
741
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
742
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
743
+ ```"""
744
+
745
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
746
+ output_hidden_states = (
747
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
748
+ )
749
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
750
+
751
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
752
+ outputs = self.model(
753
+ input_ids=input_ids,
754
+ attention_mask=attention_mask,
755
+ position_ids=position_ids,
756
+ past_key_values=past_key_values,
757
+ inputs_embeds=inputs_embeds,
758
+ use_cache=use_cache,
759
+ output_attentions=output_attentions,
760
+ output_hidden_states=output_hidden_states,
761
+ return_dict=return_dict,
762
+ )
763
+
764
+ hidden_states = outputs[0]
765
+ if self.config.shared_input_output_embedding:
766
+ logits = torch.einsum(
767
+ "blh,vh->blv", hidden_states.to(self.model.embed_tokens.weight.device), self.model.embed_tokens.weight
768
+ )
769
+ else:
770
+ logits = self.lm_head(hidden_states)
771
+
772
+ loss = None
773
+ if labels is not None:
774
+ # move labels to correct device to enable model parallelism
775
+ labels = labels.to(logits.device)
776
+ # Shift so that tokens < n predict n
777
+ shift_logits = logits[..., :-1, :].contiguous()
778
+ shift_labels = labels[..., 1:].contiguous()
779
+ # Flatten the tokens
780
+ loss_fct = CrossEntropyLoss()
781
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
782
+ shift_labels = shift_labels.view(-1)
783
+ # Enable model parallelism
784
+ shift_labels = shift_labels.to(shift_logits.device)
785
+ loss = loss_fct(shift_logits, shift_labels)
786
+
787
+ if not return_dict:
788
+ output = (logits,) + outputs[1:]
789
+ return (loss,) + output if loss is not None else output
790
+
791
+ return CausalLMOutputWithPast(
792
+ loss=loss,
793
+ logits=logits,
794
+ past_key_values=outputs.past_key_values,
795
+ hidden_states=outputs.hidden_states,
796
+ attentions=outputs.attentions,
797
+ )
798
+
799
+ def prepare_inputs_for_generation(
800
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
801
+ ):
802
+ if past_key_values is not None:
803
+ past_length = past_key_values[0][0].shape[2]
804
+
805
+ # Some generation methods already pass only the last input ID
806
+ if input_ids.shape[1] > past_length:
807
+ remove_prefix_length = past_length
808
+ else:
809
+ # Default to old behavior: keep only final ID
810
+ remove_prefix_length = input_ids.shape[1] - 1
811
+
812
+ input_ids = input_ids[:, remove_prefix_length:]
813
+
814
+ position_ids = kwargs.get("position_ids", None)
815
+ if attention_mask is not None and position_ids is None:
816
+ # create position_ids on the fly for batch generation
817
+ position_ids = attention_mask.long().cumsum(-1) - 1
818
+ position_ids.masked_fill_(attention_mask == 0, 1)
819
+ if past_key_values:
820
+ position_ids = position_ids[:, -input_ids.shape[1] :]
821
+
822
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
823
+ if inputs_embeds is not None and past_key_values is None:
824
+ model_inputs = {"inputs_embeds": inputs_embeds}
825
+ else:
826
+ model_inputs = {"input_ids": input_ids}
827
+
828
+ model_inputs.update(
829
+ {
830
+ "position_ids": position_ids,
831
+ "past_key_values": past_key_values,
832
+ "use_cache": kwargs.get("use_cache"),
833
+ "attention_mask": attention_mask,
834
+ }
835
+ )
836
+ return model_inputs
837
+
838
+ @staticmethod
839
+ def _reorder_cache(past_key_values, beam_idx):
840
+ reordered_past = ()
841
+ for layer_past in past_key_values:
842
+ reordered_past += (
843
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
844
+ )
845
+ return reordered_past
846
+
847
+
848
+ @add_start_docstrings(
849
+ """
850
+ The Open-Llama Model transformer with a sequence classification head on top (linear layer).
851
+
852
+ [`OpenLlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal
853
+ models (e.g. GPT-2) do.
854
+
855
+ Since it does classification on the last token, it needs to know the position of the last token. If a
856
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
857
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
858
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
859
+ each row of the batch).
860
+ """,
861
+ OPEN_LLAMA_START_DOCSTRING,
862
+ )
863
+ class OpenLlamaForSequenceClassification(OpenLlamaPreTrainedModel):
864
+ def __init__(self, config):
865
+ super().__init__(config)
866
+ self.num_labels = config.num_labels
867
+ self.model = OpenLlamaModel(config)
868
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
869
+
870
+ # Initialize weights and apply final processing
871
+ self.post_init()
872
+
873
+ def get_input_embeddings(self):
874
+ return self.model.embed_tokens
875
+
876
+ def set_input_embeddings(self, value):
877
+ self.model.embed_tokens = value
878
+
879
+ @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
880
+ def forward(
881
+ self,
882
+ input_ids: torch.LongTensor = None,
883
+ attention_mask: Optional[torch.Tensor] = None,
884
+ position_ids: Optional[torch.LongTensor] = None,
885
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
886
+ inputs_embeds: Optional[torch.FloatTensor] = None,
887
+ labels: Optional[torch.LongTensor] = None,
888
+ use_cache: Optional[bool] = None,
889
+ output_attentions: Optional[bool] = None,
890
+ output_hidden_states: Optional[bool] = None,
891
+ return_dict: Optional[bool] = None,
892
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
893
+ r"""
894
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
895
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
896
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
897
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
898
+ """
899
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
900
+
901
+ transformer_outputs = self.model(
902
+ input_ids,
903
+ attention_mask=attention_mask,
904
+ position_ids=position_ids,
905
+ past_key_values=past_key_values,
906
+ inputs_embeds=inputs_embeds,
907
+ use_cache=use_cache,
908
+ output_attentions=output_attentions,
909
+ output_hidden_states=output_hidden_states,
910
+ return_dict=return_dict,
911
+ )
912
+ hidden_states = transformer_outputs[0]
913
+ logits = self.score(hidden_states)
914
+
915
+ if input_ids is not None:
916
+ batch_size = input_ids.shape[0]
917
+ else:
918
+ batch_size = inputs_embeds.shape[0]
919
+
920
+ if self.config.pad_token_id is None and batch_size != 1:
921
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
922
+ if self.config.pad_token_id is None:
923
+ sequence_lengths = -1
924
+ else:
925
+ if input_ids is not None:
926
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
927
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
928
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
929
+ sequence_lengths = sequence_lengths.to(logits.device)
930
+ else:
931
+ sequence_lengths = -1
932
+
933
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
934
+
935
+ loss = None
936
+ if labels is not None:
937
+ labels = labels.to(logits.device)
938
+ if self.config.problem_type is None:
939
+ if self.num_labels == 1:
940
+ self.config.problem_type = "regression"
941
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
942
+ self.config.problem_type = "single_label_classification"
943
+ else:
944
+ self.config.problem_type = "multi_label_classification"
945
+
946
+ if self.config.problem_type == "regression":
947
+ loss_fct = MSELoss()
948
+ if self.num_labels == 1:
949
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
950
+ else:
951
+ loss = loss_fct(pooled_logits, labels)
952
+ elif self.config.problem_type == "single_label_classification":
953
+ loss_fct = CrossEntropyLoss()
954
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
955
+ elif self.config.problem_type == "multi_label_classification":
956
+ loss_fct = BCEWithLogitsLoss()
957
+ loss = loss_fct(pooled_logits, labels)
958
+ if not return_dict:
959
+ output = (pooled_logits,) + transformer_outputs[1:]
960
+ return ((loss,) + output) if loss is not None else output
961
+
962
+ return SequenceClassifierOutputWithPast(
963
+ loss=loss,
964
+ logits=pooled_logits,
965
+ past_key_values=transformer_outputs.past_key_values,
966
+ hidden_states=transformer_outputs.hidden_states,
967
+ attentions=transformer_outputs.attentions,
968
+ )
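`OpenLlamaForSequenceClassification` above pools the logits of the last non-padding token: the `argmax` over the padding mask locates the first pad position, subtracting one gives the last real token, and the modulo keeps the index in range for rows that contain no padding at all. A small self-contained sketch of that indexing with made-up tensors (not code from the file):

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 7, 9, 0, 0],    # padded row  -> last real token at index 2
                          [3, 4, 6, 8, 2]])   # full row    -> index 4 via the modulo trick
logits = torch.randn(2, 5, 3)                 # (batch, seq_len, num_labels)

sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
pooled_logits = logits[torch.arange(input_ids.shape[0]), sequence_lengths]

print(sequence_lengths.tolist())  # [2, 4]
print(pooled_logits.shape)        # torch.Size([2, 3])
```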
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig"],
22
+ "tokenization_retribert": ["RetriBertTokenizer"],
23
+ }
24
+
25
+ try:
26
+ if not is_tokenizers_available():
27
+ raise OptionalDependencyNotAvailable()
28
+ except OptionalDependencyNotAvailable:
29
+ pass
30
+ else:
31
+ _import_structure["tokenization_retribert_fast"] = ["RetriBertTokenizerFast"]
32
+
33
+ try:
34
+ if not is_torch_available():
35
+ raise OptionalDependencyNotAvailable()
36
+ except OptionalDependencyNotAvailable:
37
+ pass
38
+ else:
39
+ _import_structure["modeling_retribert"] = [
40
+ "RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
41
+ "RetriBertModel",
42
+ "RetriBertPreTrainedModel",
43
+ ]
44
+
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig
48
+ from .tokenization_retribert import RetriBertTokenizer
49
+
50
+ try:
51
+ if not is_tokenizers_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .tokenization_retribert_fast import RetriBertTokenizerFast
57
+
58
+ try:
59
+ if not is_torch_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ from .modeling_retribert import (
65
+ RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
66
+ RetriBertModel,
67
+ RetriBertPreTrainedModel,
68
+ )
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc ADDED
Binary file (4.68 kB)
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc ADDED
Binary file (7.46 kB)
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc ADDED
Binary file (17.6 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc ADDED
Binary file (7.48 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py ADDED
@@ -0,0 +1,112 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ RetriBERT model configuration"""
16
+
17
+ from ....configuration_utils import PretrainedConfig
18
+ from ....utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ # TODO: upload to AWS
24
+ RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
25
+ "yjernite/retribert-base-uncased": (
26
+ "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
27
+ ),
28
+ }
29
+
30
+
31
+ class RetriBertConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`RetriBertModel`]. It is used to instantiate a
34
+ RetriBertModel model according to the specified arguments, defining the model architecture. Instantiating a
35
+ configuration with the defaults will yield a similar configuration to that of the RetriBERT
36
+ [yjernite/retribert-base-uncased](https://huggingface.co/yjernite/retribert-base-uncased) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*, defaults to 30522):
44
+ Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by
45
+ the `inputs_ids` passed when calling [`RetriBertModel`]
46
+ hidden_size (`int`, *optional*, defaults to 768):
47
+ Dimensionality of the encoder layers and the pooler layer.
48
+ num_hidden_layers (`int`, *optional*, defaults to 12):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 12):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ intermediate_size (`int`, *optional*, defaults to 3072):
53
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
54
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
55
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
56
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
57
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ max_position_embeddings (`int`, *optional*, defaults to 512):
62
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
63
+ just in case (e.g., 512 or 1024 or 2048).
64
+ type_vocab_size (`int`, *optional*, defaults to 2):
65
+ The vocabulary size of the *token_type_ids* passed into [`BertModel`].
66
+ initializer_range (`float`, *optional*, defaults to 0.02):
67
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
68
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
69
+ The epsilon used by the layer normalization layers.
70
+ share_encoders (`bool`, *optional*, defaults to `True`):
71
+ Whether or not to use the same Bert-type encoder for the queries and document
72
+ projection_dim (`int`, *optional*, defaults to 128):
73
+ Final dimension of the query and document representation after projection
74
+ """
75
+
76
+ model_type = "retribert"
77
+
78
+ def __init__(
79
+ self,
80
+ vocab_size=30522,
81
+ hidden_size=768,
82
+ num_hidden_layers=8,
83
+ num_attention_heads=12,
84
+ intermediate_size=3072,
85
+ hidden_act="gelu",
86
+ hidden_dropout_prob=0.1,
87
+ attention_probs_dropout_prob=0.1,
88
+ max_position_embeddings=512,
89
+ type_vocab_size=2,
90
+ initializer_range=0.02,
91
+ layer_norm_eps=1e-12,
92
+ share_encoders=True,
93
+ projection_dim=128,
94
+ pad_token_id=0,
95
+ **kwargs,
96
+ ):
97
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
98
+
99
+ self.vocab_size = vocab_size
100
+ self.hidden_size = hidden_size
101
+ self.num_hidden_layers = num_hidden_layers
102
+ self.num_attention_heads = num_attention_heads
103
+ self.hidden_act = hidden_act
104
+ self.intermediate_size = intermediate_size
105
+ self.hidden_dropout_prob = hidden_dropout_prob
106
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
107
+ self.max_position_embeddings = max_position_embeddings
108
+ self.type_vocab_size = type_vocab_size
109
+ self.initializer_range = initializer_range
110
+ self.layer_norm_eps = layer_norm_eps
111
+ self.share_encoders = share_encoders
112
+ self.projection_dim = projection_dim
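As a quick illustration of the configuration above: the defaults mirror the `yjernite/retribert-base-uncased` geometry (8 layers, hidden size 768, 128-dimensional projection), and every field can be overridden by keyword. A minimal sketch, not tied to any released checkpoint:

```python
from transformers import RetriBertConfig  # top-level re-export in releases that still ship RetriBERT

default_config = RetriBertConfig()  # num_hidden_layers=8, hidden_size=768, projection_dim=128
small_config = RetriBertConfig(
    num_hidden_layers=4,
    projection_dim=64,
    share_encoders=False,  # build separate query and document encoders
)
print(default_config.num_hidden_layers, small_config.projection_dim)  # 8 64
```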
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py ADDED
@@ -0,0 +1,220 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ RetriBERT model
17
+ """
18
+
19
+
20
+ import math
21
+ from typing import Optional
22
+
23
+ import torch
24
+ import torch.utils.checkpoint as checkpoint
25
+ from torch import nn
26
+
27
+ from ....modeling_utils import PreTrainedModel
28
+ from ....utils import add_start_docstrings, logging
29
+ from ...bert.modeling_bert import BertModel
30
+ from .configuration_retribert import RetriBertConfig
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
36
+ "yjernite/retribert-base-uncased",
37
+ # See all RetriBert models at https://huggingface.co/models?filter=retribert
38
+ ]
39
+
40
+
41
+ # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
42
+ class RetriBertPreTrainedModel(PreTrainedModel):
43
+ """
44
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
45
+ models.
46
+ """
47
+
48
+ config_class = RetriBertConfig
49
+ load_tf_weights = None
50
+ base_model_prefix = "retribert"
51
+
52
+ def _init_weights(self, module):
53
+ """Initialize the weights"""
54
+ if isinstance(module, nn.Linear):
55
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
56
+ if module.bias is not None:
57
+ module.bias.data.zero_()
58
+ elif isinstance(module, nn.Embedding):
59
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
60
+ if module.padding_idx is not None:
61
+ module.weight.data[module.padding_idx].zero_()
62
+ elif isinstance(module, nn.LayerNorm):
63
+ module.bias.data.zero_()
64
+ module.weight.data.fill_(1.0)
65
+
66
+
67
+ RETRIBERT_START_DOCSTRING = r"""
68
+
69
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
70
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
71
+ etc.)
72
+
73
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
74
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
75
+ and behavior.
76
+
77
+ Parameters:
78
+ config ([`RetriBertConfig`]): Model configuration class with all the parameters of the model.
79
+ Initializing with a config file does not load the weights associated with the model, only the
80
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
81
+ """
82
+
83
+
84
+ @add_start_docstrings(
85
+ """Bert Based model to embed queries or document for document retrieval.""",
86
+ RETRIBERT_START_DOCSTRING,
87
+ )
88
+ class RetriBertModel(RetriBertPreTrainedModel):
89
+ def __init__(self, config: RetriBertConfig) -> None:
90
+ super().__init__(config)
91
+ self.projection_dim = config.projection_dim
92
+
93
+ self.bert_query = BertModel(config)
94
+ self.bert_doc = None if config.share_encoders else BertModel(config)
95
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
96
+ self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
97
+ self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
98
+
99
+ self.ce_loss = nn.CrossEntropyLoss(reduction="mean")
100
+
101
+ # Initialize weights and apply final processing
102
+ self.post_init()
103
+
104
+ def embed_sentences_checkpointed(
105
+ self,
106
+ input_ids,
107
+ attention_mask,
108
+ sent_encoder,
109
+ checkpoint_batch_size=-1,
110
+ ):
111
+ # reproduces BERT forward pass with checkpointing
112
+ if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
113
+ return sent_encoder(input_ids, attention_mask=attention_mask)[1]
114
+ else:
115
+ # prepare implicit variables
116
+ device = input_ids.device
117
+ input_shape = input_ids.size()
118
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
119
+ head_mask = [None] * sent_encoder.config.num_hidden_layers
120
+ extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(
121
+ attention_mask, input_shape
122
+ )
123
+
124
+ # define function for checkpointing
125
+ def partial_encode(*inputs):
126
+ encoder_outputs = sent_encoder.encoder(
127
+ inputs[0],
128
+ attention_mask=inputs[1],
129
+ head_mask=head_mask,
130
+ )
131
+ sequence_output = encoder_outputs[0]
132
+ pooled_output = sent_encoder.pooler(sequence_output)
133
+ return pooled_output
134
+
135
+ # run embedding layer on everything at once
136
+ embedding_output = sent_encoder.embeddings(
137
+ input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
138
+ )
139
+ # run encoding and pooling on one mini-batch at a time
140
+ pooled_output_list = []
141
+ for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
142
+ b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
143
+ b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
144
+ pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
145
+ pooled_output_list.append(pooled_output)
146
+ return torch.cat(pooled_output_list, dim=0)
147
+
148
+ def embed_questions(
149
+ self,
150
+ input_ids,
151
+ attention_mask=None,
152
+ checkpoint_batch_size=-1,
153
+ ):
154
+ q_reps = self.embed_sentences_checkpointed(
155
+ input_ids,
156
+ attention_mask,
157
+ self.bert_query,
158
+ checkpoint_batch_size,
159
+ )
160
+ return self.project_query(q_reps)
161
+
162
+ def embed_answers(
163
+ self,
164
+ input_ids,
165
+ attention_mask=None,
166
+ checkpoint_batch_size=-1,
167
+ ):
168
+ a_reps = self.embed_sentences_checkpointed(
169
+ input_ids,
170
+ attention_mask,
171
+ self.bert_query if self.bert_doc is None else self.bert_doc,
172
+ checkpoint_batch_size,
173
+ )
174
+ return self.project_doc(a_reps)
175
+
176
+ def forward(
177
+ self,
178
+ input_ids_query: torch.LongTensor,
179
+ attention_mask_query: Optional[torch.FloatTensor],
180
+ input_ids_doc: torch.LongTensor,
181
+ attention_mask_doc: Optional[torch.FloatTensor],
182
+ checkpoint_batch_size: int = -1,
183
+ ) -> torch.FloatTensor:
184
+ r"""
185
+ Args:
186
+ input_ids_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
187
+ Indices of input sequence tokens in the vocabulary for the queries in a batch.
188
+
189
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
190
+ [`PreTrainedTokenizer.__call__`] for details.
191
+
192
+ [What are input IDs?](../glossary#input-ids)
193
+ attention_mask_query (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
194
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
195
+
196
+ - 1 for tokens that are **not masked**,
197
+ - 0 for tokens that are **masked**.
198
+
199
+ [What are attention masks?](../glossary#attention-mask)
200
+ input_ids_doc (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
201
+ Indices of input sequence tokens in the vocabulary for the documents in a batch.
202
+ attention_mask_doc (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
203
+ Mask to avoid performing attention on documents padding token indices.
204
+ checkpoint_batch_size (`int`, *optional*, defaults to `-1`):
205
+ If greater than 0, uses gradient checkpointing to only compute sequence representation on
206
+ `checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to
207
+ all document representations in the batch.
208
+
209
+ Return:
210
+ `torch.FloatTensor`: The bidirectional cross-entropy loss obtained while trying to match each query to its
211
+ corresponding document and each document to its corresponding query in the batch.
212
+ """
213
+ device = input_ids_query.device
214
+ q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
215
+ a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
216
+ compare_scores = torch.mm(q_reps, a_reps.t())
217
+ loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
218
+ loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
219
+ loss = (loss_qa + loss_aq) / 2
220
+ return loss
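`RetriBertModel.forward` above is an in-batch contrastive objective: every query is scored against every document in the batch, the diagonal entries are the positive pairs, and the cross-entropy is averaged over both directions. A standalone sketch of that loss with random stand-in embeddings (no pretrained weights involved):

```python
import torch
from torch import nn

batch_size, projection_dim = 4, 128
q_reps = torch.randn(batch_size, projection_dim)  # stand-in for embed_questions(...)
a_reps = torch.randn(batch_size, projection_dim)  # stand-in for embed_answers(...)

scores = torch.mm(q_reps, a_reps.t())             # (batch, batch) similarity matrix
targets = torch.arange(batch_size)                # matching pairs sit on the diagonal
ce = nn.CrossEntropyLoss(reduction="mean")
loss = (ce(scores, targets) + ce(scores.t(), targets)) / 2
print(loss.item())
```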
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py ADDED
@@ -0,0 +1,537 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RetriBERT."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ....utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
29
+
30
+ PRETRAINED_VOCAB_FILES_MAP = {
31
+ "vocab_file": {
32
+ "yjernite/retribert-base-uncased": (
33
+ "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
34
+ ),
35
+ }
36
+ }
37
+
38
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
39
+ "yjernite/retribert-base-uncased": 512,
40
+ }
41
+
42
+
43
+ PRETRAINED_INIT_CONFIGURATION = {
44
+ "yjernite/retribert-base-uncased": {"do_lower_case": True},
45
+ }
46
+
47
+
48
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
49
+ def load_vocab(vocab_file):
50
+ """Loads a vocabulary file into a dictionary."""
51
+ vocab = collections.OrderedDict()
52
+ with open(vocab_file, "r", encoding="utf-8") as reader:
53
+ tokens = reader.readlines()
54
+ for index, token in enumerate(tokens):
55
+ token = token.rstrip("\n")
56
+ vocab[token] = index
57
+ return vocab
58
+
59
+
60
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
61
+ def whitespace_tokenize(text):
62
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
63
+ text = text.strip()
64
+ if not text:
65
+ return []
66
+ tokens = text.split()
67
+ return tokens
68
+
69
+
70
+ class RetriBertTokenizer(PreTrainedTokenizer):
71
+ r"""
72
+ Constructs a RetriBERT tokenizer.
73
+
74
+ [`RetriBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting
75
+ and wordpiece.
76
+
77
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
78
+ to this superclass for more information regarding those methods.
79
+
80
+ Args:
81
+ vocab_file (`str`):
82
+ File containing the vocabulary.
83
+ do_lower_case (`bool`, *optional*, defaults to `True`):
84
+ Whether or not to lowercase the input when tokenizing.
85
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
86
+ Whether or not to do basic tokenization before WordPiece.
87
+ never_split (`Iterable`, *optional*):
88
+ Collection of tokens which will never be split during tokenization. Only has an effect when
89
+ `do_basic_tokenize=True`
90
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
91
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
92
+ token instead.
93
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
94
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
95
+ sequence classification or for a text and a question for question answering. It is also used as the last
96
+ token of a sequence built with special tokens.
97
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
98
+ The token used for padding, for example when batching sequences of different lengths.
99
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
100
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
101
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
102
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
103
+ The token used for masking values. This is the token used when training this model with masked language
104
+ modeling. This is the token which the model will try to predict.
105
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
106
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
107
+ [issue](https://github.com/huggingface/transformers/issues/328)).
108
+ strip_accents (`bool`, *optional*):
109
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
110
+ value for `lowercase` (as in the original BERT).
111
+ """
112
+
113
+ vocab_files_names = VOCAB_FILES_NAMES
114
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
115
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
116
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
117
+ model_input_names = ["input_ids", "attention_mask"]
118
+
119
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.__init__
120
+ def __init__(
121
+ self,
122
+ vocab_file,
123
+ do_lower_case=True,
124
+ do_basic_tokenize=True,
125
+ never_split=None,
126
+ unk_token="[UNK]",
127
+ sep_token="[SEP]",
128
+ pad_token="[PAD]",
129
+ cls_token="[CLS]",
130
+ mask_token="[MASK]",
131
+ tokenize_chinese_chars=True,
132
+ strip_accents=None,
133
+ **kwargs,
134
+ ):
135
+ if not os.path.isfile(vocab_file):
136
+ raise ValueError(
137
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
138
+ " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
139
+ )
140
+ self.vocab = load_vocab(vocab_file)
141
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
142
+ self.do_basic_tokenize = do_basic_tokenize
143
+ if do_basic_tokenize:
144
+ self.basic_tokenizer = BasicTokenizer(
145
+ do_lower_case=do_lower_case,
146
+ never_split=never_split,
147
+ tokenize_chinese_chars=tokenize_chinese_chars,
148
+ strip_accents=strip_accents,
149
+ )
150
+
151
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
152
+
153
+ super().__init__(
154
+ do_lower_case=do_lower_case,
155
+ do_basic_tokenize=do_basic_tokenize,
156
+ never_split=never_split,
157
+ unk_token=unk_token,
158
+ sep_token=sep_token,
159
+ pad_token=pad_token,
160
+ cls_token=cls_token,
161
+ mask_token=mask_token,
162
+ tokenize_chinese_chars=tokenize_chinese_chars,
163
+ strip_accents=strip_accents,
164
+ **kwargs,
165
+ )
166
+
167
+ @property
168
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
169
+ def do_lower_case(self):
170
+ return self.basic_tokenizer.do_lower_case
171
+
172
+ @property
173
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
174
+ def vocab_size(self):
175
+ return len(self.vocab)
176
+
177
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
178
+ def get_vocab(self):
179
+ return dict(self.vocab, **self.added_tokens_encoder)
180
+
181
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
182
+ def _tokenize(self, text, split_special_tokens=False):
183
+ split_tokens = []
184
+ if self.do_basic_tokenize:
185
+ for token in self.basic_tokenizer.tokenize(
186
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
187
+ ):
188
+ # If the token is part of the never_split set
189
+ if token in self.basic_tokenizer.never_split:
190
+ split_tokens.append(token)
191
+ else:
192
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
193
+ else:
194
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
195
+ return split_tokens
196
+
197
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
198
+ def _convert_token_to_id(self, token):
199
+ """Converts a token (str) in an id using the vocab."""
200
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
201
+
202
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
203
+ def _convert_id_to_token(self, index):
204
+ """Converts an index (integer) in a token (str) using the vocab."""
205
+ return self.ids_to_tokens.get(index, self.unk_token)
206
+
207
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
208
+ def convert_tokens_to_string(self, tokens):
209
+ """Converts a sequence of tokens (string) in a single string."""
210
+ out_string = " ".join(tokens).replace(" ##", "").strip()
211
+ return out_string
212
+
213
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
214
+ def build_inputs_with_special_tokens(
215
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
216
+ ) -> List[int]:
217
+ """
218
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
219
+ adding special tokens. A BERT sequence has the following format:
220
+
221
+ - single sequence: `[CLS] X [SEP]`
222
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
223
+
224
+ Args:
225
+ token_ids_0 (`List[int]`):
226
+ List of IDs to which the special tokens will be added.
227
+ token_ids_1 (`List[int]`, *optional*):
228
+ Optional second list of IDs for sequence pairs.
229
+
230
+ Returns:
231
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
232
+ """
233
+ if token_ids_1 is None:
234
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
235
+ cls = [self.cls_token_id]
236
+ sep = [self.sep_token_id]
237
+ return cls + token_ids_0 + sep + token_ids_1 + sep
238
+
239
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
240
+ def get_special_tokens_mask(
241
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
242
+ ) -> List[int]:
243
+ """
244
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
245
+ special tokens using the tokenizer `prepare_for_model` method.
246
+
247
+ Args:
248
+ token_ids_0 (`List[int]`):
249
+ List of IDs.
250
+ token_ids_1 (`List[int]`, *optional*):
251
+ Optional second list of IDs for sequence pairs.
252
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
253
+ Whether or not the token list is already formatted with special tokens for the model.
254
+
255
+ Returns:
256
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
257
+ """
258
+
259
+ if already_has_special_tokens:
260
+ return super().get_special_tokens_mask(
261
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
262
+ )
263
+
264
+ if token_ids_1 is not None:
265
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
266
+ return [1] + ([0] * len(token_ids_0)) + [1]
267
+
268
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
269
+ def create_token_type_ids_from_sequences(
270
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
271
+ ) -> List[int]:
272
+ """
273
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
274
+ pair mask has the following format:
275
+
276
+ ```
277
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
278
+ | first sequence | second sequence |
279
+ ```
280
+
281
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
282
+
283
+ Args:
284
+ token_ids_0 (`List[int]`):
285
+ List of IDs.
286
+ token_ids_1 (`List[int]`, *optional*):
287
+ Optional second list of IDs for sequence pairs.
288
+
289
+ Returns:
290
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
291
+ """
292
+ sep = [self.sep_token_id]
293
+ cls = [self.cls_token_id]
294
+ if token_ids_1 is None:
295
+ return len(cls + token_ids_0 + sep) * [0]
296
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
297
+
298
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
299
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
300
+ index = 0
301
+ if os.path.isdir(save_directory):
302
+ vocab_file = os.path.join(
303
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
304
+ )
305
+ else:
306
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
307
+ with open(vocab_file, "w", encoding="utf-8") as writer:
308
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
309
+ if index != token_index:
310
+ logger.warning(
311
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
312
+ " Please check that the vocabulary is not corrupted!"
313
+ )
314
+ index = token_index
315
+ writer.write(token + "\n")
316
+ index += 1
317
+ return (vocab_file,)
318
+
319
+
320
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
321
+ class BasicTokenizer(object):
322
+ """
323
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
324
+
325
+ Args:
326
+ do_lower_case (`bool`, *optional*, defaults to `True`):
327
+ Whether or not to lowercase the input when tokenizing.
328
+ never_split (`Iterable`, *optional*):
329
+ Collection of tokens which will never be split during tokenization. Only has an effect when
330
+ `do_basic_tokenize=True`
331
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
332
+ Whether or not to tokenize Chinese characters.
333
+
334
+ This should likely be deactivated for Japanese (see this
335
+ [issue](https://github.com/huggingface/transformers/issues/328)).
336
+ strip_accents (`bool`, *optional*):
337
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
338
+ value for `lowercase` (as in the original BERT).
339
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
340
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
341
+ the full context of the words, such as contractions.
342
+ """
343
+
344
+ def __init__(
345
+ self,
346
+ do_lower_case=True,
347
+ never_split=None,
348
+ tokenize_chinese_chars=True,
349
+ strip_accents=None,
350
+ do_split_on_punc=True,
351
+ ):
352
+ if never_split is None:
353
+ never_split = []
354
+ self.do_lower_case = do_lower_case
355
+ self.never_split = set(never_split)
356
+ self.tokenize_chinese_chars = tokenize_chinese_chars
357
+ self.strip_accents = strip_accents
358
+ self.do_split_on_punc = do_split_on_punc
359
+
360
+ def tokenize(self, text, never_split=None):
361
+ """
362
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
363
+
364
+ Args:
365
+ never_split (`List[str]`, *optional*):
366
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
367
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
368
+ """
369
+ # union() returns a new set by concatenating the two sets.
370
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
371
+ text = self._clean_text(text)
372
+
373
+ # This was added on November 1st, 2018 for the multilingual and Chinese
374
+ # models. This is also applied to the English models now, but it doesn't
375
+ # matter since the English models were not trained on any Chinese data
376
+ # and generally don't have any Chinese data in them (there are Chinese
377
+ # characters in the vocabulary because Wikipedia does have some Chinese
378
+ # words in the English Wikipedia.).
379
+ if self.tokenize_chinese_chars:
380
+ text = self._tokenize_chinese_chars(text)
381
+ # prevents treating the same character with different unicode codepoints as different characters
382
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
383
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
384
+ split_tokens = []
385
+ for token in orig_tokens:
386
+ if token not in never_split:
387
+ if self.do_lower_case:
388
+ token = token.lower()
389
+ if self.strip_accents is not False:
390
+ token = self._run_strip_accents(token)
391
+ elif self.strip_accents:
392
+ token = self._run_strip_accents(token)
393
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
394
+
395
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
396
+ return output_tokens
397
+
398
+ def _run_strip_accents(self, text):
399
+ """Strips accents from a piece of text."""
400
+ text = unicodedata.normalize("NFD", text)
401
+ output = []
402
+ for char in text:
403
+ cat = unicodedata.category(char)
404
+ if cat == "Mn":
405
+ continue
406
+ output.append(char)
407
+ return "".join(output)
408
+
409
+ def _run_split_on_punc(self, text, never_split=None):
410
+ """Splits punctuation on a piece of text."""
411
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
412
+ return [text]
413
+ chars = list(text)
414
+ i = 0
415
+ start_new_word = True
416
+ output = []
417
+ while i < len(chars):
418
+ char = chars[i]
419
+ if _is_punctuation(char):
420
+ output.append([char])
421
+ start_new_word = True
422
+ else:
423
+ if start_new_word:
424
+ output.append([])
425
+ start_new_word = False
426
+ output[-1].append(char)
427
+ i += 1
428
+
429
+ return ["".join(x) for x in output]
430
+
431
+ def _tokenize_chinese_chars(self, text):
432
+ """Adds whitespace around any CJK character."""
433
+ output = []
434
+ for char in text:
435
+ cp = ord(char)
436
+ if self._is_chinese_char(cp):
437
+ output.append(" ")
438
+ output.append(char)
439
+ output.append(" ")
440
+ else:
441
+ output.append(char)
442
+ return "".join(output)
443
+
444
+ def _is_chinese_char(self, cp):
445
+ """Checks whether CP is the codepoint of a CJK character."""
446
+ # This defines a "chinese character" as anything in the CJK Unicode block:
447
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
448
+ #
449
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
450
+ # despite its name. The modern Korean Hangul alphabet is a different block,
451
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
452
+ # space-separated words, so they are not treated specially and handled
453
+ # like all of the other languages.
454
+ if (
455
+ (cp >= 0x4E00 and cp <= 0x9FFF)
456
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
457
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
458
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
459
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
460
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
461
+ or (cp >= 0xF900 and cp <= 0xFAFF)
462
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
463
+ ): #
464
+ return True
465
+
466
+ return False
467
+
468
+ def _clean_text(self, text):
469
+ """Performs invalid character removal and whitespace cleanup on text."""
470
+ output = []
471
+ for char in text:
472
+ cp = ord(char)
473
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
474
+ continue
475
+ if _is_whitespace(char):
476
+ output.append(" ")
477
+ else:
478
+ output.append(char)
479
+ return "".join(output)
480
+
481
+
482
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
483
+ class WordpieceTokenizer(object):
484
+ """Runs WordPiece tokenization."""
485
+
486
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
487
+ self.vocab = vocab
488
+ self.unk_token = unk_token
489
+ self.max_input_chars_per_word = max_input_chars_per_word
490
+
491
+ def tokenize(self, text):
492
+ """
493
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
494
+ tokenization using the given vocabulary.
495
+
496
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
497
+
498
+ Args:
499
+ text: A single token or whitespace separated tokens. This should have
500
+ already been passed through *BasicTokenizer*.
501
+
502
+ Returns:
503
+ A list of wordpiece tokens.
504
+ """
505
+
506
+ output_tokens = []
507
+ for token in whitespace_tokenize(text):
508
+ chars = list(token)
509
+ if len(chars) > self.max_input_chars_per_word:
510
+ output_tokens.append(self.unk_token)
511
+ continue
512
+
513
+ is_bad = False
514
+ start = 0
515
+ sub_tokens = []
516
+ while start < len(chars):
517
+ end = len(chars)
518
+ cur_substr = None
519
+ while start < end:
520
+ substr = "".join(chars[start:end])
521
+ if start > 0:
522
+ substr = "##" + substr
523
+ if substr in self.vocab:
524
+ cur_substr = substr
525
+ break
526
+ end -= 1
527
+ if cur_substr is None:
528
+ is_bad = True
529
+ break
530
+ sub_tokens.append(cur_substr)
531
+ start = end
532
+
533
+ if is_bad:
534
+ output_tokens.append(self.unk_token)
535
+ else:
536
+ output_tokens.extend(sub_tokens)
537
+ return output_tokens
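To make the greedy longest-match-first behaviour of `WordpieceTokenizer` concrete, here is a tiny self-contained run against an invented toy vocabulary (the entries below are for illustration only and do not come from a real checkpoint):

```python
# Assumes a transformers release that still ships the deprecated RetriBERT files.
from transformers.models.deprecated.retribert.tokenization_retribert import WordpieceTokenizer

toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3, "runs": 4}
wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))    # ['un', '##aff', '##able']
print(wordpiece.tokenize("unstoppable"))  # ['[UNK]'] - no full segmentation exists in the toy vocab
```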
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py ADDED
@@ -0,0 +1,205 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for RetriBERT."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ....tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ....utils import logging
24
+ from .tokenization_retribert import RetriBertTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+ PRETRAINED_VOCAB_FILES_MAP = {
32
+ "vocab_file": {
33
+ "yjernite/retribert-base-uncased": (
34
+ "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
35
+ ),
36
+ },
37
+ "tokenizer_file": {
38
+ "yjernite/retribert-base-uncased": (
39
+ "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
40
+ ),
41
+ },
42
+ }
43
+
44
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
45
+ "yjernite/retribert-base-uncased": 512,
46
+ }
47
+
48
+
49
+ PRETRAINED_INIT_CONFIGURATION = {
50
+ "yjernite/retribert-base-uncased": {"do_lower_case": True},
51
+ }
52
+
53
+
54
+ class RetriBertTokenizerFast(PreTrainedTokenizerFast):
55
+ r"""
56
+ Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library).
57
+
58
+ [`RetriBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation
59
+ splitting and wordpiece.
60
+
61
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
62
+ refer to this superclass for more information regarding those methods.
63
+
64
+ Args:
65
+ vocab_file (`str`):
66
+ File containing the vocabulary.
67
+ do_lower_case (`bool`, *optional*, defaults to `True`):
68
+ Whether or not to lowercase the input when tokenizing.
69
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
70
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
71
+ token instead.
72
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
73
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
74
+ sequence classification or for a text and a question for question answering. It is also used as the last
75
+ token of a sequence built with special tokens.
76
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
77
+ The token used for padding, for example when batching sequences of different lengths.
78
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
79
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
80
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
81
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
82
+ The token used for masking values. This is the token used when training this model with masked language
83
+ modeling. This is the token which the model will try to predict.
84
+ clean_text (`bool`, *optional*, defaults to `True`):
85
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
86
+ whitespace characters with the classic one.
87
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
88
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
89
+ issue](https://github.com/huggingface/transformers/issues/328)).
90
+ strip_accents (`bool`, *optional*):
91
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
92
+ value for `lowercase` (as in the original BERT).
93
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
94
+ The prefix for subwords.
95
+ """
96
+
97
+ vocab_files_names = VOCAB_FILES_NAMES
98
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
99
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
100
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
101
+ slow_tokenizer_class = RetriBertTokenizer
102
+ model_input_names = ["input_ids", "attention_mask"]
103
+
104
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.__init__
105
+ def __init__(
106
+ self,
107
+ vocab_file=None,
108
+ tokenizer_file=None,
109
+ do_lower_case=True,
110
+ unk_token="[UNK]",
111
+ sep_token="[SEP]",
112
+ pad_token="[PAD]",
113
+ cls_token="[CLS]",
114
+ mask_token="[MASK]",
115
+ tokenize_chinese_chars=True,
116
+ strip_accents=None,
117
+ **kwargs,
118
+ ):
119
+ super().__init__(
120
+ vocab_file,
121
+ tokenizer_file=tokenizer_file,
122
+ do_lower_case=do_lower_case,
123
+ unk_token=unk_token,
124
+ sep_token=sep_token,
125
+ pad_token=pad_token,
126
+ cls_token=cls_token,
127
+ mask_token=mask_token,
128
+ tokenize_chinese_chars=tokenize_chinese_chars,
129
+ strip_accents=strip_accents,
130
+ **kwargs,
131
+ )
132
+
133
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
134
+ if (
135
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
136
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
137
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
138
+ ):
139
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
140
+ normalizer_state["lowercase"] = do_lower_case
141
+ normalizer_state["strip_accents"] = strip_accents
142
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
143
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
144
+
145
+ self.do_lower_case = do_lower_case
146
+
147
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
148
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
149
+ """
150
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
151
+ adding special tokens. A BERT sequence has the following format:
152
+
153
+ - single sequence: `[CLS] X [SEP]`
154
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
155
+
156
+ Args:
157
+ token_ids_0 (`List[int]`):
158
+ List of IDs to which the special tokens will be added.
159
+ token_ids_1 (`List[int]`, *optional*):
160
+ Optional second list of IDs for sequence pairs.
161
+
162
+ Returns:
163
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
164
+ """
165
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
166
+
167
+ if token_ids_1 is not None:
168
+ output += token_ids_1 + [self.sep_token_id]
169
+
170
+ return output
171
+
172
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
173
+ def create_token_type_ids_from_sequences(
174
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
175
+ ) -> List[int]:
176
+ """
177
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
178
+ pair mask has the following format:
179
+
180
+ ```
181
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
182
+ | first sequence | second sequence |
183
+ ```
184
+
185
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
186
+
187
+ Args:
188
+ token_ids_0 (`List[int]`):
189
+ List of IDs.
190
+ token_ids_1 (`List[int]`, *optional*):
191
+ Optional second list of IDs for sequence pairs.
192
+
193
+ Returns:
194
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
195
+ """
196
+ sep = [self.sep_token_id]
197
+ cls = [self.cls_token_id]
198
+ if token_ids_1 is None:
199
+ return len(cls + token_ids_0 + sep) * [0]
200
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
201
+
202
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
203
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
204
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
205
+ return tuple(files)
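The `build_inputs_with_special_tokens` / `create_token_type_ids_from_sequences` pair above implements the standard BERT packing. The sketch below mirrors both helpers with placeholder ids so the layout is visible without loading a vocabulary file (101/102 are the conventional [CLS]/[SEP] ids; the other numbers are arbitrary):

```python
cls_token_id, sep_token_id = 101, 102  # conventional BERT special-token ids, used here for illustration
token_ids_0 = [7592, 2088]             # placeholder ids for the first segment
token_ids_1 = [2129, 2024, 2017]       # placeholder ids for the second segment

input_ids = [cls_token_id] + token_ids_0 + [sep_token_id] + token_ids_1 + [sep_token_id]
token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)

print(input_ids)       # [101, 7592, 2088, 102, 2129, 2024, 2017, 102]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1, 1]
```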
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__init__.py ADDED
@@ -0,0 +1,29 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ....utils import _LazyModule
17
+
18
+
19
+ _import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from .tokenization_tapex import TapexTokenizer
24
+
25
+
26
+ else:
27
+ import sys
28
+
29
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
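The TAPEX package only exposes a tokenizer, which flattens a `pandas.DataFrame` together with a natural-language question into one sequence. A minimal usage sketch, assuming a release that still ships the deprecated model, access to the `microsoft/tapex-base` checkpoint, and `pandas` installed:

```python
import pandas as pd
from transformers import TapexTokenizer

tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")

table = pd.DataFrame.from_dict({"year": ["2008", "2012"], "city": ["Beijing", "London"]})
encoding = tokenizer(table=table, query="In which year did Beijing host the Olympic Games?", return_tensors="pt")
print(encoding["input_ids"].shape)  # (1, sequence_length)
```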
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (483 Bytes).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc ADDED
Binary file (42 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/tokenization_tapex.py ADDED
@@ -0,0 +1,1487 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for TAPEX."""
16
+
17
+ import json
18
+ import os
19
+ import random
20
+ from functools import lru_cache
21
+ from typing import Dict, List, Optional, Tuple, Union
22
+
23
+ import regex as re
24
+
25
+ from ....file_utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available
26
+ from ....tokenization_utils import AddedToken, PreTrainedTokenizer
27
+ from ....tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, BatchEncoding, TextInput, TruncationStrategy
28
+ from ....utils import logging
29
+
30
+
31
+ if is_pandas_available():
32
+ import pandas as pd
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
38
+
39
+ PRETRAINED_VOCAB_FILES_MAP = {
40
+ "vocab_file": {
41
+ "microsoft/tapex-base": "https://huggingface.co/microsoft/tapex-base/resolve/main/vocab.json",
42
+ },
43
+ "merges_file": {
44
+ "microsoft/tapex-base": "https://huggingface.co/microsoft/tapex-base/resolve/main/merges.txt",
45
+ },
46
+ }
47
+
48
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
49
+ "microsoft/tapex-base": 512,
50
+ }
51
+
52
+ PRETRAINED_INIT_CONFIGURATION = {
53
+ "microsoft/tapex-base": {"do_lower_case": True},
54
+ }
55
+
56
+
57
+ class TapexTruncationStrategy(ExplicitEnum):
58
+ """
59
+ Possible values for the `truncation` argument in [`~TapexTokenizer.__call__`]. Useful for tab-completion in an IDE.
60
+ """
61
+
62
+ DROP_ROWS_TO_FIT = "drop_rows_to_fit"
63
+
64
+
65
+ TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
66
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
67
+ Whether or not to encode the sequences with the special tokens relative to their model.
68
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
69
+ Activates and controls padding. Accepts the following values:
70
+
71
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
72
+ sequence is provided).
73
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
74
+ acceptable input length for the model if that argument is not provided.
75
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
76
+ lengths).
77
+ truncation (`bool`, `str`, [`TapexTruncationStrategy`] or [`~tokenization_utils_base.TruncationStrategy`],
78
+ *optional*, defaults to `False`):
79
+
80
+ Activates and controls truncation. Accepts the following values:
81
+
82
+ - `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length` or to the
83
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
84
+ row by row, removing rows from the table.
85
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
86
+ to the maximum acceptable input length for the model if that argument is not provided. This will
87
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
88
+ sequences (or a batch of pairs) is provided.
89
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
90
+ maximum acceptable input length for the model if that argument is not provided. This will only
91
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
92
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
93
+ maximum acceptable input length for the model if that argument is not provided. This will only
94
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
95
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
96
+ greater than the model maximum admissible input size).
97
+ max_length (`int`, *optional*):
98
+ Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
99
+ `None`, this will use the predefined model maximum length if a maximum length is required by one of the
100
+ truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
101
+ truncation/padding to a maximum length will be deactivated.
102
+ stride (`int`, *optional*, defaults to 0):
103
+ If set to a number along with `max_length`, the overflowing tokens returned when
104
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
105
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
106
+ argument defines the number of overlapping tokens.
107
+ pad_to_multiple_of (`int`, *optional*):
108
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
109
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
110
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
111
+ If set, will return tensors instead of list of python integers. Acceptable values are:
112
+
113
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
114
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
115
+ - `'np'`: Return Numpy `np.ndarray` objects.
116
+ """
117
+
118
+
119
+ @lru_cache()
120
+ def bytes_to_unicode():
121
+ """
122
+ Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
123
+ characters that the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large number
124
+ of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset
125
+ you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe
126
+ vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
127
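+
+ For example, under this mapping the space byte (0x20) is mapped to the printable character "Ġ", which is why
+ word-initial tokens in GPT-2/BART style vocabularies start with "Ġ".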
+ """
128
+ bs = (
129
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
130
+ )
131
+ cs = bs[:]
132
+ n = 0
133
+ for b in range(2**8):
134
+ if b not in bs:
135
+ bs.append(b)
136
+ cs.append(2**8 + n)
137
+ n += 1
138
+ cs = [chr(n) for n in cs]
139
+ return dict(zip(bs, cs))
140
+
141
+
142
+ def get_pairs(word):
143
+ """
144
+ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
145
+ strings).
146
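+
+ For example, `get_pairs(("l", "o", "w"))` returns `{("l", "o"), ("o", "w")}`.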
+ """
147
+ pairs = set()
148
+ prev_char = word[0]
149
+ for char in word[1:]:
150
+ pairs.add((prev_char, char))
151
+ prev_char = char
152
+ return pairs
153
+
154
+
155
+ class IndexedRowTableLinearize:
156
+ """
157
+ FORMAT: col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ...
158
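+
+ For example, a table with header ["year", "city"] and a single row ["2012", "london"] is linearized as
+ "col : year | city row 1 : 2012 | london".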
+ """
159
+
160
+ def process_table(self, table_content: Dict):
161
+ """
162
+ Given a table, TableLinearize aims at converting it into a flatten sequence with special symbols.
163
+ """
164
+ assert "header" in table_content and "rows" in table_content, self.PROMPT_MESSAGE
165
+ # process header
166
+ table_str = self.process_header(table_content["header"]) + " "
167
+ # process rows
168
+ for i, row_example in enumerate(table_content["rows"]):
169
+ # NOTE: the row should start from row 1 instead of 0
170
+ table_str += self.process_row(row_example, row_index=i + 1) + " "
171
+ return table_str.strip()
172
+
173
+ def process_header(self, headers: List):
174
+ """
175
+ Given a list of headers, TableLinearize aims at converting it into a flatten sequence with special symbols.
176
+ """
177
+ return "col : " + " | ".join(headers)
178
+
179
+ def process_row(self, row: List, row_index: int):
180
+ """
181
+ Given a row, TableLinearize aims at converting it into a flatten sequence with special symbols.
182
+ """
183
+ row_str = ""
184
+ row_cell_values = []
185
+ for cell_value in row:
186
+ if isinstance(cell_value, int):
187
+ row_cell_values.append(str(cell_value))
188
+ else:
189
+ row_cell_values.append(cell_value)
190
+ row_str += " | ".join(row_cell_values)
191
+ return "row " + str(row_index) + " : " + row_str
192
+
193
+
194
+ class TapexTokenizer(PreTrainedTokenizer):
195
+ r"""
196
+ Construct a TAPEX tokenizer. Based on byte-level Byte-Pair-Encoding (BPE).
197
+
198
+ This tokenizer can be used to flatten one or more table(s) and concatenate them with one or more related sentences
199
+ to be used by TAPEX models. The format that the TAPEX tokenizer creates is the following:
200
+
201
+ sentence col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ...
202
+
203
+ The tokenizer supports a single table + single query, a single table and multiple queries (in which case the table
204
+ will be duplicated for every query), a single query and multiple tables (in which case the query will be duplicated
205
+ for every table), and multiple tables and queries. In other words, you can provide a batch of tables + questions to
206
+ the tokenizer for instance to prepare them for the model.
207
+
208
+ Tokenization itself is based on the BPE algorithm. It is identical to the one used by BART, RoBERTa and GPT-2.
209
+
210
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
211
+ this superclass for more information regarding those methods.
212
+
213
+ Args:
214
+ vocab_file (`str`):
215
+ Path to the vocabulary file.
216
+ merges_file (`str`):
217
+ Path to the merges file.
218
+ do_lower_case (`bool`, *optional*, defaults to `True`):
219
+ Whether or not to lowercase the input when tokenizing.
220
+ errors (`str`, *optional*, defaults to `"replace"`):
221
+ Paradigm to follow when decoding bytes to UTF-8. See
222
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
223
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
224
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
225
+
226
+ <Tip>
227
+
228
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
229
+ sequence. The token used is the `cls_token`.
230
+
231
+ </Tip>
232
+
233
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
234
+ The end of sequence token.
235
+
236
+ <Tip>
237
+
238
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
239
+ The token used is the `sep_token`.
240
+
241
+ </Tip>
242
+
243
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
244
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
245
+ sequence classification or for a text and a question for question answering. It is also used as the last
246
+ token of a sequence built with special tokens.
247
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
248
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
249
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
250
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
251
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
252
+ token instead.
253
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
254
+ The token used for padding, for example when batching sequences of different lengths.
255
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
256
+ The token used for masking values. This is the token used when training this model with masked language
257
+ modeling. This is the token which the model will try to predict.
258
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
259
+ Whether or not to add an initial space to the input. This allows you to treat the leading word just as any
260
+ other word. (The BART tokenizer detects the beginning of words by the preceding space.)
261
+ max_cell_length (`int`, *optional*, defaults to 15):
262
+ Maximum number of characters per cell when linearizing a table. If this number is exceeded, truncation
263
+ takes place.
264
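+
+ Example (a minimal usage sketch, assuming the `microsoft/tapex-base` checkpoint referenced above is available):
+
+ >>> import pandas as pd
+ >>> from transformers import TapexTokenizer
+ >>> tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
+ >>> table = pd.DataFrame.from_dict({"year": ["2012", "2016"], "city": ["london", "rio de janeiro"]})
+ >>> encoding = tokenizer(table=table, query="in which year did london host the olympic games?")
+ >>> # `encoding` is a `BatchEncoding` holding `input_ids` and `attention_mask`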
+ """
265
+
266
+ vocab_files_names = VOCAB_FILES_NAMES
267
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
268
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
269
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
270
+ model_input_names = ["input_ids", "attention_mask"]
271
+
272
+ def __init__(
273
+ self,
274
+ vocab_file,
275
+ merges_file,
276
+ do_lower_case=True,
277
+ errors="replace",
278
+ bos_token="<s>",
279
+ eos_token="</s>",
280
+ sep_token="</s>",
281
+ cls_token="<s>",
282
+ unk_token="<unk>",
283
+ pad_token="<pad>",
284
+ mask_token="<mask>",
285
+ add_prefix_space=False,
286
+ max_cell_length=15,
287
+ **kwargs,
288
+ ):
289
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
290
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
291
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
292
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
293
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
294
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
295
+
296
+ # Mask token behave like a normal word, i.e. include the space before it
297
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
298
+
299
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
300
+ self.encoder = json.load(vocab_handle)
301
+ self.decoder = {v: k for k, v in self.encoder.items()}
302
+ self.errors = errors # how to handle errors in decoding
303
+ self.byte_encoder = bytes_to_unicode()
304
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
305
+ with open(merges_file, encoding="utf-8") as merges_handle:
306
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
307
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
308
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
309
+ self.cache = {}
310
+ self.add_prefix_space = add_prefix_space
311
+ self.do_lower_case = do_lower_case
312
+
313
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
314
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
315
+
316
+ # additional properties
317
+
318
+ super().__init__(
319
+ vocab_file=vocab_file,
320
+ merges_file=merges_file,
321
+ do_lower_case=do_lower_case,
322
+ errors=errors,
323
+ bos_token=bos_token,
324
+ eos_token=eos_token,
325
+ unk_token=unk_token,
326
+ sep_token=sep_token,
327
+ cls_token=cls_token,
328
+ pad_token=pad_token,
329
+ mask_token=mask_token,
330
+ add_prefix_space=add_prefix_space,
331
+ max_cell_length=max_cell_length,
332
+ **kwargs,
333
+ )
334
+
335
+ self.max_cell_length = max_cell_length
336
+ self.table_linearize = IndexedRowTableLinearize()
337
+
338
+ def build_inputs_with_special_tokens(
339
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
340
+ ) -> List[int]:
341
+ """
342
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
343
+ adding special tokens. A TAPEX sequence has the following format:
344
+ - single sequence: `<s> X </s>`
345
+ - pair of sequences: `<s> A </s></s> B </s>`
346
+
347
+ Args:
348
+ token_ids_0 (`List[int]`):
349
+ List of IDs to which the special tokens will be added.
350
+ token_ids_1 (`List[int]`, *optional*):
351
+ Optional second list of IDs for sequence pairs.
352
+ Returns:
353
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
354
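+
+ For example, for a single sequence `token_ids_0 = [8, 9]`, this returns `[cls_token_id, 8, 9, sep_token_id]`.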
+ """
355
+ if token_ids_1 is None:
356
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
357
+ cls = [self.cls_token_id]
358
+ sep = [self.sep_token_id]
359
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
360
+
361
+ def get_special_tokens_mask(
362
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
363
+ ) -> List[int]:
364
+ """
365
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
366
+ special tokens using the tokenizer `prepare_for_model` method.
367
+
+ Args:
368
+ token_ids_0 (`List[int]`):
369
+ List of IDs.
370
+ token_ids_1 (`List[int]`, *optional*):
371
+ Optional second list of IDs for sequence pairs.
372
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
373
+ Whether or not the token list is already formatted with special tokens for the model.
374
+ Returns:
375
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
376
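+
+ For example, for a single sequence of two tokens (without pre-existing special tokens), the mask is `[1, 0, 0, 1]`.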
+ """
377
+ if already_has_special_tokens:
378
+ return super().get_special_tokens_mask(
379
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
380
+ )
381
+
382
+ if token_ids_1 is None:
383
+ return [1] + ([0] * len(token_ids_0)) + [1]
384
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
385
+
386
+ def create_token_type_ids_from_sequences(
387
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
388
+ ) -> List[int]:
389
+ """
390
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. TAPEX does not
391
+ make use of token type ids, therefore a list of zeros is returned.
392
+
+ Args:
393
+ token_ids_0 (`List[int]`):
394
+ List of IDs.
395
+ token_ids_1 (`List[int]`, *optional*):
396
+ Optional second list of IDs for sequence pairs.
397
+ Returns:
398
+ `List[int]`: List of zeros.
399
+ """
400
+ sep = [self.sep_token_id]
401
+ cls = [self.cls_token_id]
402
+
403
+ if token_ids_1 is None:
404
+ return len(cls + token_ids_0 + sep) * [0]
405
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
406
+
407
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
408
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
409
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
410
+ text = " " + text
411
+ return (text, kwargs)
412
+
413
+ @property
414
+ def vocab_size(self):
415
+ return len(self.encoder)
416
+
417
+ def get_vocab(self):
418
+ return dict(self.encoder, **self.added_tokens_encoder)
419
+
420
+ def bpe(self, token):
421
+ if token in self.cache:
422
+ return self.cache[token]
423
+ word = tuple(token)
424
+ pairs = get_pairs(word)
425
+
426
+ if not pairs:
427
+ return token
428
+
429
+ while True:
430
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
431
+ if bigram not in self.bpe_ranks:
432
+ break
433
+ first, second = bigram
434
+ new_word = []
435
+ i = 0
436
+ while i < len(word):
437
+ try:
438
+ j = word.index(first, i)
439
+ except ValueError:
440
+ new_word.extend(word[i:])
441
+ break
442
+ else:
443
+ new_word.extend(word[i:j])
444
+ i = j
445
+
446
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
447
+ new_word.append(first + second)
448
+ i += 2
449
+ else:
450
+ new_word.append(word[i])
451
+ i += 1
452
+ new_word = tuple(new_word)
453
+ word = new_word
454
+ if len(word) == 1:
455
+ break
456
+ else:
457
+ pairs = get_pairs(word)
458
+ word = " ".join(word)
459
+ self.cache[token] = word
460
+ return word
461
+
462
+ def _tokenize(self, text):
463
+ """Tokenize a string."""
464
+ bpe_tokens = []
465
+ for token in re.findall(self.pat, text):
466
+ token = "".join(
467
+ self.byte_encoder[b] for b in token.encode("utf-8")
468
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
469
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
470
+ return bpe_tokens
471
+
472
+ def _convert_token_to_id(self, token):
473
+ """Converts a token (str) in an id using the vocab."""
474
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
475
+
476
+ def _convert_id_to_token(self, index):
477
+ """Converts an index (integer) in a token (str) using the vocab."""
478
+ return self.decoder.get(index)
479
+
480
+ def convert_tokens_to_string(self, tokens):
481
+ """Converts a sequence of tokens (string) in a single string."""
482
+ text = "".join(tokens)
483
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
484
+ return text
485
+
486
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
487
+ if not os.path.isdir(save_directory):
488
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
489
+ return
490
+ vocab_file = os.path.join(
491
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
492
+ )
493
+ merge_file = os.path.join(
494
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
495
+ )
496
+
497
+ with open(vocab_file, "w", encoding="utf-8") as f:
498
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
499
+
500
+ index = 0
501
+ with open(merge_file, "w", encoding="utf-8") as writer:
502
+ writer.write("#version: 0.2\n")
503
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
504
+ if index != token_index:
505
+ logger.warning(
506
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
507
+ " Please check that the tokenizer is not corrupted!"
508
+ )
509
+ index = token_index
510
+ writer.write(" ".join(bpe_tokens) + "\n")
511
+ index += 1
512
+
513
+ return vocab_file, merge_file
514
+
515
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
516
+ def __call__(
517
+ self,
518
+ table: Union["pd.DataFrame", List["pd.DataFrame"]] = None,
519
+ query: Optional[Union[TextInput, List[TextInput]]] = None,
520
+ answer: Union[str, List[str]] = None,
521
+ add_special_tokens: bool = True,
522
+ padding: Union[bool, str, PaddingStrategy] = False,
523
+ truncation: Union[bool, str, TruncationStrategy] = None,
524
+ max_length: Optional[int] = None,
525
+ stride: int = 0,
526
+ pad_to_multiple_of: Optional[int] = None,
527
+ return_tensors: Optional[Union[str, TensorType]] = None,
528
+ return_token_type_ids: Optional[bool] = None,
529
+ return_attention_mask: Optional[bool] = None,
530
+ return_overflowing_tokens: bool = False,
531
+ return_special_tokens_mask: bool = False,
532
+ return_offsets_mapping: bool = False,
533
+ return_length: bool = False,
534
+ verbose: bool = True,
535
+ **kwargs,
536
+ ) -> BatchEncoding:
537
+ """
538
+ Main method to tokenize and prepare for the model one or several table-sequence pair(s).
539
+
540
+ Args:
541
+ table (`pd.DataFrame`, `List[pd.DataFrame]`):
542
+ Table(s) containing tabular data.
543
+ query (`str` or `List[str]`, *optional*):
544
+ Sentence or batch of sentences related to one or more table(s) to be encoded. Note that the number of
545
+ sentences must match the number of tables.
546
+ answer (`str` or `List[str]`, *optional*):
547
+ Optionally, the corresponding answer to the questions as supervision.
548
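+
+ If only an `answer` is provided (without a `table`), the answer itself is tokenized as the target sequence
+ (via `target_call_func`); providing neither a table nor an answer raises a `ValueError`.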
+ """
549
+
550
+ if table is not None:
551
+ return self.source_call_func(
552
+ table=table,
553
+ query=query,
554
+ answer=answer,
555
+ add_special_tokens=add_special_tokens,
556
+ padding=padding,
557
+ truncation=truncation,
558
+ max_length=max_length,
559
+ stride=stride,
560
+ pad_to_multiple_of=pad_to_multiple_of,
561
+ return_tensors=return_tensors,
562
+ return_token_type_ids=return_token_type_ids,
563
+ return_attention_mask=return_attention_mask,
564
+ return_overflowing_tokens=return_overflowing_tokens,
565
+ return_special_tokens_mask=return_special_tokens_mask,
566
+ return_offsets_mapping=return_offsets_mapping,
567
+ return_length=return_length,
568
+ verbose=verbose,
569
+ **kwargs,
570
+ )
571
+ elif answer is not None:
572
+ return self.target_call_func(
573
+ answer=answer,
574
+ add_special_tokens=add_special_tokens,
575
+ padding=padding,
576
+ truncation=truncation,
577
+ max_length=max_length,
578
+ stride=stride,
579
+ pad_to_multiple_of=pad_to_multiple_of,
580
+ return_tensors=return_tensors,
581
+ return_token_type_ids=return_token_type_ids,
582
+ return_attention_mask=return_attention_mask,
583
+ return_overflowing_tokens=return_overflowing_tokens,
584
+ return_special_tokens_mask=return_special_tokens_mask,
585
+ return_offsets_mapping=return_offsets_mapping,
586
+ return_length=return_length,
587
+ verbose=verbose,
588
+ **kwargs,
589
+ )
590
+ else:
591
+ raise ValueError("You need to provide either a `table` or an `answer`.")
592
+
593
+ def source_call_func(
594
+ self,
595
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
596
+ query: Optional[Union[TextInput, List[TextInput]]] = None,
597
+ answer: Union[str, List[str]] = None,
598
+ add_special_tokens: bool = True,
599
+ padding: Union[bool, str, PaddingStrategy] = False,
600
+ truncation: Union[bool, str, TruncationStrategy] = None,
601
+ max_length: Optional[int] = None,
602
+ stride: int = 0,
603
+ pad_to_multiple_of: Optional[int] = None,
604
+ return_tensors: Optional[Union[str, TensorType]] = None,
605
+ return_token_type_ids: Optional[bool] = None,
606
+ return_attention_mask: Optional[bool] = None,
607
+ return_overflowing_tokens: bool = False,
608
+ return_special_tokens_mask: bool = False,
609
+ return_offsets_mapping: bool = False,
610
+ return_length: bool = False,
611
+ verbose: bool = True,
612
+ **kwargs,
613
+ ) -> BatchEncoding:
614
+ # Input type checking for clearer error
615
+ valid_table = False
616
+ valid_query = False
617
+
618
+ # Check that table have a valid type
619
+ if isinstance(table, pd.DataFrame):
620
+ valid_table = True
621
+ elif isinstance(table, (list, tuple)) and isinstance(table[0], pd.DataFrame):
622
+ valid_table = True
623
+
624
+ # Check that query have a valid type
625
+ if query is None or isinstance(query, str):
626
+ valid_query = True
627
+ elif isinstance(query, (list, tuple)):
628
+ if len(query) == 0 or isinstance(query[0], str):
629
+ valid_query = True
630
+
631
+ if not valid_table:
632
+ raise ValueError(
633
+ "table input must of type `pd.DataFrame` (single example), `List[pd.DataFrame]` (batch of examples). "
634
+ )
635
+ if not valid_query:
636
+ raise ValueError("query input must of type `str` (single example), `List[str]` (batch of examples). ")
637
+ is_batched = isinstance(table, (list, tuple)) or isinstance(query, (list, tuple))
638
+
639
+ if is_batched:
640
+ return self.batch_encode_plus(
641
+ table=table,
642
+ query=query,
643
+ answer=answer,
644
+ add_special_tokens=add_special_tokens,
645
+ padding=padding,
646
+ truncation=truncation,
647
+ max_length=max_length,
648
+ pad_to_multiple_of=pad_to_multiple_of,
649
+ return_tensors=return_tensors,
650
+ return_token_type_ids=return_token_type_ids,
651
+ return_attention_mask=return_attention_mask,
652
+ return_overflowing_tokens=return_overflowing_tokens,
653
+ return_special_tokens_mask=return_special_tokens_mask,
654
+ return_offsets_mapping=return_offsets_mapping,
655
+ return_length=return_length,
656
+ verbose=verbose,
657
+ **kwargs,
658
+ )
659
+ else:
660
+ return self.encode_plus(
661
+ table=table,
662
+ query=query,
663
+ answer=answer,
664
+ add_special_tokens=add_special_tokens,
665
+ padding=padding,
666
+ truncation=truncation,
667
+ max_length=max_length,
668
+ pad_to_multiple_of=pad_to_multiple_of,
669
+ return_tensors=return_tensors,
670
+ return_token_type_ids=return_token_type_ids,
671
+ return_attention_mask=return_attention_mask,
672
+ return_overflowing_tokens=return_overflowing_tokens,
673
+ return_special_tokens_mask=return_special_tokens_mask,
674
+ return_offsets_mapping=return_offsets_mapping,
675
+ return_length=return_length,
676
+ verbose=verbose,
677
+ **kwargs,
678
+ )
679
+
680
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
681
+ def batch_encode_plus(
682
+ self,
683
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
684
+ query: Optional[List[TextInput]] = None,
685
+ answer: List[str] = None,
686
+ add_special_tokens: bool = True,
687
+ padding: Union[bool, str, PaddingStrategy] = False,
688
+ truncation: Union[bool, str] = None,
689
+ max_length: Optional[int] = None,
690
+ pad_to_multiple_of: Optional[int] = None,
691
+ return_tensors: Optional[Union[str, TensorType]] = None,
692
+ return_token_type_ids: Optional[bool] = None,
693
+ return_attention_mask: Optional[bool] = None,
694
+ return_overflowing_tokens: bool = False,
695
+ return_special_tokens_mask: bool = False,
696
+ return_offsets_mapping: bool = False,
697
+ return_length: bool = False,
698
+ verbose: bool = True,
699
+ **kwargs,
700
+ ) -> BatchEncoding:
701
+ """
702
+ <Tip warning={true}>
703
+
704
+ This method is deprecated; `__call__` should be used instead.
705
+
706
+ </Tip>
707
+ """
708
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
709
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
710
+ padding=padding,
711
+ truncation=truncation,
712
+ max_length=max_length,
713
+ pad_to_multiple_of=pad_to_multiple_of,
714
+ verbose=verbose,
715
+ **kwargs,
716
+ )
717
+
718
+ return self._batch_encode_plus(
719
+ table=table,
720
+ query=query,
721
+ answer=answer,
722
+ add_special_tokens=add_special_tokens,
723
+ padding_strategy=padding_strategy,
724
+ truncation_strategy=truncation_strategy,
725
+ max_length=max_length,
726
+ pad_to_multiple_of=pad_to_multiple_of,
727
+ return_tensors=return_tensors,
728
+ return_token_type_ids=return_token_type_ids,
729
+ return_attention_mask=return_attention_mask,
730
+ return_overflowing_tokens=return_overflowing_tokens,
731
+ return_special_tokens_mask=return_special_tokens_mask,
732
+ return_offsets_mapping=return_offsets_mapping,
733
+ return_length=return_length,
734
+ verbose=verbose,
735
+ **kwargs,
736
+ )
737
+
738
+ def _batch_encode_plus(
739
+ self,
740
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
741
+ query: Optional[List[TextInput]] = None,
742
+ answer: Optional[List[str]] = None,
743
+ add_special_tokens: bool = True,
744
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
745
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
746
+ max_length: Optional[int] = None,
747
+ stride: int = 0,
748
+ pad_to_multiple_of: Optional[int] = None,
749
+ return_tensors: Optional[Union[str, TensorType]] = None,
750
+ return_token_type_ids: Optional[bool] = None,
751
+ return_attention_mask: Optional[bool] = None,
752
+ return_overflowing_tokens: bool = False,
753
+ return_special_tokens_mask: bool = False,
754
+ return_offsets_mapping: bool = False,
755
+ return_length: bool = False,
756
+ verbose: bool = True,
757
+ **kwargs,
758
+ ) -> BatchEncoding:
759
+ if return_offsets_mapping:
760
+ raise NotImplementedError(
761
+ "return_offset_mapping is not available when using Python tokenizers. "
762
+ "To use this feature, change your tokenizer to one deriving from "
763
+ "transformers.PreTrainedTokenizerFast."
764
+ )
765
+
766
+ if isinstance(table, pd.DataFrame) and isinstance(query, (list, tuple)):
767
+ # single table, many queries case
768
+ # duplicate table for every query
769
+ table = [table] * len(query)
770
+ if isinstance(table, (list, tuple)) and isinstance(query, str):
771
+ # many tables, single query case
772
+ # duplicate query for every table
773
+ query = [query] * len(table)
774
+
775
+ batch_outputs = self._batch_prepare_for_model(
776
+ table=table,
777
+ query=query,
778
+ answer=answer,
779
+ add_special_tokens=add_special_tokens,
780
+ padding_strategy=padding_strategy,
781
+ truncation_strategy=truncation_strategy,
782
+ max_length=max_length,
783
+ stride=stride,
784
+ pad_to_multiple_of=pad_to_multiple_of,
785
+ return_attention_mask=return_attention_mask,
786
+ return_token_type_ids=return_token_type_ids,
787
+ return_overflowing_tokens=return_overflowing_tokens,
788
+ return_special_tokens_mask=return_special_tokens_mask,
789
+ return_length=return_length,
790
+ return_tensors=return_tensors,
791
+ verbose=verbose,
792
+ )
793
+
794
+ return BatchEncoding(batch_outputs)
795
+
796
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
797
+ def _batch_prepare_for_model(
798
+ self,
799
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
800
+ query: Optional[Union[TextInput, List[TextInput]]] = None,
801
+ answer: Optional[Union[str, List[str]]] = None,
802
+ add_special_tokens: bool = True,
803
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
804
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
805
+ max_length: Optional[int] = None,
806
+ stride: int = 0,
807
+ pad_to_multiple_of: Optional[int] = None,
808
+ return_tensors: Optional[str] = None,
809
+ return_token_type_ids: Optional[bool] = None,
810
+ return_attention_mask: Optional[bool] = None,
811
+ return_overflowing_tokens: bool = False,
812
+ return_special_tokens_mask: bool = False,
813
+ return_length: bool = False,
814
+ verbose: bool = True,
815
+ ) -> BatchEncoding:
816
+ """
817
+ This method adds special tokens, truncates sequences if overflowing while taking into account the special
818
+ tokens and manages a moving window (with user defined stride) for overflowing tokens.
819
+ """
820
+ batch_outputs = {}
821
+ if answer is None:
822
+ answer = [None] * len(table)
823
+ for _table, _query, _answer in zip(table, query, answer):
824
+ text = self.prepare_table_query(
825
+ _table, _query, _answer, truncation_strategy=truncation_strategy, max_length=max_length
826
+ )
827
+
828
+ if self.do_lower_case:
829
+ text = text.lower()
830
+
831
+ tokens = self.tokenize(text)
832
+ outputs = self.prepare_for_model(
833
+ ids=self.convert_tokens_to_ids(tokens),
834
+ add_special_tokens=add_special_tokens,
835
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards
836
+ truncation=truncation_strategy.value,
837
+ max_length=max_length,
838
+ stride=stride,
839
+ pad_to_multiple_of=None, # we pad in batch afterwards
840
+ return_attention_mask=False, # we pad in batch afterwards
841
+ return_token_type_ids=return_token_type_ids,
842
+ return_overflowing_tokens=return_overflowing_tokens,
843
+ return_special_tokens_mask=return_special_tokens_mask,
844
+ return_length=return_length,
845
+ return_tensors=None, # We convert the whole batch to tensors at the end
846
+ prepend_batch_axis=False,
847
+ verbose=verbose,
848
+ )
849
+
850
+ for key, value in outputs.items():
851
+ if key not in batch_outputs:
852
+ batch_outputs[key] = []
853
+ batch_outputs[key].append(value)
854
+
855
+ batch_outputs = self.pad(
856
+ batch_outputs,
857
+ padding=padding_strategy.value,
858
+ max_length=max_length,
859
+ pad_to_multiple_of=pad_to_multiple_of,
860
+ return_attention_mask=return_attention_mask,
861
+ )
862
+
863
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
864
+
865
+ return batch_outputs
866
+
867
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
868
+ def encode(
869
+ self,
870
+ table: "pd.DataFrame",
871
+ query: Optional[TextInput] = None,
872
+ answer: Optional[str] = None,
873
+ add_special_tokens: bool = True,
874
+ padding: Union[bool, str, PaddingStrategy] = False,
875
+ truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy] = None,
876
+ max_length: Optional[int] = None,
877
+ return_tensors: Optional[Union[str, TensorType]] = None,
878
+ **kwargs,
879
+ ) -> List[int]:
880
+ """
881
+ Prepare a table, a string and a possible answer for the model. This method does not return token type IDs,
882
+ attention masks, etc. which are necessary for the model to work correctly. Use this method if you want to build
883
+ your processing on your own, otherwise refer to `__call__`.
884
+ """
885
+ encoded_inputs = self.encode_plus(
886
+ table,
887
+ query=query,
888
+ answer=answer,
889
+ add_special_tokens=add_special_tokens,
890
+ padding=padding,
891
+ truncation=truncation,
892
+ max_length=max_length,
893
+ return_tensors=return_tensors,
894
+ **kwargs,
895
+ )
896
+
897
+ return encoded_inputs["input_ids"]
898
+
899
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
900
+ def encode_plus(
901
+ self,
902
+ table: "pd.DataFrame",
903
+ query: Optional[TextInput] = None,
904
+ answer: Optional[str] = None,
905
+ add_special_tokens: bool = True,
906
+ padding: Union[bool, str, PaddingStrategy] = False,
907
+ truncation: Union[bool, str] = None,
908
+ max_length: Optional[int] = None,
909
+ pad_to_multiple_of: Optional[int] = None,
910
+ return_tensors: Optional[Union[str, TensorType]] = None,
911
+ return_token_type_ids: Optional[bool] = None,
912
+ return_attention_mask: Optional[bool] = None,
913
+ return_special_tokens_mask: bool = False,
914
+ return_offsets_mapping: bool = False,
915
+ return_length: bool = False,
916
+ verbose: bool = True,
917
+ **kwargs,
918
+ ) -> BatchEncoding:
919
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
920
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
921
+ padding=padding,
922
+ truncation=truncation,
923
+ max_length=max_length,
924
+ pad_to_multiple_of=pad_to_multiple_of,
925
+ verbose=verbose,
926
+ **kwargs,
927
+ )
928
+
929
+ return self._encode_plus(
930
+ table=table,
931
+ query=query,
932
+ answer=answer,
933
+ add_special_tokens=add_special_tokens,
934
+ padding_strategy=padding_strategy,
935
+ truncation_strategy=truncation_strategy,
936
+ max_length=max_length,
937
+ pad_to_multiple_of=pad_to_multiple_of,
938
+ return_tensors=return_tensors,
939
+ return_token_type_ids=return_token_type_ids,
940
+ return_attention_mask=return_attention_mask,
941
+ return_special_tokens_mask=return_special_tokens_mask,
942
+ return_offsets_mapping=return_offsets_mapping,
943
+ return_length=return_length,
944
+ verbose=verbose,
945
+ **kwargs,
946
+ )
947
+
948
+ def _encode_plus(
949
+ self,
950
+ table: "pd.DataFrame",
951
+ query: Optional[TextInput] = None,
952
+ answer: Optional[str] = None,
953
+ add_special_tokens: bool = True,
954
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
955
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
956
+ max_length: Optional[int] = None,
957
+ stride: int = 0,
958
+ pad_to_multiple_of: Optional[int] = None,
959
+ return_tensors: Optional[Union[str, TensorType]] = None,
960
+ return_token_type_ids: Optional[bool] = None,
961
+ return_attention_mask: Optional[bool] = None,
962
+ return_overflowing_tokens: bool = False,
963
+ return_special_tokens_mask: bool = False,
964
+ return_offsets_mapping: bool = False,
965
+ return_length: bool = False,
966
+ verbose: bool = True,
967
+ **kwargs,
968
+ ) -> BatchEncoding:
969
+ if return_offsets_mapping:
970
+ raise NotImplementedError(
971
+ "return_offset_mapping is not available when using Python tokenizers. "
972
+ "To use this feature, change your tokenizer to one deriving from "
973
+ "transformers.PreTrainedTokenizerFast. "
974
+ "More information on available tokenizers at "
975
+ "https://github.com/huggingface/transformers/pull/2674"
976
+ )
977
+
978
+ text = self.prepare_table_query(
979
+ table, query, answer, truncation_strategy=truncation_strategy, max_length=max_length
980
+ )
981
+
982
+ # if necessary, perform lower case
983
+ if self.do_lower_case:
984
+ text = text.lower()
985
+
986
+ tokens = self.tokenize(text)
987
+
988
+ return self.prepare_for_model(
989
+ ids=self.convert_tokens_to_ids(tokens),
990
+ add_special_tokens=add_special_tokens,
991
+ padding=padding_strategy.value,
992
+ truncation=truncation_strategy.value,
993
+ max_length=max_length,
994
+ stride=stride,
995
+ pad_to_multiple_of=pad_to_multiple_of,
996
+ return_tensors=return_tensors,
997
+ prepend_batch_axis=True,
998
+ return_attention_mask=return_attention_mask,
999
+ return_token_type_ids=return_token_type_ids,
1000
+ return_overflowing_tokens=return_overflowing_tokens,
1001
+ return_special_tokens_mask=return_special_tokens_mask,
1002
+ return_length=return_length,
1003
+ verbose=verbose,
1004
+ )
1005
+
1006
+ def target_call_func(
1007
+ self,
1008
+ answer: Union[str, List[str]],
1009
+ add_special_tokens: bool = True,
1010
+ padding: Union[bool, str, PaddingStrategy] = False,
1011
+ truncation: Union[bool, str, TruncationStrategy] = None,
1012
+ max_length: Optional[int] = None,
1013
+ stride: int = 0,
1014
+ pad_to_multiple_of: Optional[int] = None,
1015
+ return_tensors: Optional[Union[str, TensorType]] = None,
1016
+ return_token_type_ids: Optional[bool] = None,
1017
+ return_attention_mask: Optional[bool] = None,
1018
+ return_overflowing_tokens: bool = False,
1019
+ return_special_tokens_mask: bool = False,
1020
+ return_offsets_mapping: bool = False,
1021
+ return_length: bool = False,
1022
+ verbose: bool = True,
1023
+ **kwargs,
1024
+ ) -> BatchEncoding:
1025
+ """
1026
+ The method tokenizes and prepares the answer label for the model.
1027
+
1028
+ Args:
1029
+ answer (`str` or `List[str]`):
1030
+ Corresponding answer supervision to the queries for training the model.
1031
+ """
1032
+ is_batched = isinstance(answer, (list, tuple))
1033
+
1034
+ if is_batched:
1035
+ return self.target_batch_encode_plus(
1036
+ answer=answer,
1037
+ add_special_tokens=add_special_tokens,
1038
+ padding=padding,
1039
+ truncation=truncation,
1040
+ max_length=max_length,
1041
+ pad_to_multiple_of=pad_to_multiple_of,
1042
+ return_tensors=return_tensors,
1043
+ return_token_type_ids=return_token_type_ids,
1044
+ return_attention_mask=return_attention_mask,
1045
+ return_overflowing_tokens=return_overflowing_tokens,
1046
+ return_special_tokens_mask=return_special_tokens_mask,
1047
+ return_offsets_mapping=return_offsets_mapping,
1048
+ return_length=return_length,
1049
+ verbose=verbose,
1050
+ **kwargs,
1051
+ )
1052
+ else:
1053
+ return self.target_encode_plus(
1054
+ answer=answer,
1055
+ add_special_tokens=add_special_tokens,
1056
+ padding=padding,
1057
+ truncation=truncation,
1058
+ max_length=max_length,
1059
+ pad_to_multiple_of=pad_to_multiple_of,
1060
+ return_tensors=return_tensors,
1061
+ return_token_type_ids=return_token_type_ids,
1062
+ return_attention_mask=return_attention_mask,
1063
+ return_overflowing_tokens=return_overflowing_tokens,
1064
+ return_special_tokens_mask=return_special_tokens_mask,
1065
+ return_offsets_mapping=return_offsets_mapping,
1066
+ return_length=return_length,
1067
+ verbose=verbose,
1068
+ **kwargs,
1069
+ )
1070
+
1071
+ def target_batch_encode_plus(
1072
+ self,
1073
+ answer: List[str],
1074
+ add_special_tokens: bool = True,
1075
+ padding: Union[bool, str, PaddingStrategy] = False,
1076
+ truncation: Union[bool, str] = None,
1077
+ max_length: Optional[int] = None,
1078
+ pad_to_multiple_of: Optional[int] = None,
1079
+ return_tensors: Optional[Union[str, TensorType]] = None,
1080
+ return_token_type_ids: Optional[bool] = None,
1081
+ return_attention_mask: Optional[bool] = None,
1082
+ return_overflowing_tokens: bool = False,
1083
+ return_special_tokens_mask: bool = False,
1084
+ return_offsets_mapping: bool = False,
1085
+ return_length: bool = False,
1086
+ verbose: bool = True,
1087
+ **kwargs,
1088
+ ) -> BatchEncoding:
1089
+ """
1090
+ Prepare answer strings for the model.
1091
+
1092
+ Args:
1093
+ answer (`List[str]`):
1094
+ Corresponding answer supervision to the queries for training the model.
1095
+ """
1096
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
1097
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
1098
+ padding=padding,
1099
+ truncation=truncation,
1100
+ max_length=max_length,
1101
+ pad_to_multiple_of=pad_to_multiple_of,
1102
+ verbose=verbose,
1103
+ **kwargs,
1104
+ )
1105
+
1106
+ return self._target_batch_encode_plus(
1107
+ answer=answer,
1108
+ add_special_tokens=add_special_tokens,
1109
+ padding_strategy=padding_strategy,
1110
+ truncation_strategy=truncation_strategy,
1111
+ max_length=max_length,
1112
+ pad_to_multiple_of=pad_to_multiple_of,
1113
+ return_tensors=return_tensors,
1114
+ return_token_type_ids=return_token_type_ids,
1115
+ return_attention_mask=return_attention_mask,
1116
+ return_overflowing_tokens=return_overflowing_tokens,
1117
+ return_special_tokens_mask=return_special_tokens_mask,
1118
+ return_offsets_mapping=return_offsets_mapping,
1119
+ return_length=return_length,
1120
+ verbose=verbose,
1121
+ **kwargs,
1122
+ )
1123
+
1124
+ def _target_batch_encode_plus(
1125
+ self,
1126
+ answer: List[str],
1127
+ add_special_tokens: bool = True,
1128
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1129
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
1130
+ max_length: Optional[int] = None,
1131
+ stride: int = 0,
1132
+ pad_to_multiple_of: Optional[int] = None,
1133
+ return_tensors: Optional[Union[str, TensorType]] = None,
1134
+ return_token_type_ids: Optional[bool] = None,
1135
+ return_attention_mask: Optional[bool] = None,
1136
+ return_overflowing_tokens: bool = False,
1137
+ return_special_tokens_mask: bool = False,
1138
+ return_offsets_mapping: bool = False,
1139
+ return_length: bool = False,
1140
+ verbose: bool = True,
1141
+ **kwargs,
1142
+ ) -> BatchEncoding:
1143
+ batch_outputs = {}
1144
+ for text in answer:
1145
+ if self.do_lower_case:
1146
+ text = text.lower()
1147
+
1148
+ tokens = self.tokenize(text)
1149
+ outputs = self.prepare_for_model(
1150
+ ids=self.convert_tokens_to_ids(tokens),
1151
+ add_special_tokens=add_special_tokens,
1152
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards
1153
+ truncation=truncation_strategy.value,
1154
+ max_length=max_length,
1155
+ stride=stride,
1156
+ pad_to_multiple_of=None, # we pad in batch afterwards
1157
+ return_attention_mask=False, # we pad in batch afterwards
1158
+ return_token_type_ids=return_token_type_ids,
1159
+ return_overflowing_tokens=return_overflowing_tokens,
1160
+ return_special_tokens_mask=return_special_tokens_mask,
1161
+ return_length=return_length,
1162
+ return_tensors=None, # We convert the whole batch to tensors at the end
1163
+ prepend_batch_axis=False,
1164
+ verbose=verbose,
1165
+ )
1166
+
1167
+ for key, value in outputs.items():
1168
+ if key not in batch_outputs:
1169
+ batch_outputs[key] = []
1170
+ batch_outputs[key].append(value)
1171
+
1172
+ batch_outputs = self.pad(
1173
+ batch_outputs,
1174
+ padding=padding_strategy.value,
1175
+ max_length=max_length,
1176
+ pad_to_multiple_of=pad_to_multiple_of,
1177
+ return_attention_mask=return_attention_mask,
1178
+ )
1179
+
1180
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
1181
+
1182
+ return BatchEncoding(batch_outputs)
1183
+
1184
+ def target_encode(
1185
+ self,
1186
+ answer: str,
1187
+ add_special_tokens: bool = True,
1188
+ padding: Union[bool, str, PaddingStrategy] = False,
1189
+ truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy] = None,
1190
+ max_length: Optional[int] = None,
1191
+ return_tensors: Optional[Union[str, TensorType]] = None,
1192
+ **kwargs,
1193
+ ) -> List[int]:
1194
+ """
1195
+ Prepare the answer string for the model. This method does not return token type IDs, attention masks, etc.
1196
+ which are necessary for the model to work correctly. Use this method if you want to build your processing on
1197
+ your own, otherwise refer to `__call__`.
1198
+
1199
+ Args:
1200
+ answer (`str`):
1201
+ Corresponding answer supervision to the queries for training the model.
1202
+ """
1203
+ encoded_outputs = self.target_encode_plus(
1204
+ answer=answer,
1205
+ add_special_tokens=add_special_tokens,
1206
+ padding=padding,
1207
+ truncation=truncation,
1208
+ max_length=max_length,
1209
+ return_tensors=return_tensors,
1210
+ **kwargs,
1211
+ )
1212
+
1213
+ return encoded_outputs["input_ids"]
1214
+
1215
+ def target_encode_plus(
1216
+ self,
1217
+ answer: str,
1218
+ add_special_tokens: bool = True,
1219
+ padding: Union[bool, str, PaddingStrategy] = False,
1220
+ truncation: Union[bool, str] = None,
1221
+ max_length: Optional[int] = None,
1222
+ pad_to_multiple_of: Optional[int] = None,
1223
+ return_tensors: Optional[Union[str, TensorType]] = None,
1224
+ return_token_type_ids: Optional[bool] = None,
1225
+ return_attention_mask: Optional[bool] = None,
1226
+ return_special_tokens_mask: bool = False,
1227
+ return_offsets_mapping: bool = False,
1228
+ return_length: bool = False,
1229
+ verbose: bool = True,
1230
+ **kwargs,
1231
+ ) -> BatchEncoding:
1232
+ """
1233
+ Prepare an answer string for the model.
1234
+
1235
+ Args:
1236
+ answer (`str`):
1237
+ Corresponding answer supervision to the queries for training the model.
1238
+ """
1239
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
1240
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
1241
+ padding=padding,
1242
+ truncation=truncation,
1243
+ max_length=max_length,
1244
+ pad_to_multiple_of=pad_to_multiple_of,
1245
+ verbose=verbose,
1246
+ **kwargs,
1247
+ )
1248
+
1249
+ return self._target_encode_plus(
1250
+ answer=answer,
1251
+ add_special_tokens=add_special_tokens,
1252
+ padding_strategy=padding_strategy,
1253
+ truncation_strategy=truncation_strategy,
1254
+ max_length=max_length,
1255
+ pad_to_multiple_of=pad_to_multiple_of,
1256
+ return_tensors=return_tensors,
1257
+ return_token_type_ids=return_token_type_ids,
1258
+ return_attention_mask=return_attention_mask,
1259
+ return_special_tokens_mask=return_special_tokens_mask,
1260
+ return_offsets_mapping=return_offsets_mapping,
1261
+ return_length=return_length,
1262
+ verbose=verbose,
1263
+ **kwargs,
1264
+ )
1265
+
1266
+ def _target_encode_plus(
1267
+ self,
1268
+ answer: str,
1269
+ add_special_tokens: bool = True,
1270
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1271
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
1272
+ max_length: Optional[int] = None,
1273
+ stride: int = 0,
1274
+ pad_to_multiple_of: Optional[int] = None,
1275
+ return_tensors: Optional[Union[str, TensorType]] = None,
1276
+ return_token_type_ids: Optional[bool] = None,
1277
+ return_attention_mask: Optional[bool] = None,
1278
+ return_overflowing_tokens: bool = False,
1279
+ return_special_tokens_mask: bool = False,
1280
+ return_offsets_mapping: bool = False,
1281
+ return_length: bool = False,
1282
+ verbose: bool = True,
1283
+ **kwargs,
1284
+ ) -> BatchEncoding:
1285
+ if return_offsets_mapping:
1286
+ raise NotImplementedError(
1287
+ "return_offset_mapping is not available when using Python tokenizers. "
1288
+ "To use this feature, change your tokenizer to one deriving from "
1289
+ "transformers.PreTrainedTokenizerFast. "
1290
+ "More information on available tokenizers at "
1291
+ "https://github.com/huggingface/transformers/pull/2674"
1292
+ )
1293
+
1294
+ text = answer
1295
+
1296
+ # if necessary, perform lower case
1297
+ if self.do_lower_case:
1298
+ text = text.lower()
1299
+
1300
+ tokens = self.tokenize(text)
1301
+
1302
+ return self.prepare_for_model(
1303
+ ids=self.convert_tokens_to_ids(tokens),
1304
+ add_special_tokens=add_special_tokens,
1305
+ padding=padding_strategy.value,
1306
+ truncation=truncation_strategy.value,
1307
+ max_length=max_length,
1308
+ stride=stride,
1309
+ pad_to_multiple_of=pad_to_multiple_of,
1310
+ return_tensors=return_tensors,
1311
+ prepend_batch_axis=True,
1312
+ return_attention_mask=return_attention_mask,
1313
+ return_token_type_ids=return_token_type_ids,
1314
+ return_overflowing_tokens=return_overflowing_tokens,
1315
+ return_special_tokens_mask=return_special_tokens_mask,
1316
+ return_length=return_length,
1317
+ verbose=verbose,
1318
+ )
1319
+
1320
+ def prepare_table_query(
1321
+ self,
1322
+ table,
1323
+ query,
1324
+ answer=None,
1325
+ truncation_strategy: Union[str, TruncationStrategy, TapexTruncationStrategy] = None,
1326
+ max_length=None,
1327
+ ):
1328
+ """
1329
+ This method can be used to linearize a table and add a corresponding query.
1330
+
1331
+ Optionally, it also handles truncation of the table (cells).
1332
+
1333
+ An answer can be provided for more precise truncation.
1334
+ """
1335
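+ # Example of the end result: for a table with header ["year", "city"] and a single row ["2012", "london"],
+ # and query "where were the olympics held?", this method returns
+ #   "where were the olympics held? col : year | city row 1 : 2012 | london"
+ # (lower-casing, when enabled, is applied by the calling methods).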
+ if not table.empty:
1336
+ # step 1: create table dictionary
1337
+ table_content = {"header": list(table.columns), "rows": [list(row.values) for i, row in table.iterrows()]}
1338
+
1339
+ # step 2: modify table internally
1340
+ # always truncate table cells based on self.max_cell_length
1341
+ # optionally truncate rows if truncation_strategy is set to it
1342
+ self.truncate_table_cells(table_content, query, answer)
1343
+ if truncation_strategy == TapexTruncationStrategy.DROP_ROWS_TO_FIT:
1344
+ self.truncate_table_rows(table_content, query, answer, max_length=max_length)
1345
+
1346
+ # step 3: linearize table
1347
+ linear_table = self.table_linearize.process_table(table_content)
1348
+ else:
1349
+ linear_table = ""
1350
+
1351
+ if linear_table == "":
1352
+ logger.warning(
1353
+ "You provide an empty table, or all cells contain much tokens (e.g., >= 1024 tokens). "
1354
+ + f"Please carefully check the corresponding table with the query : {query}."
1355
+ )
1356
+ if query == "":
1357
+ logger.warning("You provide nothing to query with respect to the table.")
1358
+ # step 4: concatenate query with linear_table
1359
+ separator = " " if query and linear_table else ""
1360
+ joint_input = (query + separator + linear_table) if query else linear_table
1361
+
1362
+ return joint_input
1363
+
1364
+ def truncate_table_cells(self, table_content: Dict, question: str, answer: List):
1365
+ # TODO (Qian): is it possible to revert the original cell if it is in the final answer?
1366
+ cell_mapping = {}
1367
+ for row in table_content["rows"]:
1368
+ for i, cell in enumerate(row):
1369
+ truncate_cell = self.truncate_cell(cell)
1370
+ if truncate_cell is not None:
1371
+ cell_mapping[cell] = truncate_cell
1372
+ row[i] = truncate_cell
1373
+
1374
+ # modify the answer list
1375
+ if answer is not None:
1376
+ for i, case in enumerate(answer):
1377
+ if case in cell_mapping.keys():
1378
+ answer[i] = cell_mapping[case]
1379
+
1380
+ def truncate_cell(self, cell_value):
1381
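+ # Note: numeric and empty cells are returned unchanged; for text cells this returns the truncated string when
+ # the cell reaches `max_cell_length` tokens, and `None` when no truncation is needed.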
+ # do not process on these cases
1382
+ if isinstance(cell_value, (int, float)):
1383
+ return cell_value
1384
+ if cell_value.strip() != "":
1385
+ try_tokens = self.tokenize(cell_value)
1386
+ if len(try_tokens) >= self.max_cell_length:
1387
+ retain_tokens = try_tokens[: self.max_cell_length]
1388
+ retain_cell_value = self.convert_tokens_to_string(retain_tokens)
1389
+ return retain_cell_value
1390
+ else:
1391
+ return None
1392
+ else:
1393
+ return cell_value
1394
+
1395
+ def truncate_table_rows(
1396
+ self, table_content: Dict, question: str, answer: Optional[Union[str, List[str]]] = None, max_length=None
1397
+ ):
1398
+ """
1399
+ Args:
1400
+ table_content:
1401
+ {"header": xxx, "rows": xxx, "id" (Optionally): xxx}
1402
+
1403
+ question:
1404
+ natural language sentence
1405
+
1406
+ answer:
1407
+ the answer supervision when training; empty otherwise
1408
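+
+ Note that `table_content["rows"]` is modified in place: unrelated rows may be deleted first, and any remaining
+ rows that would push the encoding beyond `max_length` are dropped from the end.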
+ """
1409
+ delete_ratio, remain_token_len = self.estimate_delete_ratio(table_content, question, max_length)
1410
+ # randomly delete unrelated rows
1411
+ self.delete_unrelated_rows(table_content, question, answer, delete_ratio)
1412
+ # guarantee the result < max_length
1413
+ maximum_keep_rows = 0
1414
+ for ind, row_example in enumerate(table_content["rows"]):
1415
+ value_string = self.table_linearize.process_row(row_example, ind + 1)
1416
+ value_token_len = len(self.tokenize(value_string))
1417
+ # over the size limit, and take action
1418
+ if value_token_len > remain_token_len:
1419
+ break
1420
+ remain_token_len -= value_token_len
1421
+ maximum_keep_rows += 1
1422
+ del table_content["rows"][maximum_keep_rows:]
1423
+
1424
+ def estimate_delete_ratio(self, table_content: Dict, question: str, max_length=None):
1425
+ if "header" not in table_content or "rows" not in table_content:
1426
+ raise ValueError("The table content should contain both 'header' and 'rows' keys.")
1427
+ # calculate the tokens of the question; special tokens will only be prepended to the question
1428
+ question_tokens = self.tokenize(question, add_special_tokens=True)
1429
+ # calculate the tokens of header
1430
+ header_string = self.table_linearize.process_header(table_content["header"])
1431
+ header_tokens = self.tokenize(header_string, add_special_tokens=False)
1432
+ # split all cell values into tokens and see how many can be accommodated
1433
+ used_token_len = len(question_tokens) + len(header_tokens)
1434
+ # remaining token space for rows
1435
+ remain_token_len = max_length - used_token_len
1436
+
1437
+ value_string = ""
1438
+ for _, row_example in enumerate(table_content["rows"]):
1439
+ # use a general index to roughly estimate the overall token len
1440
+ value_string += self.table_linearize.process_row(row_example, 100) + " "
1441
+ value_token_len = len(self.tokenize(value_string))
1442
+
1443
+ if value_token_len < remain_token_len:
1444
+ # no row will be deleted
1445
+ return 0.0, remain_token_len
1446
+ else:
1447
+ # calculate a rough delete ratio
1448
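+ # e.g. with 300 tokens of budget left and ~600 tokens of row content, the delete ratio is 1 - 300 / 600 = 0.5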
+ return 1.0 - remain_token_len / value_token_len, remain_token_len
1449
+
1450
+ def delete_unrelated_rows(self, table_content: Dict, question: str, answer: List, delete_ratio: float):
1451
+ """
1452
+ The argument answer is used only during training.
1453
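+
+ Rows whose cells share no token with the answer or with the question are deletion candidates; roughly a
+ `delete_ratio` fraction of the rows is then dropped at random from those candidates, while neighbours of
+ related rows are always kept.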
+ """
1454
+ truncated_unrelated_indices = []
1455
+ related_indices = []
1456
+ if answer is None or len(answer) == 0:
1457
+ answer_set = set()
1458
+ else:
1459
+ answer_set = {ans_ex.lower() for ans_ex in answer}
1460
+ # add question key words into answer set
1461
+ if question is not None:
1462
+ answer_set.update(question.split())
1463
+ question_set = set(question.strip("?!.,").split(" "))
1464
+ row_max_len = len(table_content["rows"])
1465
+ for _row_idx, row in enumerate(table_content["rows"]):
1466
+ lower_row = {str(cell).lower() for cell in row}
1467
+ if len(lower_row & answer_set) == 0 and len(lower_row & question_set) == 0:
1468
+ truncated_unrelated_indices.append(_row_idx)
1469
+ else:
1470
+ # add neighbours to preserve information aggressively
1471
+ related_indices.extend([_row_idx - 2, _row_idx - 1, _row_idx, _row_idx + 1, _row_idx + 2])
1472
+
1473
+ # remove the neighbours
1474
+ truncated_unrelated_indices = [
1475
+ _row_idx for _row_idx in truncated_unrelated_indices if _row_idx not in related_indices
1476
+ ]
1477
+ # select some cases to drop
1478
+ drop_items = min(len(truncated_unrelated_indices), int(len(table_content["rows"]) * delete_ratio))
1479
+ drop_row_indices = random.choices(truncated_unrelated_indices, k=drop_items)
1480
+
1481
+ for _row_idx in reversed(range(row_max_len)):
1482
+ if _row_idx in drop_row_indices:
1483
+ del table_content["rows"][_row_idx]
1484
+
1485
+ # log a warning only when rows were actually dropped and the table has an id
1486
+ if "id" in table_content and len(drop_row_indices) > 0:
1487
+ logger.warning("Deleted {} rows in table {}".format(len(drop_row_indices), table_content["id"]))
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__init__.py ADDED
@@ -0,0 +1,63 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_trajectory_transformer": [
21
+ "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "TrajectoryTransformerConfig",
23
+ ],
24
+ }
25
+
26
+ try:
27
+ if not is_torch_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["modeling_trajectory_transformer"] = [
33
+ "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
34
+ "TrajectoryTransformerModel",
35
+ "TrajectoryTransformerPreTrainedModel",
36
+ "load_tf_weights_in_trajectory_transformer",
37
+ ]
38
+
39
+
40
+ if TYPE_CHECKING:
41
+ from .configuration_trajectory_transformer import (
42
+ TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
43
+ TrajectoryTransformerConfig,
44
+ )
45
+
46
+ try:
47
+ if not is_torch_available():
48
+ raise OptionalDependencyNotAvailable()
49
+ except OptionalDependencyNotAvailable:
50
+ pass
51
+ else:
52
+ from .modeling_trajectory_transformer import (
53
+ TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
54
+ TrajectoryTransformerModel,
55
+ TrajectoryTransformerPreTrainedModel,
56
+ load_tf_weights_in_trajectory_transformer,
57
+ )
58
+
59
+
60
+ else:
61
+ import sys
62
+
63
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
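Because the module registers itself as a `_LazyModule`, importing the package only reads `_import_structure`; the torch-backed classes are resolved on first attribute access. A small usage sketch (assuming the package is importable and torch is installed):

```python
# Sketch of how the lazy module above behaves at runtime (assumes torch is installed).
from transformers.models.deprecated.trajectory_transformer import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig()   # lightweight, config-only import
print(config.model_type)                 # "trajectory_transformer"

# The modeling class is only resolvable when is_torch_available() is True,
# mirroring the guard in the __init__.py above.
from transformers.models.deprecated.trajectory_transformer import TrajectoryTransformerModel
```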
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc ADDED
Binary file (6.49 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.79 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc ADDED
Binary file (19 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py ADDED
@@ -0,0 +1,159 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TrajectoryTransformer model configuration"""
16
+
17
+ from ....configuration_utils import PretrainedConfig
18
+ from ....utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
25
+ "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
26
+ ),
27
+ # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
28
+ }
29
+
30
+
31
+ class TrajectoryTransformerConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`TrajectoryTransformerModel`]. It is used to
34
+ instantiate a TrajectoryTransformer model according to the specified arguments, defining the model architecture.
35
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
36
+ TrajectoryTransformer
37
+ [CarlCochet/trajectory-transformer-halfcheetah-medium-v2](https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2)
38
+ architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+
44
+ Args:
45
+ vocab_size (`int`, *optional*, defaults to 100):
46
+ Vocabulary size of the TrajectoryTransformer model. Defines the number of different tokens that can be
47
+ represented by the `trajectories` passed when calling [`TrajectoryTransformerModel`]
48
+ action_weight (`int`, *optional*, defaults to 5):
49
+ Weight of the action in the loss function
50
+ reward_weight (`int`, *optional*, defaults to 1):
51
+ Weight of the reward in the loss function
52
+ value_weight (`int`, *optional*, defaults to 1):
53
+ Weight of the value in the loss function
54
+ block_size (`int`, *optional*, defaults to 249):
55
+ Size of the blocks in the trajectory transformer.
56
+ action_dim (`int`, *optional*, defaults to 6):
57
+ Dimension of the action space.
58
+ observation_dim (`int`, *optional*, defaults to 17):
59
+ Dimension of the observation space.
60
+ transition_dim (`int`, *optional*, defaults to 25):
61
+ Dimension of the transition space.
62
+ n_layer (`int`, *optional*, defaults to 4):
63
+ Number of hidden layers in the Transformer encoder.
64
+ n_head (`int`, *optional*, defaults to 4):
65
+ Number of attention heads for each attention layer in the Transformer encoder.
66
+ n_embd (`int`, *optional*, defaults to 128):
67
+ Dimensionality of the embeddings and hidden states.
68
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
69
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
70
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
71
+ The dropout ratio for the embeddings.
72
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
73
+ The dropout ratio for the attention.
74
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
75
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
76
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
77
+ max_position_embeddings (`int`, *optional*, defaults to 512):
78
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
79
+ just in case (e.g., 512 or 1024 or 2048).
80
+ initializer_range (`float`, *optional*, defaults to 0.02):
81
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
82
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
83
+ The epsilon used by the layer normalization layers.
84
+ kaiming_initializer_range (`float`, *optional*, defaults to 1):
85
+ A coefficient scaling the negative slope of the kaiming initializer rectifier for EinLinear layers.
86
+ use_cache (`bool`, *optional*, defaults to `True`):
87
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
88
+ relevant if `config.is_decoder=True`.
89
+ Example:
90
+
91
+ ```python
92
+ >>> from transformers import TrajectoryTransformerConfig, TrajectoryTransformerModel
93
+
94
+ >>> # Initializing a TrajectoryTransformer CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration
95
+ >>> configuration = TrajectoryTransformerConfig()
96
+
97
+ >>> # Initializing a model (with random weights) from the CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration
98
+ >>> model = TrajectoryTransformerModel(configuration)
99
+
100
+ >>> # Accessing the model configuration
101
+ >>> configuration = model.config
102
+ ```"""
103
+
104
+ model_type = "trajectory_transformer"
105
+ keys_to_ignore_at_inference = ["past_key_values"]
106
+ attribute_map = {
107
+ "hidden_size": "n_embd",
108
+ "num_attention_heads": "n_head",
109
+ "num_hidden_layers": "n_layer",
110
+ }
111
+
112
+ def __init__(
113
+ self,
114
+ vocab_size=100,
115
+ action_weight=5,
116
+ reward_weight=1,
117
+ value_weight=1,
118
+ block_size=249,
119
+ action_dim=6,
120
+ observation_dim=17,
121
+ transition_dim=25,
122
+ n_layer=4,
123
+ n_head=4,
124
+ n_embd=128,
125
+ embd_pdrop=0.1,
126
+ attn_pdrop=0.1,
127
+ resid_pdrop=0.1,
128
+ learning_rate=0.0006,
129
+ max_position_embeddings=512,
130
+ initializer_range=0.02,
131
+ layer_norm_eps=1e-12,
132
+ kaiming_initializer_range=1,
133
+ use_cache=True,
134
+ pad_token_id=1,
135
+ bos_token_id=50256,
136
+ eos_token_id=50256,
137
+ **kwargs,
138
+ ):
139
+ self.vocab_size = vocab_size
140
+ self.action_weight = action_weight
141
+ self.reward_weight = reward_weight
142
+ self.value_weight = value_weight
143
+ self.max_position_embeddings = max_position_embeddings
144
+ self.block_size = block_size
145
+ self.action_dim = action_dim
146
+ self.observation_dim = observation_dim
147
+ self.transition_dim = transition_dim
148
+ self.learning_rate = learning_rate
149
+ self.n_layer = n_layer
150
+ self.n_head = n_head
151
+ self.n_embd = n_embd
152
+ self.embd_pdrop = embd_pdrop
153
+ self.attn_pdrop = attn_pdrop
154
+ self.resid_pdrop = resid_pdrop
155
+ self.initializer_range = initializer_range
156
+ self.layer_norm_eps = layer_norm_eps
157
+ self.kaiming_initializer_range = kaiming_initializer_range
158
+ self.use_cache = use_cache
159
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
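The `attribute_map` above aliases the generic configuration names onto the GPT-style ones, so `hidden_size`, `num_attention_heads` and `num_hidden_layers` resolve to `n_embd`, `n_head` and `n_layer`. A short sketch of that behaviour on an instance:

```python
# Sketch of the attribute_map aliasing defined in the class above.
from transformers.models.deprecated.trajectory_transformer.configuration_trajectory_transformer import (
    TrajectoryTransformerConfig,
)

config = TrajectoryTransformerConfig(n_embd=256, n_head=8, n_layer=6)
assert config.hidden_size == 256          # alias for n_embd
assert config.num_attention_heads == 8    # alias for n_head
assert config.num_hidden_layers == 6      # alias for n_layer
```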
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,70 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TrajectoryTransformer pytorch checkpoint conversion"""
16
+
17
+ import torch
18
+ import trajectory.utils as utils
19
+
20
+ from transformers import TrajectoryTransformerModel
21
+
22
+
23
+ class Parser(utils.Parser):
24
+ dataset: str = "halfcheetah-medium-expert-v2"
25
+ config: str = "config.offline"
26
+
27
+
28
+ def convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(logbase, dataset, loadpath, epoch, device):
29
+ """Convert the original GPT's Sequential blocks to the ModuleList layout and copy the weights."""
30
+
31
+ gpt, gpt_epoch = utils.load_model(logbase, dataset, loadpath, epoch=epoch, device=device)
32
+ trajectory_transformer = TrajectoryTransformerModel(gpt.config)
33
+
34
+ trajectory_transformer.tok_emb.load_state_dict(gpt.tok_emb.state_dict())
35
+ trajectory_transformer.pos_emb = gpt.pos_emb
36
+ trajectory_transformer.drop.load_state_dict(gpt.drop.state_dict())
37
+ trajectory_transformer.ln_f.load_state_dict(gpt.ln_f.state_dict())
38
+ trajectory_transformer.head.load_state_dict(gpt.head.state_dict())
39
+
40
+ for i, block in enumerate(gpt.blocks):
41
+ trajectory_transformer.blocks[i].ln1.load_state_dict(gpt.blocks[i].ln1.state_dict())
42
+ trajectory_transformer.blocks[i].ln2.load_state_dict(gpt.blocks[i].ln2.state_dict())
43
+ trajectory_transformer.blocks[i].attn.load_state_dict(gpt.blocks[i].attn.state_dict())
44
+
45
+ trajectory_transformer.blocks[i].l1.load_state_dict(gpt.blocks[i].mlp[0].state_dict())
46
+ trajectory_transformer.blocks[i].act.load_state_dict(gpt.blocks[i].mlp[1].state_dict())
47
+ trajectory_transformer.blocks[i].l2.load_state_dict(gpt.blocks[i].mlp[2].state_dict())
48
+ trajectory_transformer.blocks[i].drop.load_state_dict(gpt.blocks[i].mlp[3].state_dict())
49
+
50
+ torch.save(trajectory_transformer.state_dict(), "pytorch_model.bin")
51
+
52
+
53
+ if __name__ == "__main__":
54
+ """
55
+ To run this script you will need to install the original repository to run the original model. You can find it
56
+ here: https://github.com/jannerm/trajectory-transformer. From this repository you can also download the
57
+ original PyTorch checkpoints.
58
+
59
+ Run with the command:
60
+
61
+ ```sh
62
+ >>> python convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py --dataset <dataset_name>
63
+ ... --gpt_loadpath <path_to_original_pytorch_checkpoint>
64
+ ```
65
+ """
66
+
67
+ args = Parser().parse_args("plan")
68
+ convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(
69
+ args.logbase, args.dataset, args.gpt_loadpath, args.gpt_epoch, args.device
70
+ )
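A hypothetical follow-up check, not part of the script above: reload the written `pytorch_model.bin` into a fresh model to confirm the converted state dict lines up (this assumes the default config matches the dimensions of the converted checkpoint, as it does for the halfcheetah-medium-v2 defaults):

```python
# Hypothetical sanity check after running the conversion script above.
import torch

from transformers import TrajectoryTransformerConfig, TrajectoryTransformerModel

state_dict = torch.load("pytorch_model.bin", map_location="cpu")  # file written by the script
model = TrajectoryTransformerModel(TrajectoryTransformerConfig())
missing, unexpected = model.load_state_dict(state_dict, strict=False)
print("missing keys:", missing)        # expect an empty list if the conversion covered everything
print("unexpected keys:", unexpected)
```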
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py ADDED
@@ -0,0 +1,608 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch TrajectoryTransformer model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import functional as F
27
+
28
+ from ....modeling_utils import PreTrainedModel
29
+ from ....utils import (
30
+ ModelOutput,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_trajectory_transformer import TrajectoryTransformerConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ _CHECKPOINT_FOR_DOC = "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
42
+ _CONFIG_FOR_DOC = "TrajectoryTransformerConfig"
43
+
44
+ TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
45
+ "CarlCochet/trajectory-transformer-halfcheetah-medium-v2",
46
+ # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
47
+ ]
48
+
49
+
50
+ def load_tf_weights_in_trajectory_transformer(model, config, tf_checkpoint_path):
51
+ """Load tf checkpoints in a pytorch model."""
52
+ try:
53
+ import re
54
+
55
+ import numpy as np
56
+ import tensorflow as tf
57
+ except ImportError:
58
+ logger.error(
59
+ "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
60
+ "https://www.tensorflow.org/install/ for installation instructions."
61
+ )
62
+ raise
63
+ tf_path = os.path.abspath(tf_checkpoint_path)
64
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
65
+ # Load weights from TF model
66
+ init_vars = tf.train.list_variables(tf_path)
67
+ names = []
68
+ arrays = []
69
+ for name, shape in init_vars:
70
+ logger.info(f"Loading TF weight {name} with shape {shape}")
71
+ array = tf.train.load_variable(tf_path, name)
72
+ names.append(name)
73
+ arrays.append(array)
74
+
75
+ for name, array in zip(names, arrays):
76
+ name = name.split("/")
77
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
78
+ # which are not required for using pretrained model
79
+ if any(
80
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
81
+ for n in name
82
+ ):
83
+ logger.info(f"Skipping {'/'.join(name)}")
84
+ continue
85
+ pointer = model
86
+ for m_name in name:
87
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
88
+ scope_names = re.split(r"_(\d+)", m_name)
89
+ else:
90
+ scope_names = [m_name]
91
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
92
+ pointer = getattr(pointer, "weight")
93
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
94
+ pointer = getattr(pointer, "bias")
95
+ elif scope_names[0] == "output_weights":
96
+ pointer = getattr(pointer, "weight")
97
+ elif scope_names[0] == "squad":
98
+ pointer = getattr(pointer, "classifier")
99
+ else:
100
+ try:
101
+ pointer = getattr(pointer, scope_names[0])
102
+ except AttributeError:
103
+ logger.info(f"Skipping {'/'.join(name)}")
104
+ continue
105
+ if len(scope_names) >= 2:
106
+ num = int(scope_names[1])
107
+ pointer = pointer[num]
108
+ if m_name[-11:] == "_embeddings":
109
+ pointer = getattr(pointer, "weight")
110
+ elif m_name == "kernel":
111
+ array = np.transpose(array)
112
+ try:
113
+ if pointer.shape != array.shape:
114
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
115
+ except ValueError as e:
116
+ e.args += (pointer.shape, array.shape)
117
+ raise
118
+ logger.info(f"Initialize PyTorch weight {name}")
119
+ pointer.data = torch.from_numpy(array)
120
+ return model
121
+
122
+
123
+ @dataclass
124
+ class TrajectoryTransformerOutput(ModelOutput):
125
+ """
126
+ Base class for model's outputs that also contains a pooling of the last hidden states.
127
+
128
+ Args:
129
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
130
+ Language modeling loss.
131
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
132
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
133
+ past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
134
+ Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
135
+ sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the
136
+ attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
137
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
138
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
139
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
140
+ plus the initial embedding outputs.
141
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
142
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
143
+ sequence_length)`. GPT2Attentions weights after the attention softmax, used to compute the weighted average
144
+ in the self-attention heads.
145
+ """
146
+
147
+ loss: Optional[torch.FloatTensor] = None
148
+ logits: torch.FloatTensor = None
149
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
150
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
151
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
152
+
153
+
154
+ class TrajectoryTransformerPreTrainedModel(PreTrainedModel):
155
+ """
156
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
157
+ models.
158
+ """
159
+
160
+ config_class = TrajectoryTransformerConfig
161
+ load_tf_weights = load_tf_weights_in_trajectory_transformer
162
+ base_model_prefix = "trajectory_transformer"
163
+ main_input_name = "trajectories"
164
+ supports_gradient_checkpointing = True
165
+
166
+ def _init_weights(self, module):
167
+ if isinstance(module, (nn.Linear, nn.Embedding)):
168
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
169
+ if isinstance(module, nn.Linear) and module.bias is not None:
170
+ module.bias.data.zero_()
171
+ elif isinstance(module, nn.LayerNorm):
172
+ module.bias.data.zero_()
173
+ module.weight.data.fill_(1.0)
174
+ elif isinstance(module, EinLinear):
175
+ for i in range(module.n_models):
176
+ nn.init.kaiming_uniform_(module.weight[i], a=math.sqrt(5) / self.config.kaiming_initializer_range)
177
+ if module.bias is not None:
178
+ fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight[i])
179
+ bound = (1 / math.sqrt(fan_in)) * self.config.initializer_range
180
+ nn.init.uniform_(module.bias[i], -bound, bound)
181
+
182
+
183
+ TRAJECTORY_TRANSFORMER_START_DOCSTRING = r"""
184
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
185
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
186
+ behavior.
187
+
188
+ Parameters:
189
+ config ([`TrajectoryTransformerConfig`]): Model configuration class with all the parameters of the model.
190
+ Initializing with a config file does not load the weights associated with the model, only the
191
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
192
+ """
193
+
194
+ TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING = r"""
195
+ Args:
196
+ trajectories (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
197
+ Batch of trajectories, where a trajectory is a sequence of states, actions and rewards.
198
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`, *optional*):
199
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
200
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
201
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
202
+ targets (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
203
+ Desired targets used to compute the loss.
204
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
205
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
206
+
207
+ - 1 for tokens that are **not masked**,
208
+ - 0 for tokens that are **masked**.
209
+
210
+ [What are attention masks?](../glossary#attention-mask)
211
+ use_cache (`bool`, *optional*):
212
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
213
+ `past_key_values`).
214
+ output_attentions (`bool`, *optional*):
215
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
216
+ tensors for more detail.
217
+ output_hidden_states (`bool`, *optional*):
218
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
219
+ more detail.
220
+ return_dict (`bool`, *optional*):
221
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
222
+ """
223
+
224
+
225
+ class EinLinear(nn.Module):
226
+ def __init__(self, n_models, in_features, out_features, bias):
227
+ super().__init__()
228
+ self.n_models = n_models
229
+ self.out_features = out_features
230
+ self.in_features = in_features
231
+ self.weight = nn.Parameter(torch.Tensor(n_models, out_features, in_features))
232
+ if bias:
233
+ self.bias = nn.Parameter(torch.Tensor(n_models, out_features))
234
+ else:
235
+ self.register_parameter("bias", None)
236
+
237
+ def reset_parameters(self):
238
+ for i in range(self.n_models):
239
+ nn.init.kaiming_uniform_(self.weight[i], a=math.sqrt(5))
240
+ if self.bias is not None:
241
+ fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[i])
242
+ bound = 1 / math.sqrt(fan_in)
243
+ nn.init.uniform_(self.bias[i], -bound, bound)
244
+
245
+ def forward(self, input):
246
+ """
247
+ Args:
248
+ input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`):
249
+ The input to the layer.
250
+ """
251
+ # [ batch_size x n_models x output_dim ]
252
+ output = torch.einsum("eoi,bei->beo", self.weight, input)
253
+ if self.bias is not None:
254
+ raise RuntimeError("bias addition is not implemented in EinLinear.forward")
255
+ return output
256
+
257
+
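`EinLinear` above applies an independent `(out_features x in_features)` weight to each of the `n_models` slots through one einsum. A toy sketch of the `"eoi,bei->beo"` shape semantics with made-up sizes:

```python
# Toy illustration of the einsum used by EinLinear above (hypothetical sizes).
import torch

n_models, in_features, out_features, batch = 3, 4, 5, 2
weight = torch.randn(n_models, out_features, in_features)   # (e, o, i)
x = torch.randn(batch, n_models, in_features)               # (b, e, i)

out = torch.einsum("eoi,bei->beo", weight, x)                # (b, e, o)
assert out.shape == (batch, n_models, out_features)

# Slot e is just an ordinary linear map with its own weight matrix:
e = 1
assert torch.allclose(out[:, e], x[:, e] @ weight[e].T, atol=1e-6)
```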
258
+ class CausalSelfAttention(nn.Module):
259
+ def __init__(self, config):
260
+ super().__init__()
261
+
262
+ if config.n_embd % config.n_head != 0:
263
+ raise ValueError(f"n_head ({config.n_head}) should be a divisor of n_embd ({config.n_embd})")
264
+
265
+ # key, query, value projections for all heads
266
+ self.key = nn.Linear(config.n_embd, config.n_embd)
267
+ self.query = nn.Linear(config.n_embd, config.n_embd)
268
+ self.value = nn.Linear(config.n_embd, config.n_embd)
269
+
270
+ # regularization
271
+ self.attn_drop = nn.Dropout(config.attn_pdrop)
272
+ self.resid_drop = nn.Dropout(config.resid_pdrop)
273
+
274
+ # output projection
275
+ self.proj = nn.Linear(config.n_embd, config.n_embd)
276
+
277
+ # causal mask to ensure that attention is only applied to the left in the input sequence
278
+ self.register_buffer(
279
+ "mask",
280
+ torch.tril(torch.ones(config.block_size, config.block_size)).view(
281
+ 1, 1, config.block_size, config.block_size
282
+ ),
283
+ persistent=False,
284
+ )
285
+
286
+ # mask previous value estimates
287
+ joined_dim = config.observation_dim + config.action_dim + 2
288
+ self.mask.squeeze()[:, joined_dim - 1 :: joined_dim] = 0
289
+
290
+ self.n_head = config.n_head
291
+
292
+ def forward(
293
+ self,
294
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
295
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
296
+ use_cache: Optional[bool] = False,
297
+ output_attentions: Optional[bool] = False,
298
+ ):
299
+ batch_size, sequence_length, embedding_dim = hidden_states.size()
300
+
301
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
302
+ # [ batch_size x n_heads x sequence_length x head_dim ]
303
+ key = (
304
+ self.key(hidden_states)
305
+ .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
306
+ .transpose(1, 2)
307
+ )
308
+ query = (
309
+ self.query(hidden_states)
310
+ .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
311
+ .transpose(1, 2)
312
+ )
313
+ value = (
314
+ self.value(hidden_states)
315
+ .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
316
+ .transpose(1, 2)
317
+ )
318
+
319
+ if layer_past is not None:
320
+ past_key, past_value = layer_past
321
+ key = torch.cat((past_key, key), dim=-2)
322
+ value = torch.cat((past_value, value), dim=-2)
323
+
324
+ if use_cache is True:
325
+ present = (key, value)
326
+ else:
327
+ present = None
328
+
329
+ # causal self-attention
330
+ # [ batch_size x n_heads x sequence_length x sequence_length ]
331
+ attn_weights = (torch.matmul(query, key.transpose(-2, -1))) * (1.0 / math.sqrt(key.size(-1)))
332
+ attn_weights = attn_weights.masked_fill(
333
+ self.mask[:, :, :sequence_length, :sequence_length] == 0, torch.finfo(attn_weights.dtype).min
334
+ )
335
+ attn_weights = F.softmax(attn_weights, dim=-1)
336
+ self._attn_map = attn_weights.clone()
337
+ attn_weights = self.attn_drop(attn_weights)
338
+
339
+ output = torch.matmul(attn_weights, value)
340
+ # [ batch_size x sequence_length x embedding_dim ]
341
+ # re-assemble all head outputs side by side
342
+ output = output.transpose(1, 2).contiguous().view(batch_size, sequence_length, embedding_dim)
343
+
344
+ # output projection
345
+ output = self.resid_drop(self.proj(output))
346
+
347
+ outputs = (output, present)
348
+ if output_attentions:
349
+ outputs += (attn_weights,)
350
+
351
+ return outputs
352
+
353
+
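The buffer registered in `CausalSelfAttention` above is a lower-triangular causal mask whose columns at the value-estimate positions (`joined_dim - 1`, `2 * joined_dim - 1`, ...) are zeroed, so no token can attend to a previous value estimate. A toy reconstruction with small, hypothetical dimensions:

```python
# Toy reconstruction of the mask construction above (hypothetical dimensions).
import torch

block_size = 8
observation_dim, action_dim = 2, 1
joined_dim = observation_dim + action_dim + 2        # obs + action + reward + value = 5

mask = torch.tril(torch.ones(block_size, block_size)).view(1, 1, block_size, block_size)
mask.squeeze()[:, joined_dim - 1 :: joined_dim] = 0   # zero out the value-estimate columns (here: column 4)

print(mask[0, 0].int())  # causal pattern with column 4 blanked out
```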
354
+ class Block(nn.Module):
355
+ def __init__(self, config):
356
+ super().__init__()
357
+ self.ln1 = nn.LayerNorm(config.n_embd)
358
+ self.ln2 = nn.LayerNorm(config.n_embd)
359
+ self.attn = CausalSelfAttention(config)
360
+
361
+ # MLP
362
+ self.l1 = nn.Linear(config.n_embd, 4 * config.n_embd)
363
+ self.act = nn.GELU()
364
+ self.l2 = nn.Linear(4 * config.n_embd, config.n_embd)
365
+ self.drop = nn.Dropout(config.resid_pdrop)
366
+
367
+ def forward(
368
+ self,
369
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
370
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
371
+ use_cache: Optional[bool] = False,
372
+ output_attentions: Optional[bool] = False,
373
+ ):
374
+ residual = hidden_states
375
+ hidden_states = self.ln1(hidden_states)
376
+
377
+ attn_outputs = self.attn(
378
+ hidden_states, layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions
379
+ )
380
+ attn_output = attn_outputs[0]
381
+ outputs = attn_outputs[1:]
382
+ hidden_states = attn_output + residual
383
+
384
+ residual = hidden_states
385
+ hidden_states = self.ln2(hidden_states)
386
+ hidden_states = self.l1(hidden_states)
387
+ hidden_states = self.act(hidden_states)
388
+ hidden_states = self.l2(hidden_states)
389
+ hidden_states = residual + self.drop(hidden_states)
390
+
391
+ if use_cache:
392
+ outputs = (hidden_states,) + outputs
393
+ else:
394
+ outputs = (hidden_states,) + outputs[1:]
395
+
396
+ return outputs
397
+
398
+
399
+ @add_start_docstrings(
400
+ "The bare TrajectoryTransformer Model transformer outputting raw hidden-states without any specific head on top.",
401
+ TRAJECTORY_TRANSFORMER_START_DOCSTRING,
402
+ )
403
+ class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel):
404
+ """The full GPT language model, with a context size of block_size."""
405
+
406
+ def __init__(self, config):
407
+ super().__init__(config)
408
+
409
+ # input embedding stem (+1 for stop token)
410
+ self.tok_emb = nn.Embedding(config.vocab_size * config.transition_dim + 1, config.n_embd)
411
+
412
+ self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
413
+ self.drop = nn.Dropout(config.embd_pdrop)
414
+ # transformer
415
+ self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])
416
+ # decoder head
417
+ self.ln_f = nn.LayerNorm(config.n_embd)
418
+ self.head = EinLinear(config.transition_dim, config.n_embd, config.vocab_size + 1, bias=False)
419
+
420
+ self.vocab_size = config.vocab_size
421
+ self.stop_token = config.vocab_size * config.transition_dim
422
+ self.block_size = config.block_size
423
+
424
+ self.observation_dim = config.observation_dim
425
+ self.action_dim = config.action_dim
426
+ self.transition_dim = config.transition_dim
427
+ self.embedding_dim = config.n_embd
428
+
429
+ self.action_weight = config.action_weight
430
+ self.reward_weight = config.reward_weight
431
+ self.value_weight = config.value_weight
432
+
433
+ self.gradient_checkpointing = False
434
+
435
+ self.post_init()
436
+
437
+ def get_block_size(self):
438
+ return self.block_size
439
+
440
+ def offset_tokens(self, trajectories):
441
+ _, sequence_length = trajectories.shape
442
+
443
+ n_states = int(np.ceil(sequence_length / self.transition_dim))
444
+
445
+ offsets = torch.arange(self.transition_dim) * self.vocab_size
446
+ offsets = offsets.repeat(n_states).to(trajectories.device)
447
+
448
+ offset_trajectories = trajectories + offsets[:sequence_length]
449
+ offset_trajectories[trajectories == self.vocab_size] = self.stop_token
450
+ return offset_trajectories
451
+
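`offset_tokens` above shifts each token by `slot_index * vocab_size`, giving every position inside a transition its own block of vocabulary ids. A worked toy example with hypothetical sizes:

```python
# Worked toy example of the offsetting in offset_tokens above (hypothetical sizes).
import torch

vocab_size, transition_dim = 100, 4
trajectories = torch.tensor([[7, 7, 7, 7, 7]])               # sequence_length = 5

n_states = -(-trajectories.shape[1] // transition_dim)        # ceil(5 / 4) = 2
offsets = (torch.arange(transition_dim) * vocab_size).repeat(n_states)[: trajectories.shape[1]]
print(trajectories + offsets)                                 # tensor([[  7, 107, 207, 307,   7]])
```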
452
+ def pad_to_full_observation(self, hidden_states):
453
+ batch_size, sequence_length, _ = hidden_states.shape
454
+
455
+ n_pad = (self.transition_dim - sequence_length % self.transition_dim) % self.transition_dim
456
+ padding = torch.zeros(batch_size, n_pad, self.embedding_dim, device=hidden_states.device)
457
+
458
+ # [ batch_size x padded_sequence_length' x embedding_dim ]
459
+ hidden_states_pad = torch.cat([hidden_states, padding], dim=1)
460
+ hidden_states_pad = hidden_states_pad.view(-1, self.transition_dim, self.embedding_dim)
461
+
462
+ return hidden_states_pad, n_pad
463
+
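The padding amount in `pad_to_full_observation` rounds the sequence length up to the next multiple of `transition_dim` so the hidden states can be reshaped into whole transitions. A quick check of that arithmetic:

```python
# Quick check of the n_pad arithmetic used in pad_to_full_observation above.
transition_dim = 25
for sequence_length in (49, 50, 51):
    n_pad = (transition_dim - sequence_length % transition_dim) % transition_dim
    assert (sequence_length + n_pad) % transition_dim == 0
    print(sequence_length, "->", n_pad)   # 49 -> 1, 50 -> 0, 51 -> 24
```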
464
+ @add_start_docstrings_to_model_forward(
465
+ TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")
466
+ )
467
+ @replace_return_docstrings(output_type=TrajectoryTransformerOutput, config_class=_CONFIG_FOR_DOC)
468
+ def forward(
469
+ self,
470
+ trajectories: Optional[torch.LongTensor] = None,
471
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
472
+ targets: Optional[torch.FloatTensor] = None,
473
+ attention_mask: Optional[torch.FloatTensor] = None,
474
+ use_cache: Optional[bool] = None,
475
+ output_attentions: Optional[bool] = None,
476
+ output_hidden_states: Optional[bool] = None,
477
+ return_dict: Optional[bool] = None,
478
+ ) -> Union[Tuple[torch.Tensor], TrajectoryTransformerOutput]:
479
+ r"""
480
+ Returns:
481
+
482
+ Examples:
483
+
484
+ ```python
485
+ >>> from transformers import TrajectoryTransformerModel
486
+ >>> import numpy as np
+ >>> import torch
487
+
488
+ >>> model = TrajectoryTransformerModel.from_pretrained(
489
+ ... "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
490
+ ... )
491
+ >>> device = torch.device("cpu")  # use "cuda" if available
+ >>> model.to(device)
492
+ >>> model.eval()
493
+
494
+ >>> observations_dim, action_dim, batch_size = 17, 6, 256
495
+ >>> seq_length = observations_dim + action_dim + 1
496
+
497
+ >>> trajectories = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(
498
+ ... device
499
+ ... )
500
+ >>> targets = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(device)
501
+
502
+ >>> outputs = model(
503
+ ... trajectories,
504
+ ... targets=targets,
505
+ ... use_cache=True,
506
+ ... output_attentions=True,
507
+ ... output_hidden_states=True,
508
+ ... return_dict=True,
509
+ ... )
510
+ ```
511
+ """
512
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
513
+ output_hidden_states = (
514
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
515
+ )
516
+
517
+ if past_key_values is None:
518
+ past_key_values = tuple([None] * len(self.blocks))
519
+
520
+ batch_size, sequence_length = trajectories.size()
521
+
522
+ if sequence_length > self.block_size:
523
+ raise ValueError("Cannot forward, model block size is exhausted.")
524
+
525
+ offset_trajectories = self.offset_tokens(trajectories)
526
+ # [ batch_size x sequence_length x embedding_dim ]
527
+ # forward the GPT model
528
+ token_embeddings = self.tok_emb(offset_trajectories) # each index maps to a (learnable) vector
529
+ position_embeddings = self.pos_emb[:, :sequence_length, :] # each position maps to a (learnable) vector
530
+
531
+ hidden_states = self.drop(token_embeddings + position_embeddings)
532
+
533
+ if self.gradient_checkpointing and self.training:
534
+ if use_cache:
535
+ logger.warning_once(
536
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
537
+ )
538
+ use_cache = False
539
+
540
+ presents = () if use_cache else None
541
+ all_self_attentions = () if output_attentions else None
542
+ all_hidden_states = () if output_hidden_states else None
543
+
544
+ for i, (block, layer_past) in enumerate(zip(self.blocks, past_key_values)):
545
+ if output_hidden_states:
546
+ all_hidden_states = all_hidden_states + (hidden_states,)
547
+
548
+ if self.gradient_checkpointing and self.training:
549
+ outputs = self._gradient_checkpointing_func(
550
+ block.__call__,
551
+ hidden_states,
552
+ layer_past,
553
+ use_cache,
554
+ output_attentions,
555
+ )
556
+ else:
557
+ outputs = block(hidden_states, layer_past, use_cache, output_attentions)
558
+
559
+ hidden_states = outputs[0]
560
+ if use_cache is True:
561
+ presents = presents + (outputs[1],)
562
+
563
+ if output_attentions:
564
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
565
+
566
+ # [ batch_size x sequence_length x embedding_dim ]
567
+ hidden_state = self.ln_f(hidden_states)
568
+
569
+ if output_hidden_states:
570
+ all_hidden_states = all_hidden_states + (hidden_states,)
571
+
572
+ hidden_states_pad, n_pad = self.pad_to_full_observation(hidden_state)
573
+
574
+ logits = self.head(hidden_states_pad)
575
+ logits = logits.reshape(batch_size, sequence_length + n_pad, self.vocab_size + 1)
576
+ logits = logits[:, :sequence_length]
577
+
578
+ # if we are given some desired targets also calculate the loss
579
+ if targets is not None:
580
+ loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.view(-1), reduction="none")
581
+ if self.action_weight != 1 or self.reward_weight != 1 or self.value_weight != 1:
582
+ # make weights
583
+ n_states = int(np.ceil(sequence_length / self.transition_dim))
584
+ weights = torch.cat(
585
+ [
586
+ torch.ones(self.observation_dim, device=trajectories.device),
587
+ torch.ones(self.action_dim, device=trajectories.device) * self.action_weight,
588
+ torch.ones(1, device=trajectories.device) * self.reward_weight,
589
+ torch.ones(1, device=trajectories.device) * self.value_weight,
590
+ ]
591
+ )
592
+ weights = weights.repeat(n_states)
593
+ weights = weights[1:].repeat(batch_size, 1)
594
+ loss = loss * weights.view(-1)
595
+ loss = (loss * attention_mask.view(-1)).mean()
596
+ else:
597
+ loss = None
598
+
599
+ if not return_dict:
600
+ return tuple(v for v in [loss, logits, presents, all_hidden_states, all_self_attentions] if v is not None)
601
+
602
+ return TrajectoryTransformerOutput(
603
+ loss=loss,
604
+ logits=logits,
605
+ past_key_values=presents,
606
+ hidden_states=all_hidden_states,
607
+ attentions=all_self_attentions,
608
+ )
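The loss weighting in `forward` above assigns one weight per slot of a transition (ones for observations, `action_weight` for actions, then `reward_weight` and `value_weight`), tiles that vector over the predicted states and drops the first entry so it lines up with next-token targets. A toy sketch of the construction with small, hypothetical dimensions:

```python
# Toy sketch of the per-token loss weights built in forward() above (hypothetical sizes).
import torch

observation_dim, action_dim = 3, 2
action_weight, reward_weight, value_weight = 5, 1, 1
transition_dim = observation_dim + action_dim + 2     # 7
batch_size = 2

n_states = 2                                           # pretend the sequence spans two transitions
weights = torch.cat(
    [
        torch.ones(observation_dim),
        torch.ones(action_dim) * action_weight,
        torch.ones(1) * reward_weight,
        torch.ones(1) * value_weight,
    ]
).repeat(n_states)

weights = weights[1:].repeat(batch_size, 1)            # drop the first slot to align with shifted targets
print(weights)                                         # shape (2, 13): [1, 1, 5, 5, 1, 1, 1, 1, 1, 5, 5, 1, 1] per row
```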
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py ADDED
@@ -0,0 +1,97 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
22
+ "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
23
+ }
24
+
25
+ try:
26
+ if not is_torch_available():
27
+ raise OptionalDependencyNotAvailable()
28
+ except OptionalDependencyNotAvailable:
29
+ pass
30
+ else:
31
+ _import_structure["modeling_transfo_xl"] = [
32
+ "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
33
+ "AdaptiveEmbedding",
34
+ "TransfoXLForSequenceClassification",
35
+ "TransfoXLLMHeadModel",
36
+ "TransfoXLModel",
37
+ "TransfoXLPreTrainedModel",
38
+ "load_tf_weights_in_transfo_xl",
39
+ ]
40
+
41
+ try:
42
+ if not is_tf_available():
43
+ raise OptionalDependencyNotAvailable()
44
+ except OptionalDependencyNotAvailable:
45
+ pass
46
+ else:
47
+ _import_structure["modeling_tf_transfo_xl"] = [
48
+ "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
49
+ "TFAdaptiveEmbedding",
50
+ "TFTransfoXLForSequenceClassification",
51
+ "TFTransfoXLLMHeadModel",
52
+ "TFTransfoXLMainLayer",
53
+ "TFTransfoXLModel",
54
+ "TFTransfoXLPreTrainedModel",
55
+ ]
56
+
57
+
58
+ if TYPE_CHECKING:
59
+ from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
60
+ from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
61
+
62
+ try:
63
+ if not is_torch_available():
64
+ raise OptionalDependencyNotAvailable()
65
+ except OptionalDependencyNotAvailable:
66
+ pass
67
+ else:
68
+ from .modeling_transfo_xl import (
69
+ TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
70
+ AdaptiveEmbedding,
71
+ TransfoXLForSequenceClassification,
72
+ TransfoXLLMHeadModel,
73
+ TransfoXLModel,
74
+ TransfoXLPreTrainedModel,
75
+ load_tf_weights_in_transfo_xl,
76
+ )
77
+
78
+ try:
79
+ if not is_tf_available():
80
+ raise OptionalDependencyNotAvailable()
81
+ except OptionalDependencyNotAvailable:
82
+ pass
83
+ else:
84
+ from .modeling_tf_transfo_xl import (
85
+ TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
86
+ TFAdaptiveEmbedding,
87
+ TFTransfoXLForSequenceClassification,
88
+ TFTransfoXLLMHeadModel,
89
+ TFTransfoXLMainLayer,
90
+ TFTransfoXLModel,
91
+ TFTransfoXLPreTrainedModel,
92
+ )
93
+
94
+ else:
95
+ import sys
96
+
97
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.62 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc ADDED
Binary file (6.89 kB).